From 3e81f2cf1500a3589d8236bd04e6ad52534e7f60 Mon Sep 17 00:00:00 2001
From: Volker Schukai <volker.schukai@schukai.com>
Date: Wed, 20 Nov 2024 14:31:32 +0100
Subject: [PATCH] feat: new javascript generation #9

---
 .gitignore                                    |     3 +
 README.md                                     |    40 +
 examples/example5/package-lock.json           |    61 +
 examples/example5/package.json                |    14 +
 examples/example5/scripts/.gitkeep            |     0
 examples/example5/source/lib.mjs              |     4 +
 examples/example5/source/main.css             |     5 +
 examples/example5/source/main.mjs             |     7 +
 examples/example5/source/page.mjs             |     6 +
 examples/example5/styles/.gitkeep             |     0
 examples/example5/test.html                   |    26 +
 flake.lock                                    |     6 +-
 source/.idea/.gitignore                       |     8 +
 source/.idea/codeStyles/codeStyleConfig.xml   |     5 +
 source/.idea/misc.xml                         |    20 +
 source/.idea/modules.xml                      |     8 +
 source/.idea/source.iml                       |    10 +
 source/.idea/vcs.xml                          |     6 +
 source/command.go                             |    48 +-
 source/go.mod                                 |    21 +-
 source/go.sum                                 |    30 +-
 source/html/cut.go                            |     2 +-
 source/html/generate.go                       |     2 +-
 source/html/sync.go                           |     2 +-
 source/javascript/generate.go                 |   251 +
 source/main.go                                |     3 +-
 .../github.com/evanw/esbuild/LICENSE.md       |    21 +
 .../esbuild/internal/api_helpers/use_timer.go |     7 +
 .../evanw/esbuild/internal/ast/ast.go         |   812 +
 .../evanw/esbuild/internal/bundler/bundler.go |  3331 +++
 .../evanw/esbuild/internal/cache/cache.go     |   115 +
 .../evanw/esbuild/internal/cache/cache_ast.go |   190 +
 .../evanw/esbuild/internal/cache/cache_fs.go  |    52 +
 .../evanw/esbuild/internal/compat/compat.go   |    92 +
 .../esbuild/internal/compat/css_table.go      |   361 +
 .../evanw/esbuild/internal/compat/js_table.go |   910 +
 .../evanw/esbuild/internal/config/config.go   |   842 +
 .../evanw/esbuild/internal/config/globals.go  |  1014 +
 .../evanw/esbuild/internal/css_ast/css_ast.go |  1205 ++
 .../internal/css_ast/css_decl_table.go        |   698 +
 .../esbuild/internal/css_lexer/css_lexer.go   |  1081 +
 .../internal/css_parser/css_color_spaces.go   |   620 +
 .../esbuild/internal/css_parser/css_decls.go  |   538 +
 .../css_parser/css_decls_animation.go         |   119 +
 .../css_parser/css_decls_border_radius.go     |   217 +
 .../internal/css_parser/css_decls_box.go      |   206 +
 .../css_parser/css_decls_box_shadow.go        |   106 +
 .../internal/css_parser/css_decls_color.go    |   938 +
 .../internal/css_parser/css_decls_composes.go |   103 +
 .../css_parser/css_decls_container.go         |    53 +
 .../internal/css_parser/css_decls_font.go     |   138 +
 .../css_parser/css_decls_font_family.go       |   162 +
 .../css_parser/css_decls_font_weight.go       |    25 +
 .../internal/css_parser/css_decls_gradient.go |  1057 +
 .../css_parser/css_decls_list_style.go        |   179 +
 .../css_parser/css_decls_transform.go         |   347 +
 .../internal/css_parser/css_nesting.go        |   490 +
 .../esbuild/internal/css_parser/css_parser.go |  2374 ++
 .../css_parser/css_parser_selector.go         |   979 +
 .../internal/css_parser/css_reduce_calc.go    |   605 +
 .../internal/css_printer/css_printer.go       |  1141 +
 .../evanw/esbuild/internal/fs/error_other.go  |     9 +
 .../esbuild/internal/fs/error_wasm+windows.go |    17 +
 .../evanw/esbuild/internal/fs/filepath.go     |   649 +
 .../evanw/esbuild/internal/fs/fs.go           |   287 +
 .../evanw/esbuild/internal/fs/fs_mock.go      |   294 +
 .../evanw/esbuild/internal/fs/fs_real.go      |   543 +
 .../evanw/esbuild/internal/fs/fs_zip.go       |   405 +
 .../evanw/esbuild/internal/fs/iswin_other.go  |     9 +
 .../evanw/esbuild/internal/fs/iswin_wasm.go   |    25 +
 .../esbuild/internal/fs/iswin_windows.go      |     8 +
 .../evanw/esbuild/internal/fs/modkey_other.go |    35 +
 .../evanw/esbuild/internal/fs/modkey_unix.go  |    41 +
 .../evanw/esbuild/internal/graph/graph.go     |   431 +
 .../evanw/esbuild/internal/graph/input.go     |   127 +
 .../evanw/esbuild/internal/graph/meta.go      |   205 +
 .../evanw/esbuild/internal/helpers/bitset.go  |    27 +
 .../evanw/esbuild/internal/helpers/comment.go |    29 +
 .../evanw/esbuild/internal/helpers/dataurl.go |    72 +
 .../evanw/esbuild/internal/helpers/float.go   |   158 +
 .../evanw/esbuild/internal/helpers/glob.go    |    58 +
 .../evanw/esbuild/internal/helpers/hash.go    |    14 +
 .../evanw/esbuild/internal/helpers/joiner.go  |    86 +
 .../evanw/esbuild/internal/helpers/mime.go    |    49 +
 .../evanw/esbuild/internal/helpers/path.go    |    22 +
 .../evanw/esbuild/internal/helpers/quote.go   |   142 +
 .../esbuild/internal/helpers/serializer.go    |    26 +
 .../evanw/esbuild/internal/helpers/stack.go   |    50 +
 .../evanw/esbuild/internal/helpers/strings.go |    41 +
 .../evanw/esbuild/internal/helpers/timer.go   |    94 +
 .../evanw/esbuild/internal/helpers/typos.go   |    38 +
 .../evanw/esbuild/internal/helpers/utf.go     |   230 +
 .../esbuild/internal/helpers/waitgroup.go     |    37 +
 .../evanw/esbuild/internal/js_ast/js_ast.go   |  1841 ++
 .../esbuild/internal/js_ast/js_ast_helpers.go |  2973 +++
 .../evanw/esbuild/internal/js_ast/js_ident.go |   247 +
 .../evanw/esbuild/internal/js_ast/unicode.go  |  2065 ++
 .../esbuild/internal/js_lexer/js_lexer.go     |  2665 +++
 .../evanw/esbuild/internal/js_lexer/tables.go |   382 +
 .../internal/js_parser/global_name_parser.go  |    49 +
 .../esbuild/internal/js_parser/js_parser.go   | 18021 ++++++++++++++++
 .../internal/js_parser/js_parser_lower.go     |  2131 ++
 .../js_parser/js_parser_lower_class.go        |  2573 +++
 .../esbuild/internal/js_parser/json_parser.go |   238 +
 .../internal/js_parser/sourcemap_parser.go    |   277 +
 .../esbuild/internal/js_parser/ts_parser.go   |  1999 ++
 .../esbuild/internal/js_printer/js_printer.go |  4924 +++++
 .../evanw/esbuild/internal/linker/debug.go    |   148 +
 .../evanw/esbuild/internal/linker/linker.go   |  7154 ++++++
 .../evanw/esbuild/internal/logger/logger.go   |  2045 ++
 .../esbuild/internal/logger/logger_darwin.go  |    34 +
 .../esbuild/internal/logger/logger_linux.go   |    34 +
 .../esbuild/internal/logger/logger_other.go   |    16 +
 .../esbuild/internal/logger/logger_windows.go |   136 +
 .../evanw/esbuild/internal/logger/msg_ids.go  |   371 +
 .../evanw/esbuild/internal/renamer/renamer.go |   662 +
 .../esbuild/internal/resolver/dataurl.go      |    76 +
 .../esbuild/internal/resolver/package_json.go |  1462 ++
 .../esbuild/internal/resolver/resolver.go     |  2923 +++
 .../internal/resolver/testExpectations.json   |   311 +
 .../internal/resolver/tsconfig_json.go        |   481 +
 .../esbuild/internal/resolver/yarnpnp.go      |   665 +
 .../evanw/esbuild/internal/runtime/runtime.go |   604 +
 .../esbuild/internal/sourcemap/sourcemap.go   |   834 +
 .../evanw/esbuild/internal/xxhash/LICENSE.txt |    22 +
 .../evanw/esbuild/internal/xxhash/README.md   |     1 +
 .../evanw/esbuild/internal/xxhash/xxhash.go   |   235 +
 .../esbuild/internal/xxhash/xxhash_other.go   |    74 +
 .../github.com/evanw/esbuild/pkg/api/api.go   |   718 +
 .../evanw/esbuild/pkg/api/api_impl.go         |  2530 +++
 .../evanw/esbuild/pkg/api/api_js_table.go     |    50 +
 .../evanw/esbuild/pkg/api/favicon.go          |    31 +
 .../evanw/esbuild/pkg/api/serve_other.go      |   990 +
 .../evanw/esbuild/pkg/api/serve_wasm.go       |    21 +
 .../evanw/esbuild/pkg/api/watcher.go          |   187 +
 .../application/{xflags => xflags.git}/.envrc |     0
 .../{xflags => xflags.git}/.gitignore         |     0
 .../{xflags => xflags.git}/.gitlab-ci.yml     |     0
 .../{xflags => xflags.git}/CHANGELOG.md       |     0
 .../{xflags => xflags.git}/CONTRIBUTING.md    |     0
 .../{xflags => xflags.git}/LICENSE            |     0
 .../{xflags => xflags.git}/README.md          |     0
 .../application/{xflags => xflags.git}/api.go |     0
 .../{xflags => xflags.git}/command.go         |     0
 .../{xflags => xflags.git}/devenv.lock        |     0
 .../{xflags => xflags.git}/devenv.nix         |     0
 .../{xflags => xflags.git}/devenv.yaml        |     0
 .../application/{xflags => xflags.git}/doc.go |     0
 .../{xflags => xflags.git}/error.go           |     0
 .../{xflags => xflags.git}/execute.go         |     0
 .../{xflags => xflags.git}/flake.lock         |     0
 .../{xflags => xflags.git}/flake.nix          |     0
 .../{xflags => xflags.git}/help-util.go       |     0
 .../{xflags => xflags.git}/help.go            |     0
 .../{xflags => xflags.git}/hint.go            |     0
 .../{xflags => xflags.git}/mapping.go         |     2 +-
 .../{xflags => xflags.git}/parse.go           |     0
 .../{xflags => xflags.git}/release.json       |     0
 .../{xflags => xflags.git}/setting.go         |     0
 .../{xflags => xflags.git}/tags.go            |     0
 .../{xflags => xflags.git}/type.go            |     0
 .../go/markup/{html => html.git}/LICENSE      |     0
 .../{html => html.git}/engine/engine.go       |     0
 .../markup/{html => html.git}/engine/error.go |     0
 .../libraries/go/utilities/pathfinder/.envrc  |     2 -
 .../go/utilities/pathfinder/.gitignore        |   155 -
 .../go/utilities/pathfinder/.gitlab-ci.yml    |    43 -
 .../go/utilities/pathfinder/CONTRIBUTING.md   |    58 -
 .../libraries/go/utilities/pathfinder/LICENSE |    14 -
 .../go/utilities/pathfinder/README.md         |    69 -
 .../go/utilities/pathfinder/error.go          |    39 -
 .../libraries/go/utilities/pathfinder/find.go |    51 -
 .../go/utilities/pathfinder/flake.lock        |   181 -
 .../go/utilities/pathfinder/flake.nix         |   158 -
 .../libraries/go/utilities/pathfinder/get.go  |    74 -
 .../go/utilities/pathfinder/pathfinder.iml    |    12 -
 .../go/utilities/pathfinder/release.json      |     1 -
 .../libraries/go/utilities/pathfinder/set.go  |   293 -
 source/vendor/golang.org/x/net/html/doc.go    |     7 +-
 source/vendor/golang.org/x/net/html/iter.go   |    56 +
 source/vendor/golang.org/x/net/html/node.go   |     4 +
 .../golang.org/x/sys/unix/ioctl_linux.go      |    96 +
 .../vendor/golang.org/x/sys/unix/mkerrors.sh  |    12 +
 .../golang.org/x/sys/unix/syscall_linux.go    |     1 +
 .../x/sys/unix/syscall_zos_s390x.go           |   104 +-
 .../golang.org/x/sys/unix/zerrors_linux.go    |    22 +
 .../x/sys/unix/zerrors_linux_386.go           |    14 +
 .../x/sys/unix/zerrors_linux_amd64.go         |    14 +
 .../x/sys/unix/zerrors_linux_arm.go           |    14 +
 .../x/sys/unix/zerrors_linux_arm64.go         |    14 +
 .../x/sys/unix/zerrors_linux_loong64.go       |    14 +
 .../x/sys/unix/zerrors_linux_mips.go          |    14 +
 .../x/sys/unix/zerrors_linux_mips64.go        |    14 +
 .../x/sys/unix/zerrors_linux_mips64le.go      |    14 +
 .../x/sys/unix/zerrors_linux_mipsle.go        |    14 +
 .../x/sys/unix/zerrors_linux_ppc.go           |    14 +
 .../x/sys/unix/zerrors_linux_ppc64.go         |    14 +
 .../x/sys/unix/zerrors_linux_ppc64le.go       |    14 +
 .../x/sys/unix/zerrors_linux_riscv64.go       |    14 +
 .../x/sys/unix/zerrors_linux_s390x.go         |    14 +
 .../x/sys/unix/zerrors_linux_sparc64.go       |    14 +
 .../golang.org/x/sys/unix/zsyscall_linux.go   |    10 +
 .../golang.org/x/sys/unix/ztypes_linux.go     |   120 +-
 .../golang.org/x/sys/unix/ztypes_zos_s390x.go |     6 +
 .../x/sys/windows/syscall_windows.go          |    34 +-
 .../golang.org/x/sys/windows/types_windows.go |   126 +
 .../x/sys/windows/zsyscall_windows.go         |    53 +
 source/vendor/modules.txt                     |    50 +-
 208 files changed, 90024 insertions(+), 1213 deletions(-)
 create mode 100644 examples/example5/package-lock.json
 create mode 100644 examples/example5/package.json
 create mode 100644 examples/example5/scripts/.gitkeep
 create mode 100644 examples/example5/source/lib.mjs
 create mode 100644 examples/example5/source/main.css
 create mode 100644 examples/example5/source/main.mjs
 create mode 100644 examples/example5/source/page.mjs
 create mode 100644 examples/example5/styles/.gitkeep
 create mode 100644 examples/example5/test.html
 create mode 100644 source/.idea/.gitignore
 create mode 100644 source/.idea/codeStyles/codeStyleConfig.xml
 create mode 100644 source/.idea/misc.xml
 create mode 100644 source/.idea/modules.xml
 create mode 100644 source/.idea/source.iml
 create mode 100644 source/.idea/vcs.xml
 create mode 100644 source/javascript/generate.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/LICENSE.md
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/ast/ast.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/cache/cache.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/compat/compat.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/compat/css_table.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/compat/js_table.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/config/config.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/config/globals.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_ast/css_ast.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_ast/css_decl_table.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_lexer/css_lexer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_color_spaces.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_animation.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_border_radius.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box_shadow.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_color.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_composes.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_container.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_family.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_weight.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_gradient.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_list_style.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_transform.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_nesting.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser_selector.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_parser/css_reduce_calc.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/css_printer/css_printer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/error_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/error_wasm+windows.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/filepath.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/fs.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/fs_mock.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/fs_real.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/fs_zip.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/iswin_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/iswin_wasm.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/iswin_windows.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/modkey_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/fs/modkey_unix.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/graph/graph.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/graph/input.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/graph/meta.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/bitset.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/comment.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/dataurl.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/float.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/glob.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/hash.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/joiner.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/mime.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/path.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/quote.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/serializer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/stack.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/strings.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/timer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/typos.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/utf.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/helpers/waitgroup.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast_helpers.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ident.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_ast/unicode.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_lexer/js_lexer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_lexer/tables.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/global_name_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/json_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/sourcemap_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_parser/ts_parser.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/js_printer/js_printer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/linker/debug.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/linker/linker.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/logger.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/logger_darwin.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/logger_linux.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/logger_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/logger_windows.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/logger/msg_ids.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/renamer/renamer.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/dataurl.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/testExpectations.json
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/tsconfig_json.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/resolver/yarnpnp.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/runtime/runtime.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/sourcemap/sourcemap.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/xxhash/LICENSE.txt
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/xxhash/README.md
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/api.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/api_impl.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/api_js_table.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/favicon.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/serve_other.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/serve_wasm.go
 create mode 100644 source/vendor/github.com/evanw/esbuild/pkg/api/watcher.go
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/.envrc (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/.gitignore (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/.gitlab-ci.yml (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/CHANGELOG.md (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/CONTRIBUTING.md (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/LICENSE (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/README.md (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/api.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/command.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/devenv.lock (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/devenv.nix (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/devenv.yaml (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/doc.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/error.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/execute.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/flake.lock (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/flake.nix (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/help-util.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/help.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/hint.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/mapping.go (97%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/parse.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/release.json (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/setting.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/tags.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/application/{xflags => xflags.git}/type.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/markup/{html => html.git}/LICENSE (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/markup/{html => html.git}/engine/engine.go (100%)
 rename source/vendor/gitlab.schukai.com/oss/libraries/go/markup/{html => html.git}/engine/error.go (100%)
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.envrc
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitlab-ci.yml
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CONTRIBUTING.md
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/find.go
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.lock
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.nix
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/pathfinder.iml
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json
 delete mode 100644 source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go
 create mode 100644 source/vendor/golang.org/x/net/html/iter.go

diff --git a/.gitignore b/.gitignore
index 38e0e60..342599d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -313,3 +313,6 @@ Taskfile.yaml
 
 /environment/do-secrets.json
 documentation/manual/de/book/
+/examples/example5/scripts/main.mjs
+/examples/example5/scripts/page.mjs
+/examples/example5/styles/main.css
diff --git a/README.md b/README.md
index b52d1c3..45e3a8a 100644
--- a/README.md
+++ b/README.md
@@ -259,7 +259,47 @@ the task `task update-code` must be called.
 The hash is currently always null, as a vendor directory is used 
 in the project. This is created with `go mod vendor`.
 
+#### JavaScript
 
+##### generate
+
+Bob can use **ESBuild** to transform JavaScript code directly from an HTML file. 
+For this, the relevant `<script>` tags must include specific attributes. 
+
+ESBuild is licensed under the [MIT license](https://github.com/evanw/esbuild?tab=MIT-1-ov-file#readme).
+
+Here’s an example:
+
+```html
+<script 
+    data-bob-source="source/main.mjs" 
+    data-bob-script-dist="scripts/main.mjs" 
+    data-bob-style-dist="styles/main.css" 
+    src="/scripts/main.mjs" 
+    type="module">
+</script>
+```
+
+**Attribute Explanation:**
+
+- **`data-bob-source`**  
+  Specifies the file to be used as the source for the build process (e.g., `source/page.mjs`).
+
+- **`data-bob-target`** *(optional)*  
+  Defines the target JavaScript format. The default is `esnext`. A common alternative is `es6`.
+
+- **`data-bob-script-dist`**  
+  Specifies the path to the output JavaScript file relative to the template, e.g., `scripts/page.mjs`.
+
+- **`data-bob-style-dist`**  
+  Defines the path to the output styles file, e.g., `styles/page.css`.
+
+- **`src`**  
+  Indicates the URL where the script is served in the browser. This value is **not** used by ESBuild for the build process.
+
+##### Notes
+- These attributes help separate development and delivery paths clearly.
+- `src` is used solely for delivery and has no impact on the ESBuild process.
 
 
 ## Questions
diff --git a/examples/example5/package-lock.json b/examples/example5/package-lock.json
new file mode 100644
index 0000000..5b8ea29
--- /dev/null
+++ b/examples/example5/package-lock.json
@@ -0,0 +1,61 @@
+{
+  "name": "example5",
+  "version": "1.0.0",
+  "lockfileVersion": 3,
+  "requires": true,
+  "packages": {
+    "": {
+      "name": "example5",
+      "version": "1.0.0",
+      "license": "ISC",
+      "dependencies": {
+        "@schukai/monster": "^3.88.0"
+      }
+    },
+    "node_modules/@floating-ui/core": {
+      "version": "1.6.8",
+      "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.6.8.tgz",
+      "integrity": "sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA==",
+      "license": "MIT",
+      "dependencies": {
+        "@floating-ui/utils": "^0.2.8"
+      }
+    },
+    "node_modules/@floating-ui/dom": {
+      "version": "1.6.12",
+      "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.6.12.tgz",
+      "integrity": "sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w==",
+      "license": "MIT",
+      "dependencies": {
+        "@floating-ui/core": "^1.6.0",
+        "@floating-ui/utils": "^0.2.8"
+      }
+    },
+    "node_modules/@floating-ui/utils": {
+      "version": "0.2.8",
+      "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.8.tgz",
+      "integrity": "sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig==",
+      "license": "MIT"
+    },
+    "node_modules/@popperjs/core": {
+      "version": "2.11.8",
+      "resolved": "https://registry.npmjs.org/@popperjs/core/-/core-2.11.8.tgz",
+      "integrity": "sha512-P1st0aksCrn9sGZhp8GMYwBnQsbvAWsZAX44oXNNvLHGqAOcoVxmjZiohstwQ7SqKnbR47akdNi+uleWD8+g6A==",
+      "license": "MIT",
+      "funding": {
+        "type": "opencollective",
+        "url": "https://opencollective.com/popperjs"
+      }
+    },
+    "node_modules/@schukai/monster": {
+      "version": "3.88.0",
+      "resolved": "https://registry.npmjs.org/@schukai/monster/-/monster-3.88.0.tgz",
+      "integrity": "sha512-hwV5hlpubSjxkOS3LZazS0+Up0Cp5OJ+oxpV+2iNGtXUwhiRuvtVmfNlsuN9MxXGu4pOoXzbFUk/jFmPuGOLmA==",
+      "license": "AGPL 3.0",
+      "dependencies": {
+        "@floating-ui/dom": "^1.6.12",
+        "@popperjs/core": "^2.11.8"
+      }
+    }
+  }
+}
diff --git a/examples/example5/package.json b/examples/example5/package.json
new file mode 100644
index 0000000..047a733
--- /dev/null
+++ b/examples/example5/package.json
@@ -0,0 +1,14 @@
+{
+  "name": "example5",
+  "version": "1.0.0",
+  "main": "index.js",
+  "scripts": {
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "author": "",
+  "license": "ISC",
+  "description": "",
+  "dependencies": {
+    "@schukai/monster": "^3.88.0"
+  }
+}
diff --git a/examples/example5/scripts/.gitkeep b/examples/example5/scripts/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/examples/example5/source/lib.mjs b/examples/example5/source/lib.mjs
new file mode 100644
index 0000000..a5da3be
--- /dev/null
+++ b/examples/example5/source/lib.mjs
@@ -0,0 +1,4 @@
+
+
+
+export const A=1;
\ No newline at end of file
diff --git a/examples/example5/source/main.css b/examples/example5/source/main.css
new file mode 100644
index 0000000..5dbb4c5
--- /dev/null
+++ b/examples/example5/source/main.css
@@ -0,0 +1,5 @@
+
+
+div {
+    color: red;
+}
\ No newline at end of file
diff --git a/examples/example5/source/main.mjs b/examples/example5/source/main.mjs
new file mode 100644
index 0000000..176cb62
--- /dev/null
+++ b/examples/example5/source/main.mjs
@@ -0,0 +1,7 @@
+import  "@schukai/monster/source/components/form/button.mjs";
+import {A} from "./lib.mjs";
+import "./main.css";
+
+console.log(A);
+
+
diff --git a/examples/example5/source/page.mjs b/examples/example5/source/page.mjs
new file mode 100644
index 0000000..d865ece
--- /dev/null
+++ b/examples/example5/source/page.mjs
@@ -0,0 +1,6 @@
+import {A} from "./lib.mjs";
+
+
+console.log(A);
+
+
diff --git a/examples/example5/styles/.gitkeep b/examples/example5/styles/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/examples/example5/test.html b/examples/example5/test.html
new file mode 100644
index 0000000..0f5063a
--- /dev/null
+++ b/examples/example5/test.html
@@ -0,0 +1,26 @@
+<!DOCTYPE html><html lang="en" data-attributes="lang path:lang"><head>
+
+    <meta charset="utf-8"/>
+
+    <link rel="apple-touch-icon" href="/apple-touch-icon.png"/>
+
+    <script data-bob-source="source/main.mjs" data-bob-script-dist="scripts/main.mjs" data-bob-style-dist="styles/main.css" src="/scripts/main.mjs" type="module"></script>
+    <script data-bob-source="source/page.mjs" data-bob-target=es6 data-bob-script-dist="scripts/page.mjs" data-bob-style-dist="styles/page.css" src="/scripts/page.mjs" type="module"></script>
+
+    <script type="application/json" data-monster-role="translations" data-bob-reference="the-translation" data-replace="path:translations.the-translation.content">
+      {
+        "key5": "translation4"
+      }
+    </script>
+  </head>
+
+  <body>
+    <header>
+      <div class="gradient"></div>
+    </header>
+
+
+  <monster-button>test</monster-button>
+  
+
+</body></html>
\ No newline at end of file
diff --git a/flake.lock b/flake.lock
index ec97ba0..c89e91a 100644
--- a/flake.lock
+++ b/flake.lock
@@ -2,11 +2,11 @@
   "nodes": {
     "nixpkgs": {
       "locked": {
-        "lastModified": 1730883749,
-        "narHash": "sha256-mwrFF0vElHJP8X3pFCByJR365Q2463ATp2qGIrDUdlE=",
+        "lastModified": 1731797254,
+        "narHash": "sha256-df3dJApLPhd11AlueuoN0Q4fHo/hagP75LlM5K1sz9g=",
         "owner": "NixOS",
         "repo": "nixpkgs",
-        "rev": "dba414932936fde69f0606b4f1d87c5bc0003ede",
+        "rev": "e8c38b73aeb218e27163376a2d617e61a2ad9b59",
         "type": "github"
       },
       "original": {
diff --git a/source/.idea/.gitignore b/source/.idea/.gitignore
new file mode 100644
index 0000000..13566b8
--- /dev/null
+++ b/source/.idea/.gitignore
@@ -0,0 +1,8 @@
+# Default ignored files
+/shelf/
+/workspace.xml
+# Editor-based HTTP Client requests
+/httpRequests/
+# Datasource local storage ignored files
+/dataSources/
+/dataSources.local.xml
diff --git a/source/.idea/codeStyles/codeStyleConfig.xml b/source/.idea/codeStyles/codeStyleConfig.xml
new file mode 100644
index 0000000..a55e7a1
--- /dev/null
+++ b/source/.idea/codeStyles/codeStyleConfig.xml
@@ -0,0 +1,5 @@
+<component name="ProjectCodeStyleConfiguration">
+  <state>
+    <option name="PREFERRED_PROJECT_CODE_STYLE" value="Default" />
+  </state>
+</component>
\ No newline at end of file
diff --git a/source/.idea/misc.xml b/source/.idea/misc.xml
new file mode 100644
index 0000000..aa935ab
--- /dev/null
+++ b/source/.idea/misc.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectRootManager" version="2" languageLevel="JDK_21" default="true" project-jdk-name="21 (4)" project-jdk-type="JavaSDK">
+    <output url="file://$PROJECT_DIR$/out" />
+  </component>
+  <component name="accountSettings">
+    <option name="activeProfile" value="profile:default" />
+    <option name="activeRegion" value="eu-west-1" />
+    <option name="recentlyUsedProfiles">
+      <list>
+        <option value="profile:default" />
+      </list>
+    </option>
+    <option name="recentlyUsedRegions">
+      <list>
+        <option value="eu-west-1" />
+      </list>
+    </option>
+  </component>
+</project>
\ No newline at end of file
diff --git a/source/.idea/modules.xml b/source/.idea/modules.xml
new file mode 100644
index 0000000..66f3350
--- /dev/null
+++ b/source/.idea/modules.xml
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="ProjectModuleManager">
+    <modules>
+      <module fileurl="file://$PROJECT_DIR$/.idea/source.iml" filepath="$PROJECT_DIR$/.idea/source.iml" />
+    </modules>
+  </component>
+</project>
\ No newline at end of file
diff --git a/source/.idea/source.iml b/source/.idea/source.iml
new file mode 100644
index 0000000..25ed3f6
--- /dev/null
+++ b/source/.idea/source.iml
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<module type="JAVA_MODULE" version="4">
+  <component name="Go" enabled="true" />
+  <component name="NewModuleRootManager" inherit-compiler-output="true">
+    <exclude-output />
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
+    <orderEntry type="sourceFolder" forTests="false" />
+  </component>
+</module>
\ No newline at end of file
diff --git a/source/.idea/vcs.xml b/source/.idea/vcs.xml
new file mode 100644
index 0000000..6c0b863
--- /dev/null
+++ b/source/.idea/vcs.xml
@@ -0,0 +1,6 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project version="4">
+  <component name="VcsDirectoryMappings">
+    <mapping directory="$PROJECT_DIR$/.." vcs="Git" />
+  </component>
+</project>
\ No newline at end of file
diff --git a/source/command.go b/source/command.go
index 901faab..20bd3ea 100644
--- a/source/command.go
+++ b/source/command.go
@@ -4,15 +4,17 @@ import (
 	"fmt"
 	"github.com/charmbracelet/log"
 	html2 "gitlab.schukai.com/oss/bob/html"
+	"gitlab.schukai.com/oss/bob/javascript"
 	"gitlab.schukai.com/oss/bob/release"
 	"gitlab.schukai.com/oss/bob/style"
 	template2 "gitlab.schukai.com/oss/bob/template"
 	"gitlab.schukai.com/oss/bob/types"
-	"gitlab.schukai.com/oss/libraries/go/application/xflags"
+	xflags "gitlab.schukai.com/oss/libraries/go/application/xflags.git"
 	"gopkg.in/yaml.v3"
 	"os"
 	"path"
 	"path/filepath"
+	"strings"
 )
 
 type Definition struct {
@@ -25,6 +27,12 @@ type Definition struct {
 			DataFile string `short:"d" long:"data-file" description:"Name of the main data file" default:"data.yaml"`
 		} `command:"prepare" description:"Prepare content from a file" call:"PrepareTemplate"`
 	} `command:"template" description:"Template commands"`
+	Javascript struct {
+		Generate struct {
+			Input       string `short:"i" long:"input" description:"Directory with prepared html files" required:"true"`
+			Development bool   `short:"d" long:"development" description:"Development mode" default:"false"`
+		} `command:"generate" description:"Generate javascript files from a file" call:"GenerateJavascript"`
+	} `command:"javascript" description:"Javascript related commands"`
 	HTML struct {
 		Generate struct {
 			Input     string `short:"i" long:"input" description:"Directory with prepared html files" required:"true"`
@@ -67,6 +75,47 @@ func (d *Definition) CutHTML(s *xflags.Settings[Definition]) {
 	}
 }
 
+// GenerateJavascript walks the configured input directory and bundles the
+// javascript referenced by every html file found there. Directories named
+// node_modules are skipped entirely.
+func (d *Definition) GenerateJavascript(s *xflags.Settings[Definition]) {
+
+	skipDirs := map[string]bool{
+		"node_modules": true,
+	}
+
+	err := filepath.Walk(d.Javascript.Generate.Input, func(p string, info os.FileInfo, err error) error {
+
+		if err != nil {
+			return err
+		}
+
+		// Only a directory may return SkipDir; returning it for a
+		// plain file would skip the rest of its parent directory too.
+		for _, part := range strings.Split(p, string(filepath.Separator)) {
+			if skipDirs[part] && info.IsDir() {
+				return filepath.SkipDir
+			}
+		}
+
+		if info.IsDir() {
+			return nil
+		}
+
+		if filepath.Ext(p) != ".html" {
+			return nil
+		}
+
+		log.Info("Generate " + p)
+
+		return javascript.ParseHTMLFile(p, d.Javascript.Generate.Development)
+	})
+
+	if err != nil {
+		s.AddError(err)
+	}
+}
+
 func (d *Definition) SyncHTML(s *xflags.Settings[Definition]) {
 
 	err := html2.SyncHtml(d.HTML.Sync.Specification)
diff --git a/source/go.mod b/source/go.mod
index 1492ff2..dabdb4b 100644
--- a/source/go.mod
+++ b/source/go.mod
@@ -7,12 +7,22 @@ toolchain go1.22.4
 require (
 	github.com/andybalholm/cascadia v1.3.2
 	github.com/charmbracelet/log v0.4.0
+	github.com/evanw/esbuild v0.24.0
 	github.com/tdewolff/parse/v2 v2.7.19
+	gitlab.schukai.com/oss/libraries/go/application/configuration.git v1.22.9
+	gitlab.schukai.com/oss/libraries/go/application/xflags.git v1.16.5
+	gitlab.schukai.com/oss/libraries/go/markup/html.git v0.4.7
+	gitlab.schukai.com/oss/libraries/go/services/job-queues.git v1.20.2
+	gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git v0.9.5
+	gitlab.schukai.com/oss/libraries/go/utilities/watch.git v0.4.2
+	golang.org/x/crypto v0.29.0
+	golang.org/x/net v0.31.0
+	gopkg.in/yaml.v3 v3.0.1
+)
+
+require (
 	gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.3
 	gitlab.schukai.com/oss/libraries/go/markup/html v0.4.6
-	golang.org/x/crypto v0.28.0
-	golang.org/x/net v0.30.0
-	gopkg.in/yaml.v3 v3.0.1
 )
 
 require (
@@ -29,8 +39,7 @@ require (
 	github.com/volker-schukai/tokenizer v1.0.0 // indirect
 	gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.2 // indirect
 	gitlab.schukai.com/oss/libraries/go/utilities/pathfinder v0.9.4 // indirect
-	gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git v0.9.5 // indirect
-	golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
-	golang.org/x/sys v0.26.0 // indirect
+	golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
+	golang.org/x/sys v0.27.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 )
diff --git a/source/go.sum b/source/go.sum
index c1fc4fb..f936d7d 100644
--- a/source/go.sum
+++ b/source/go.sum
@@ -12,6 +12,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/evanw/esbuild v0.24.0 h1:GZ78naTLp7FKr+K7eNuM/SLs5maeiHYRPsTg6kmdsSE=
+github.com/evanw/esbuild v0.24.0/go.mod h1:D2vIQZqV/vIf/VRHtViaUtViZmG7o+kKmlBfVQuRi48=
 github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4=
 github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
@@ -50,22 +52,33 @@ github.com/tdewolff/test v1.0.11-0.20231101010635-f1265d231d52/go.mod h1:6DAvZli
 github.com/volker-schukai/tokenizer v1.0.0 h1:wF4haFoCodq7lgAk8c+th/DZmpFpL2WVD8wDzAGU1mA=
 github.com/volker-schukai/tokenizer v1.0.0/go.mod h1:LPw7lLIxUnZgeg96818N7IvwLE1x8ya31J/Aa0aCq9M=
 github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
+gitlab.schukai.com/oss/libraries/go/application/configuration.git v1.22.9/go.mod h1:pZ+8vTAodhn3BfWMXMe18q3Ys0Dc9v3MPHY4jY4vClk=
 gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.3 h1:IWRCQOsZZPkoh/vIzjsF8BnqT4VVbOlCtfeuaYV5qEQ=
 gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.3/go.mod h1:e+uFr/73kXoSozlAewBBqKsAUCIichlcvNDyj/0fj9Q=
+gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.5 h1:lghTHrRwlF7YSXkG/KRlJvnZuK3Z8HeM7aEiEbFXIEQ=
+gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.5/go.mod h1:6477TWP2W0vWWJ6Ctu/Mdh5SsjKUVVPFDgFfM4YdtcU=
+gitlab.schukai.com/oss/libraries/go/application/xflags.git v1.16.5 h1:YD1skbZRu3iUHafIhGIEcZqPHJXA65Zj0AcSgsTQJAk=
+gitlab.schukai.com/oss/libraries/go/application/xflags.git v1.16.5/go.mod h1:6477TWP2W0vWWJ6Ctu/Mdh5SsjKUVVPFDgFfM4YdtcU=
 gitlab.schukai.com/oss/libraries/go/markup/html v0.4.6 h1:eMold9Nl6ZkygVF1K1lTA3ROGz/mlEIcPt9aUUJC33c=
 gitlab.schukai.com/oss/libraries/go/markup/html v0.4.6/go.mod h1:FAzz3QWPCqQG54ou0zLnF6j3/ZQgGSTGsTHLShc3UFU=
+gitlab.schukai.com/oss/libraries/go/markup/html.git v0.4.7 h1:LxNtSNRXV9Ay3rKDH/KaVLORd0IpUClbfHo9tXG3Y4Q=
+gitlab.schukai.com/oss/libraries/go/markup/html.git v0.4.7/go.mod h1:vlqd5glmCGm0rdM/QXWq/ifGdIM/wq7ynSzHFnl4LzU=
+gitlab.schukai.com/oss/libraries/go/services/job-queues.git v1.20.2/go.mod h1:SZmOS4cms25c9UIOq9S2qTKHrPLmKYk0GqqtVCt4dxk=
+gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.0 h1:JVxMHiA8zFVjJDhNl65XeYrhdMkzB+5dyrBUEZ982WU=
+gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.0/go.mod h1:BsR4Y9jsvISplkW6UoLFRGxQX69/AUmP1SXRwWhx31o=
 gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.2 h1:jRlVTikl73AL1y9OfYxdZ4OYG8Hkbl/8ezbwd9r5l44=
 gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.2/go.mod h1:Vl5kzzMjpy1LGe+RUi2pTnZvZFP53Th4JChP9dbkOVk=
 gitlab.schukai.com/oss/libraries/go/utilities/pathfinder v0.9.4 h1:/+fgcPeXqz5tRrT+EZXA2vGV+OWV9R+5hEBpRJUpp80=
 gitlab.schukai.com/oss/libraries/go/utilities/pathfinder v0.9.4/go.mod h1:36psT3WHelpcXWXVp8D33IXvUIpaAXEtrQYYOODUbjE=
 gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git v0.9.5 h1:PiDmw3O3UDubKILC+t7fKs+m9670a+b8SkrHq5Rkk9M=
 gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git v0.9.5/go.mod h1:HwXjaFUAxLv+qTN63xrPBO5DEVGuZNF859t29bhTsFQ=
+gitlab.schukai.com/oss/libraries/go/utilities/watch.git v0.4.2/go.mod h1:7Rv8r5eVw5FDRp/K3KlDmjNMrdj7JA7eC3o6s7JBcHU=
 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
 golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
-golang.org/x/crypto v0.28.0 h1:GBDwsMXVQi34v5CCYUm2jkJvu4cbtru2U4TN2PSyQnw=
-golang.org/x/crypto v0.28.0/go.mod h1:rmgy+3RHxRZMyY0jjAJShp2zgEdOqj2AO7U0pYmeQ7U=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c h1:7dEasQXItcW1xKJ2+gg5VOiBnqWrJc+rq0DPKyvvdbY=
-golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:NQtJDoLvd6faHhE7m4T/1IY708gDefGGjR/iUW8yQQ8=
+golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
+golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo=
+golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak=
 golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
 golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -73,8 +86,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
 golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
-golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
-golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
+golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
+golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
 golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -82,12 +95,13 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
-golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
+golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
diff --git a/source/html/cut.go b/source/html/cut.go
index 00f9b8d..35d0344 100644
--- a/source/html/cut.go
+++ b/source/html/cut.go
@@ -3,7 +3,7 @@ package html
 import (
 	"github.com/andybalholm/cascadia"
 	"gitlab.schukai.com/oss/bob/types"
-	"gitlab.schukai.com/oss/libraries/go/markup/html/engine"
+	engine "gitlab.schukai.com/oss/libraries/go/markup/html.git/engine"
 	"golang.org/x/net/html"
 	"gopkg.in/yaml.v3"
 	"os"
diff --git a/source/html/generate.go b/source/html/generate.go
index 394c4cd..566f1da 100644
--- a/source/html/generate.go
+++ b/source/html/generate.go
@@ -5,7 +5,7 @@ import (
 	"github.com/andybalholm/cascadia"
 	"github.com/charmbracelet/log"
 	"gitlab.schukai.com/oss/bob/types"
-	"gitlab.schukai.com/oss/libraries/go/markup/html/engine"
+	"gitlab.schukai.com/oss/libraries/go/markup/html.git/engine"
 	"golang.org/x/net/html"
 	"gopkg.in/yaml.v3"
 	"os"
diff --git a/source/html/sync.go b/source/html/sync.go
index b2f619b..745a59a 100644
--- a/source/html/sync.go
+++ b/source/html/sync.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"github.com/andybalholm/cascadia"
 	"gitlab.schukai.com/oss/bob/types"
-	"gitlab.schukai.com/oss/libraries/go/markup/html/engine"
+	engine "gitlab.schukai.com/oss/libraries/go/markup/html.git/engine"
 	"golang.org/x/net/html"
 	"gopkg.in/yaml.v3"
 	"os"
diff --git a/source/javascript/generate.go b/source/javascript/generate.go
new file mode 100644
index 0000000..4a9e720
--- /dev/null
+++ b/source/javascript/generate.go
@@ -0,0 +1,235 @@
+package javascript
+
+import (
+	"os"
+	"path"
+	"strings"
+
+	"github.com/charmbracelet/log"
+	"github.com/evanw/esbuild/pkg/api"
+	"golang.org/x/net/html"
+)
+
+// ParseHTMLFile scans the HTML file p for <script> elements that carry
+// data-bob-* attributes and bundles each referenced entry point with
+// esbuild. data-bob-source names the entry point relative to the html
+// file, data-bob-script-dist and data-bob-style-dist name the script and
+// style destinations, and the optional data-bob-target selects the
+// ecmascript language level.
+func ParseHTMLFile(p string, development bool) error {
+	data, err := os.ReadFile(p)
+	if err != nil {
+		return err
+	}
+
+	doc, err := html.Parse(strings.NewReader(string(data)))
+	if err != nil {
+		return err
+	}
+
+	var f func(*html.Node)
+	f = func(n *html.Node) {
+
+		var src, source, scriptDist, styleDist, target string
+
+		if n.Type == html.ElementNode && n.Data == "script" {
+			for _, attr := range n.Attr {
+				switch attr.Key {
+				case "data-bob-source":
+					source = attr.Val
+				case "data-bob-script-dist":
+					scriptDist = attr.Val
+				case "data-bob-style-dist":
+					styleDist = attr.Val
+				case "data-bob-target":
+					target = attr.Val
+				case "src":
+					src = attr.Val
+				}
+			}
+
+			// only scripts that reference a bob source are bundled
+			if src != "" && source != "" {
+
+				if !path.IsAbs(source) {
+					source = path.Join(path.Dir(p), source)
+
+					if _, err := os.Stat(source); os.IsNotExist(err) {
+						log.Error("File does not exist: " + source)
+						return
+					}
+				}
+
+				if !path.IsAbs(scriptDist) {
+					scriptDist = path.Join(path.Dir(p), scriptDist)
+				}
+
+				if !path.IsAbs(styleDist) {
+					styleDist = path.Join(path.Dir(p), styleDist)
+				}
+
+				log.Info("Script: " + src + " " + source + " " + scriptDist + " " + styleDist)
+
+				runESBuild(source, path.Dir(p), scriptDist, styleDist, development, target)
+			}
+		}
+
+		for c := n.FirstChild; c != nil; c = c.NextSibling {
+			f(c)
+		}
+	}
+
+	f(doc)
+
+	return nil
+}
+
+// runESBuild bundles the entry point source with esbuild. The bundled
+// javascript is written to scriptDist and any css imported by the entry
+// point is written to styleDist. In development mode minification and
+// tree shaking are disabled and an inline source map is emitted. The
+// dist parameter is currently unused and kept for future use.
+func runESBuild(source, dist, scriptDist, styleDist string, development bool, target string) {
+
+	treeShaking := api.TreeShakingTrue
+	if development {
+		treeShaking = api.TreeShakingFalse
+	}
+
+	keepNames := development
+	esbuildTarget := api.ESNext
+	if target != "" {
+		// es6 is the same language edition as es2015, es7 as es2016 and
+		// so on, so both alias families map to one esbuild constant.
+		switch target {
+		case "es5":
+			esbuildTarget = api.ES5
+			keepNames = false
+		case "es6", "es2015":
+			esbuildTarget = api.ES2015
+		case "es7", "es2016":
+			esbuildTarget = api.ES2016
+		case "es8", "es2017":
+			esbuildTarget = api.ES2017
+		case "es9", "es2018":
+			esbuildTarget = api.ES2018
+		case "es10", "es2019":
+			esbuildTarget = api.ES2019
+		case "es11", "es2020":
+			esbuildTarget = api.ES2020
+		case "es12", "es2021":
+			esbuildTarget = api.ES2021
+		case "es13", "es2022":
+			esbuildTarget = api.ES2022
+		case "es14", "es2023":
+			esbuildTarget = api.ES2023
+		case "es15", "es2024":
+			esbuildTarget = api.ES2024
+		default:
+			log.Error("Unknown target: " + target + ". Using ESNext")
+		}
+	}
+
+	// license footer appended to every generated file
+	footer := map[string]string{
+		"js": `
+/*
+ * Copyright protects this code. Use, reproduction, or
+ * modification of this code without prior written permission from the copyright holder
+ * is strictly prohibited. For inquiries regarding licenses or usage rights,
+ * please contact schukai GmbH.
+ */`,
+		"css": `
+/*
+ * Copyright protects this code. Use, reproduction, or
+ * modification of this code without prior written permission from the copyright holder
+ * is strictly prohibited. For inquiries regarding licenses or usage rights,
+ * please contact schukai GmbH.
+ */
+`,
+	}
+
+	sourceMap := api.SourceMapNone
+	if development {
+		sourceMap = api.SourceMapInline
+	}
+
+	result := api.Build(api.BuildOptions{
+		EntryPoints: []string{source},
+		Outfile:     path.Base(scriptDist),
+		Bundle:      true,
+		Write:       false,
+		LogLevel:    api.LogLevelInfo,
+		Target:      esbuildTarget,
+
+		MinifySyntax:      !development,
+		MinifyWhitespace:  !development,
+		MinifyIdentifiers: !development,
+		TreeShaking:       treeShaking,
+		KeepNames:         keepNames,
+
+		Sourcemap:     sourceMap,
+		LegalComments: api.LegalCommentsExternal,
+
+		Footer: footer,
+	})
+
+	if len(result.Errors) > 0 {
+		for _, err := range result.Errors {
+			log.Error(err.Text)
+		}
+	}
+
+	for _, warning := range result.Warnings {
+		log.Warn(warning.Text)
+	}
+
+	// Write is false above, so the in-memory outputs are distributed
+	// manually to the configured destinations.
+	for _, file := range result.OutputFiles {
+		switch path.Ext(file.Path) {
+		case ".mjs", ".js":
+			if err := os.WriteFile(scriptDist, file.Contents, 0o644); err != nil {
+				log.Error(err.Error())
+			} else {
+				log.Info("Saved " + scriptDist)
+			}
+
+		case ".css":
+			if err := os.WriteFile(styleDist, file.Contents, 0o644); err != nil {
+				log.Error(err.Error())
+			} else {
+				log.Info("Saved " + styleDist)
+			}
+
+		case ".txt":
+			// esbuild emits extracted legal comments as *.LEGAL.txt
+			// files; skip empty ones and all of them in development.
+			if strings.TrimSpace(string(file.Contents)) == "" {
+				continue
+			}
+
+			if !strings.Contains(file.Path, "LEGAL") {
+				continue
+			}
+
+			if development {
+				log.Info("Legal information not saved in development mode")
+				continue
+			}
+
+			// store the legal file next to the artifact it belongs to
+			dir := path.Dir(styleDist)
+			if strings.Contains(file.Path, "js") {
+				dir = path.Dir(scriptDist)
+			}
+
+			out := path.Join(dir, path.Base(file.Path))
+			if err := os.WriteFile(out, file.Contents, 0o644); err != nil {
+				log.Error(err.Error())
+			} else {
+				log.Info("Saved " + out)
+			}
+		}
+	}
+}
diff --git a/source/main.go b/source/main.go
index 06be5eb..c9e634b 100644
--- a/source/main.go
+++ b/source/main.go
@@ -2,7 +2,8 @@ package main
 
 import (
 	"fmt"
-	"gitlab.schukai.com/oss/libraries/go/application/xflags"
+	xflags "gitlab.schukai.com/oss/libraries/go/application/xflags.git"
+
 	"os"
 )
 
diff --git a/source/vendor/github.com/evanw/esbuild/LICENSE.md b/source/vendor/github.com/evanw/esbuild/LICENSE.md
new file mode 100644
index 0000000..2027e8d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/LICENSE.md
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2020 Evan Wallace
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/source/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go b/source/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go
new file mode 100644
index 0000000..3b36fe2
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/api_helpers/use_timer.go
@@ -0,0 +1,7 @@
+package api_helpers
+
+// This flag is set by the CLI to activate the timer. It's put here instead of
+// by the timer to discourage code from checking this flag. Only the code that
+// creates the root timer should check this flag. Other code should check that
+// the timer is not null to detect if the timer is being used or not.
+var UseTimer bool
diff --git a/source/vendor/github.com/evanw/esbuild/internal/ast/ast.go b/source/vendor/github.com/evanw/esbuild/internal/ast/ast.go
new file mode 100644
index 0000000..67d2e5b
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/ast/ast.go
@@ -0,0 +1,812 @@
+package ast
+
+// This file contains data structures that are used with the AST packages for
+// both JavaScript and CSS. This helps the bundler treat both AST formats in
+// a somewhat format-agnostic manner.
+
+import (
+	"sort"
+
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type ImportKind uint8
+
+const (
+	// An entry point provided by the user
+	ImportEntryPoint ImportKind = iota
+
+	// An ES6 import or re-export statement
+	ImportStmt
+
+	// A call to "require()"
+	ImportRequire
+
+	// An "import()" expression with a string argument
+	ImportDynamic
+
+	// A call to "require.resolve()"
+	ImportRequireResolve
+
+	// A CSS "@import" rule
+	ImportAt
+
+	// A CSS "composes" declaration
+	ImportComposesFrom
+
+	// A CSS "url(...)" token
+	ImportURL
+)
+
+func (kind ImportKind) StringForMetafile() string {
+	switch kind {
+	case ImportStmt:
+		return "import-statement"
+	case ImportRequire:
+		return "require-call"
+	case ImportDynamic:
+		return "dynamic-import"
+	case ImportRequireResolve:
+		return "require-resolve"
+	case ImportAt:
+		return "import-rule"
+	case ImportComposesFrom:
+		return "composes-from"
+	case ImportURL:
+		return "url-token"
+	case ImportEntryPoint:
+		return "entry-point"
+	default:
+		panic("Internal error")
+	}
+}
+
+func (kind ImportKind) IsFromCSS() bool {
+	switch kind {
+	case ImportAt, ImportComposesFrom, ImportURL:
+		return true
+	}
+	return false
+}
+
+func (kind ImportKind) MustResolveToCSS() bool {
+	switch kind {
+	case ImportAt, ImportComposesFrom:
+		return true
+	}
+	return false
+}
+
+type ImportRecordFlags uint16
+
+const (
+	// Sometimes the parser creates an import record and decides it isn't needed.
+	// For example, TypeScript code may have import statements that later turn
+	// out to be type-only imports after analyzing the whole file.
+	IsUnused ImportRecordFlags = 1 << iota
+
+	// If this is true, the import contains syntax like "* as ns". This is used
+	// to determine whether modules that have no exports need to be wrapped in a
+	// CommonJS wrapper or not.
+	ContainsImportStar
+
+	// If this is true, the import contains an import for the alias "default",
+	// either via the "import x from" or "import {default as x} from" syntax.
+	ContainsDefaultAlias
+
+	// If this is true, the import contains an import for the alias "__esModule",
+	// via the "import {__esModule} from" syntax.
+	ContainsESModuleAlias
+
+	// If true, this "export * from 'path'" statement is evaluated at run-time by
+	// calling the "__reExport()" helper function
+	CallsRunTimeReExportFn
+
+	// Tell the printer to wrap this call to "require()" in "__toESM(...)"
+	WrapWithToESM
+
+	// Tell the printer to wrap this ESM exports object in "__toCJS(...)"
+	WrapWithToCJS
+
+	// Tell the printer to use the runtime "__require()" instead of "require()"
+	CallRuntimeRequire
+
+	// True for the following cases:
+	//
+	//   try { require('x') } catch { handle }
+	//   try { await import('x') } catch { handle }
+	//   try { require.resolve('x') } catch { handle }
+	//   import('x').catch(handle)
+	//   import('x').then(_, handle)
+	//
+	// In these cases we shouldn't generate an error if the path could not be
+	// resolved.
+	HandlesImportErrors
+
+	// If true, this was originally written as a bare "import 'file'" statement
+	WasOriginallyBareImport
+
+	// If true, this import can be removed if it's unused
+	IsExternalWithoutSideEffects
+
+	// If true, "assert { type: 'json' }" was present
+	AssertTypeJSON
+
+	// If true, do not generate "external": true in the metafile
+	ShouldNotBeExternalInMetafile
+
+	// CSS "@import" of an empty file should be removed
+	WasLoadedWithEmptyLoader
+
+	// Unique keys are randomly-generated strings that are used to replace paths
+	// in the source code after it's printed. These must not ever be split apart.
+	ContainsUniqueKey
+)
+
+func (flags ImportRecordFlags) Has(flag ImportRecordFlags) bool {
+	return (flags & flag) != 0
+}
+
+type ImportRecord struct {
+	AssertOrWith *ImportAssertOrWith
+	GlobPattern  *GlobPattern
+	Path         logger.Path
+	Range        logger.Range
+
+	// If the "HandlesImportErrors" flag is present, then this is the location
+	// of the error handler. This is used for error reporting.
+	ErrorHandlerLoc logger.Loc
+
+	// The resolved source index for an internal import (within the bundle) or
+	// invalid for an external import (not included in the bundle)
+	SourceIndex Index32
+
+	// Files imported via the "copy" loader use this instead of "SourceIndex"
+	// because they are sort of like external imports, and are not bundled.
+	CopySourceIndex Index32
+
+	Flags ImportRecordFlags
+	Kind  ImportKind
+}
+
+type AssertOrWithKeyword uint8
+
+const (
+	AssertKeyword AssertOrWithKeyword = iota
+	WithKeyword
+)
+
+func (kw AssertOrWithKeyword) String() string {
+	if kw == AssertKeyword {
+		return "assert"
+	}
+	return "with"
+}
+
+type ImportAssertOrWith struct {
+	Entries            []AssertOrWithEntry
+	KeywordLoc         logger.Loc
+	InnerOpenBraceLoc  logger.Loc
+	InnerCloseBraceLoc logger.Loc
+	OuterOpenBraceLoc  logger.Loc
+	OuterCloseBraceLoc logger.Loc
+	Keyword            AssertOrWithKeyword
+}
+
+type AssertOrWithEntry struct {
+	Key             []uint16 // An identifier or a string
+	Value           []uint16 // Always a string
+	KeyLoc          logger.Loc
+	ValueLoc        logger.Loc
+	PreferQuotedKey bool
+}
+
+func FindAssertOrWithEntry(assertions []AssertOrWithEntry, name string) *AssertOrWithEntry {
+	for _, assertion := range assertions {
+		if helpers.UTF16EqualsString(assertion.Key, name) {
+			return &assertion
+		}
+	}
+	return nil
+}
+
+type GlobPattern struct {
+	Parts       []helpers.GlobPart
+	ExportAlias string
+	Kind        ImportKind
+}
+
+// This stores a 32-bit index where the zero value is an invalid index. This is
+// a better alternative to storing the index as a pointer since that has the
+// same properties but takes up more space and costs an extra pointer traversal.
+type Index32 struct {
+	flippedBits uint32
+}
+
+func MakeIndex32(index uint32) Index32 {
+	return Index32{flippedBits: ^index}
+}
+
+func (i Index32) IsValid() bool {
+	return i.flippedBits != 0
+}
+
+func (i Index32) GetIndex() uint32 {
+	return ^i.flippedBits
+}
+
+type SymbolKind uint8
+
+const (
+	// An unbound symbol is one that isn't declared in the file it's referenced
+	// in. For example, using "window" without declaring it will be unbound.
+	SymbolUnbound SymbolKind = iota
+
+	// This has special merging behavior. You're allowed to re-declare these
+	// symbols more than once in the same scope. These symbols are also hoisted
+	// out of the scope they are declared in to the closest containing function
+	// or module scope. These are the symbols with this kind:
+	//
+	// - Function arguments
+	// - Function statements
+	// - Variables declared using "var"
+	//
+	SymbolHoisted
+	SymbolHoistedFunction
+
+	// There's a weird special case where catch variables declared using a simple
+	// identifier (i.e. not a binding pattern) block hoisted variables instead of
+	// becoming an error:
+	//
+	//   var e = 0;
+	//   try { throw 1 } catch (e) {
+	//     print(e) // 1
+	//     var e = 2
+	//     print(e) // 2
+	//   }
+	//   print(e) // 0 (since the hoisting stops at the catch block boundary)
+	//
+	// However, other forms are still a syntax error:
+	//
+	//   try {} catch (e) { let e }
+	//   try {} catch ({e}) { var e }
+	//
+	// This symbol is for handling this weird special case.
+	SymbolCatchIdentifier
+
+	// Generator and async functions are not hoisted, but still have special
+	// properties such as being able to overwrite previous functions with the
+	// same name
+	SymbolGeneratorOrAsyncFunction
+
+	// This is the special "arguments" variable inside functions
+	SymbolArguments
+
+	// Classes can merge with TypeScript namespaces.
+	SymbolClass
+
+	// Class names are not allowed to be referenced by computed property keys
+	SymbolClassInComputedPropertyKey
+
+	// A class-private identifier (i.e. "#foo").
+	SymbolPrivateField
+	SymbolPrivateMethod
+	SymbolPrivateGet
+	SymbolPrivateSet
+	SymbolPrivateGetSetPair
+	SymbolPrivateStaticField
+	SymbolPrivateStaticMethod
+	SymbolPrivateStaticGet
+	SymbolPrivateStaticSet
+	SymbolPrivateStaticGetSetPair
+
+	// Labels are in their own namespace
+	SymbolLabel
+
+	// TypeScript enums can merge with TypeScript namespaces and other TypeScript
+	// enums.
+	SymbolTSEnum
+
+	// TypeScript namespaces can merge with classes, functions, TypeScript enums,
+	// and other TypeScript namespaces.
+	SymbolTSNamespace
+
+	// In TypeScript, imports are allowed to silently collide with symbols within
+	// the module. Presumably this is because the imports may be type-only.
+	SymbolImport
+
+	// Assigning to a "const" symbol will throw a TypeError at runtime
+	SymbolConst
+
+	// Injected symbols can be overridden by provided defines
+	SymbolInjected
+
+	// Properties can optionally be renamed to shorter names
+	SymbolMangledProp
+
+	// CSS identifiers that are never renamed
+	SymbolGlobalCSS
+
+	// CSS identifiers that are renamed to be unique to the file they are in
+	SymbolLocalCSS
+
+	// This annotates all other symbols that don't have special behavior
+	SymbolOther
+)
+
+func (kind SymbolKind) IsPrivate() bool {
+	return kind >= SymbolPrivateField && kind <= SymbolPrivateStaticGetSetPair
+}
+
+func (kind SymbolKind) IsHoisted() bool {
+	return kind == SymbolHoisted || kind == SymbolHoistedFunction
+}
+
+func (kind SymbolKind) IsHoistedOrFunction() bool {
+	return kind.IsHoisted() || kind == SymbolGeneratorOrAsyncFunction
+}
+
+func (kind SymbolKind) IsFunction() bool {
+	return kind == SymbolHoistedFunction || kind == SymbolGeneratorOrAsyncFunction
+}
+
+func (kind SymbolKind) IsUnboundOrInjected() bool {
+	return kind == SymbolUnbound || kind == SymbolInjected
+}
+
+var InvalidRef Ref = Ref{^uint32(0), ^uint32(0)}
+
+// Files are parsed in parallel for speed. We want to allow each parser to
+// generate symbol IDs that won't conflict with each other. We also want to be
+// able to quickly merge symbol tables from all files into one giant symbol
+// table.
+//
+// We can accomplish both goals by giving each symbol ID two parts: a source
+// index that is unique to the parser goroutine, and an inner index that
+// increments as the parser generates new symbol IDs. Then a symbol map can
+// be an array of arrays indexed first by source index, then by inner index.
+// The maps can be merged quickly by creating a single outer array containing
+// all inner arrays from all parsed files.
+type Ref struct {
+	SourceIndex uint32
+	InnerIndex  uint32
+}
+
+type LocRef struct {
+	Loc logger.Loc
+	Ref Ref
+}
+
+type ImportItemStatus uint8
+
+const (
+	ImportItemNone ImportItemStatus = iota
+
+	// The linker doesn't report import/export mismatch errors
+	ImportItemGenerated
+
+	// The printer will replace this import with "undefined"
+	ImportItemMissing
+)
+
+type SymbolFlags uint16
+
+const (
+	// Certain symbols must not be renamed or minified. For example, the
+	// "arguments" variable is declared by the runtime for every function.
+	// Renaming can also break any identifier used inside a "with" statement.
+	MustNotBeRenamed SymbolFlags = 1 << iota
+
+	// In React's version of JSX, lower-case names are strings while upper-case
+	// names are identifiers. If we are preserving JSX syntax (i.e. not
+	// transforming it), then we need to be careful to name the identifiers
+	// something with a capital letter so further JSX processing doesn't treat
+	// them as strings instead.
+	MustStartWithCapitalLetterForJSX
+
+	// If true, this symbol is the target of a "__name" helper function call.
+	// This call is special because it deliberately doesn't count as a use
+	// of the symbol (otherwise keeping names would disable tree shaking)
+	// so "UseCountEstimate" is not incremented. This flag helps us know to
+	// avoid optimizing this symbol when "UseCountEstimate" is 1 in this case.
+	DidKeepName
+
+	// Sometimes we lower private symbols even if they are supported. For example,
+	// consider the following TypeScript code:
+	//
+	//   class Foo {
+	//     #foo = 123
+	//     bar = this.#foo
+	//   }
+	//
+	// If "useDefineForClassFields: false" is set in "tsconfig.json", then "bar"
+	// must use assignment semantics instead of define semantics. We can compile
+	// that to this code:
+	//
+	//   class Foo {
+	//     constructor() {
+	//       this.#foo = 123;
+	//       this.bar = this.#foo;
+	//     }
+	//     #foo;
+	//   }
+	//
+	// However, we can't do the same for static fields:
+	//
+	//   class Foo {
+	//     static #foo = 123
+	//     static bar = this.#foo
+	//   }
+	//
+	// Compiling these static fields to something like this would be invalid:
+	//
+	//   class Foo {
+	//     static #foo;
+	//   }
+	//   Foo.#foo = 123;
+	//   Foo.bar = Foo.#foo;
+	//
+	// Thus "#foo" must be lowered even though it's supported. Another case is
+	// when we're converting top-level class declarations to class expressions
+	// to avoid the TDZ and the class shadowing symbol is referenced within the
+	// class body:
+	//
+	//   class Foo {
+	//     static #foo = Foo
+	//   }
+	//
+	// This cannot be converted into something like this:
+	//
+	//   var Foo = class {
+	//     static #foo;
+	//   };
+	//   Foo.#foo = Foo;
+	//
+	PrivateSymbolMustBeLowered
+
+	// This is used to remove the all but the last function re-declaration if a
+	// function is re-declared multiple times like this:
+	//
+	//   function foo() { console.log(1) }
+	//   function foo() { console.log(2) }
+	//
+	RemoveOverwrittenFunctionDeclaration
+
+	// This flag is to avoid warning about this symbol more than once. It only
+	// applies to the "module" and "exports" unbound symbols.
+	DidWarnAboutCommonJSInESM
+
+	// If this is present, the symbol could potentially be overwritten. This means
+	// it's not safe to make assumptions about this symbol from the initializer.
+	CouldPotentiallyBeMutated
+
+	// This flags all symbols that were exported from the module using the ES6
+	// "export" keyword, either directly on the declaration or using "export {}".
+	WasExported
+
+	// This means the symbol is a normal function that has no body statements.
+	IsEmptyFunction
+
+	// This means the symbol is a normal function that takes a single argument
+	// and returns that argument.
+	IsIdentityFunction
+
+	// If true, calls to this symbol can be unwrapped (i.e. removed except for
+	// argument side effects) if the result is unused.
+	CallCanBeUnwrappedIfUnused
+)
+
+func (flags SymbolFlags) Has(flag SymbolFlags) bool {
+	return (flags & flag) != 0
+}
+
+// Note: the order of values in this struct matters to reduce struct size.
+type Symbol struct {
+	// This is used for symbols that represent items in the import clause of an
+	// ES6 import statement. These should always be referenced by EImportIdentifier
+	// instead of an EIdentifier. When this is present, the expression should
+	// be printed as a property access off the namespace instead of as a bare
+	// identifier.
+	//
+	// For correctness, this must be stored on the symbol instead of indirectly
+	// associated with the Ref for the symbol somehow. In ES6 "flat bundling"
+	// mode, re-exported symbols are collapsed using MergeSymbols() and renamed
+	// symbols from other files that end up at this symbol must be able to tell
+	// if it has a namespace alias.
+	NamespaceAlias *NamespaceAlias
+
+	// This is the name that came from the parser. Printed names may be renamed
+	// during minification or to avoid name collisions. Do not use the original
+	// name during printing.
+	OriginalName string
+
+	// Used by the parser for single pass parsing. Symbols that have been merged
+	// form a linked-list where the last link is the symbol to use. This link is
+	// an invalid ref if it's the last link. If this isn't invalid, you need to
+	// FollowSymbols to get the real one.
+	Link Ref
+
+	// An estimate of the number of uses of this symbol. This is used to detect
+	// whether a symbol is used or not. For example, TypeScript imports that are
+	// unused must be removed because they are probably type-only imports. This
+	// is an estimate and may not be completely accurate due to oversights in the
+	// code. But it should always be non-zero when the symbol is used.
+	UseCountEstimate uint32
+
+	// This is for generating cross-chunk imports and exports for code splitting.
+	ChunkIndex Index32
+
+	// This is used for minification. Symbols that are declared in sibling scopes
+	// can share a name. A good heuristic (from Google Closure Compiler) is to
+	// assign names to symbols from sibling scopes in declaration order. That way
+	// local variable names are reused in each global function like this, which
+	// improves gzip compression:
+	//
+	//   function x(a, b) { ... }
+	//   function y(a, b, c) { ... }
+	//
+	// The parser fills this in for symbols inside nested scopes. There are three
+	// slot namespaces: regular symbols, label symbols, and private symbols.
+	NestedScopeSlot Index32
+
+	// Boolean values should all be flags instead to save space
+	Flags SymbolFlags
+
+	Kind SymbolKind
+
+	// We automatically generate import items for property accesses off of
+	// namespace imports. This lets us remove the expensive namespace imports
+	// while bundling in many cases, replacing them with a cheap import item
+	// instead:
+	//
+	//   import * as ns from 'path'
+	//   ns.foo()
+	//
+	// That can often be replaced by this, which avoids needing the namespace:
+	//
+	//   import {foo} from 'path'
+	//   foo()
+	//
+	// However, if the import is actually missing then we don't want to report a
+	// compile-time error like we do for real import items. This status lets us
+	// avoid this. We also need to be able to replace such import items with
+	// undefined, which this status is also used for.
+	ImportItemStatus ImportItemStatus
+}
+
+// You should call "MergeSymbols" instead of calling this directly
+func (newSymbol *Symbol) MergeContentsWith(oldSymbol *Symbol) {
+	newSymbol.UseCountEstimate += oldSymbol.UseCountEstimate
+	if oldSymbol.Flags.Has(MustNotBeRenamed) && !newSymbol.Flags.Has(MustNotBeRenamed) {
+		newSymbol.OriginalName = oldSymbol.OriginalName
+		newSymbol.Flags |= MustNotBeRenamed
+	}
+	if oldSymbol.Flags.Has(MustStartWithCapitalLetterForJSX) {
+		newSymbol.Flags |= MustStartWithCapitalLetterForJSX
+	}
+}
+
+type SlotNamespace uint8
+
+const (
+	SlotDefault SlotNamespace = iota
+	SlotLabel
+	SlotPrivateName
+	SlotMangledProp
+	SlotMustNotBeRenamed
+)
+
+func (s *Symbol) SlotNamespace() SlotNamespace {
+	if s.Kind == SymbolUnbound || s.Flags.Has(MustNotBeRenamed) {
+		return SlotMustNotBeRenamed
+	}
+	if s.Kind.IsPrivate() {
+		return SlotPrivateName
+	}
+	if s.Kind == SymbolLabel {
+		return SlotLabel
+	}
+	if s.Kind == SymbolMangledProp {
+		return SlotMangledProp
+	}
+	return SlotDefault
+}
+
+type SlotCounts [4]uint32
+
+func (a *SlotCounts) UnionMax(b SlotCounts) {
+	for i := range *a {
+		ai := &(*a)[i]
+		bi := b[i]
+		if *ai < bi {
+			*ai = bi
+		}
+	}
+}
+
+type NamespaceAlias struct {
+	Alias        string
+	NamespaceRef Ref
+}
+
+type SymbolMap struct {
+	// This could be represented as a "map[Ref]Symbol" but a two-level array was
+	// more efficient in profiles. This appears to be because it doesn't involve
+	// a hash. This representation also makes it trivial to quickly merge symbol
+	// maps from multiple files together. Each file only generates symbols in a
+	// single inner array, so you can join the maps together by just make a
+	// single outer array containing all of the inner arrays. See the comment on
+	// "Ref" for more detail.
+	SymbolsForSource [][]Symbol
+}
+
+func NewSymbolMap(sourceCount int) SymbolMap {
+	return SymbolMap{make([][]Symbol, sourceCount)}
+}
+
+func (sm SymbolMap) Get(ref Ref) *Symbol {
+	return &sm.SymbolsForSource[ref.SourceIndex][ref.InnerIndex]
+}
+
+// Returns the canonical ref that represents the ref for the provided symbol.
+// This may not be the provided ref if the symbol has been merged with another
+// symbol.
+func FollowSymbols(symbols SymbolMap, ref Ref) Ref {
+	symbol := symbols.Get(ref)
+	if symbol.Link == InvalidRef {
+		return ref
+	}
+
+	link := FollowSymbols(symbols, symbol.Link)
+
+	// Only write if needed to avoid concurrent map update hazards
+	if symbol.Link != link {
+		symbol.Link = link
+	}
+
+	return link
+}
+
+// Use this before calling "FollowSymbols" from separate threads to avoid
+// concurrent map update hazards. In Go, mutating a map is not threadsafe
+// but reading from a map is. Calling "FollowAllSymbols" first ensures that
+// all mutation is done up front.
+func FollowAllSymbols(symbols SymbolMap) {
+	for sourceIndex, inner := range symbols.SymbolsForSource {
+		for symbolIndex := range inner {
+			FollowSymbols(symbols, Ref{uint32(sourceIndex), uint32(symbolIndex)})
+		}
+	}
+}
+
+// Makes "old" point to "new" by joining the linked lists for the two symbols
+// together. That way "FollowSymbols" on both "old" and "new" will result in
+// the same ref.
+func MergeSymbols(symbols SymbolMap, old Ref, new Ref) Ref {
+	if old == new {
+		return new
+	}
+
+	oldSymbol := symbols.Get(old)
+	if oldSymbol.Link != InvalidRef {
+		oldSymbol.Link = MergeSymbols(symbols, oldSymbol.Link, new)
+		return oldSymbol.Link
+	}
+
+	newSymbol := symbols.Get(new)
+	if newSymbol.Link != InvalidRef {
+		newSymbol.Link = MergeSymbols(symbols, old, newSymbol.Link)
+		return newSymbol.Link
+	}
+
+	oldSymbol.Link = new
+	newSymbol.MergeContentsWith(oldSymbol)
+	return new
+}
+
+// This is a histogram of character frequencies for minification
+type CharFreq [64]int32
+
+func (freq *CharFreq) Scan(text string, delta int32) {
+	if delta == 0 {
+		return
+	}
+
+	// This matches the order in "DefaultNameMinifier"
+	for i, n := 0, len(text); i < n; i++ {
+		c := text[i]
+		switch {
+		case c >= 'a' && c <= 'z':
+			(*freq)[c-'a'] += delta
+		case c >= 'A' && c <= 'Z':
+			(*freq)[c-('A'-26)] += delta
+		case c >= '0' && c <= '9':
+			(*freq)[c+(52-'0')] += delta
+		case c == '_':
+			(*freq)[62] += delta
+		case c == '$':
+			(*freq)[63] += delta
+		}
+	}
+}
+
+func (freq *CharFreq) Include(other *CharFreq) {
+	for i := 0; i < 64; i++ {
+		(*freq)[i] += (*other)[i]
+	}
+}
+
+type NameMinifier struct {
+	head string
+	tail string
+}
+
+var DefaultNameMinifierJS = NameMinifier{
+	head: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_$",
+	tail: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_$",
+}
+
+var DefaultNameMinifierCSS = NameMinifier{
+	head: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_",
+	tail: "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_",
+}
+
+type charAndCount struct {
+	char  string
+	count int32
+	index byte
+}
+
+// This type is just so we can use Go's native sort function
+type charAndCountArray []charAndCount
+
+func (a charAndCountArray) Len() int          { return len(a) }
+func (a charAndCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a charAndCountArray) Less(i int, j int) bool {
+	ai := a[i]
+	aj := a[j]
+	return ai.count > aj.count || (ai.count == aj.count && ai.index < aj.index)
+}
+
+func (source NameMinifier) ShuffleByCharFreq(freq CharFreq) NameMinifier {
+	// Sort the histogram in descending order by count
+	array := make(charAndCountArray, 64)
+	for i := 0; i < len(source.tail); i++ {
+		array[i] = charAndCount{
+			char:  source.tail[i : i+1],
+			index: byte(i),
+			count: freq[i],
+		}
+	}
+	sort.Sort(array)
+
+	// Compute the identifier start and identifier continue sequences
+	minifier := NameMinifier{}
+	for _, item := range array {
+		if item.char < "0" || item.char > "9" {
+			minifier.head += item.char
+		}
+		minifier.tail += item.char
+	}
+	return minifier
+}
+
+func (minifier NameMinifier) NumberToMinifiedName(i int) string {
+	n_head := len(minifier.head)
+	n_tail := len(minifier.tail)
+
+	j := i % n_head
+	name := minifier.head[j : j+1]
+	i = i / n_head
+
+	for i > 0 {
+		i--
+		j := i % n_tail
+		name += minifier.tail[j : j+1]
+		i = i / n_tail
+	}
+
+	return name
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go b/source/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go
new file mode 100644
index 0000000..67d9e41
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/bundler/bundler.go
@@ -0,0 +1,3331 @@
+package bundler
+
+// The bundler is the core of the "build" and "transform" API calls. Each
+// operation has two phases. The first phase scans the module graph, and is
+// represented by the "ScanBundle" function. The second phase generates the
+// output files from the module graph, and is implemented by the "Compile"
+// function.
+
+import (
+	"bytes"
+	"encoding/base32"
+	"encoding/base64"
+	"fmt"
+	"math/rand"
+	"net/http"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+	"time"
+	"unicode"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/cache"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_parser"
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/graph"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/resolver"
+	"github.com/evanw/esbuild/internal/runtime"
+	"github.com/evanw/esbuild/internal/sourcemap"
+	"github.com/evanw/esbuild/internal/xxhash"
+)
+
+type scannerFile struct {
+	// If "AbsMetadataFile" is present, this will be filled out with information
+	// about this file in JSON format. This is a partial JSON file that will be
+	// fully assembled later.
+	jsonMetadataChunk string
+
+	pluginData interface{}
+	inputFile  graph.InputFile
+}
+
+// This is data related to source maps. It's computed in parallel with linking
+// and must be ready by the time printing happens. This is beneficial because
+// it is somewhat expensive to produce.
+type DataForSourceMap struct {
+	// This data is for the printer. It maps from byte offsets in the file (which
+	// are stored at every AST node) to UTF-16 column offsets (required by source
+	// maps).
+	LineOffsetTables []sourcemap.LineOffsetTable
+
+	// This contains the quoted contents of the original source file. It's what
+	// needs to be embedded in the "sourcesContent" array in the final source
+	// map. Quoting is precomputed because it's somewhat expensive.
+	QuotedContents [][]byte
+}
+
+type Bundle struct {
+	// The unique key prefix is a random string that is unique to every bundling
+	// operation. It is used as a prefix for the unique keys assigned to every
+	// chunk during linking. These unique keys are used to identify each chunk
+	// before the final output paths have been computed.
+	uniqueKeyPrefix string
+
+	fs          fs.FS
+	res         *resolver.Resolver
+	files       []scannerFile
+	entryPoints []graph.EntryPoint
+	options     config.Options
+}
+
+type parseArgs struct {
+	fs              fs.FS
+	log             logger.Log
+	res             *resolver.Resolver
+	caches          *cache.CacheSet
+	prettyPath      string
+	importSource    *logger.Source
+	importWith      *ast.ImportAssertOrWith
+	sideEffects     graph.SideEffects
+	pluginData      interface{}
+	results         chan parseResult
+	inject          chan config.InjectedFile
+	uniqueKeyPrefix string
+	keyPath         logger.Path
+	options         config.Options
+	importPathRange logger.Range
+	sourceIndex     uint32
+	skipResolve     bool
+}
+
+type parseResult struct {
+	resolveResults     []*resolver.ResolveResult
+	globResolveResults map[uint32]globResolveResult
+	file               scannerFile
+	tlaCheck           tlaCheck
+	ok                 bool
+}
+
+type globResolveResult struct {
+	resolveResults map[string]resolver.ResolveResult
+	absPath        string
+	prettyPath     string
+	exportAlias    string
+}
+
+type tlaCheck struct {
+	parent            ast.Index32
+	depth             uint32
+	importRecordIndex uint32
+}
+
+func parseFile(args parseArgs) {
+	source := logger.Source{
+		Index:          args.sourceIndex,
+		KeyPath:        args.keyPath,
+		PrettyPath:     args.prettyPath,
+		IdentifierName: js_ast.GenerateNonUniqueNameFromPath(args.keyPath.Text),
+	}
+
+	var loader config.Loader
+	var absResolveDir string
+	var pluginName string
+	var pluginData interface{}
+
+	if stdin := args.options.Stdin; stdin != nil {
+		// Special-case stdin
+		source.Contents = stdin.Contents
+		loader = stdin.Loader
+		if loader == config.LoaderNone {
+			loader = config.LoaderJS
+		}
+		absResolveDir = args.options.Stdin.AbsResolveDir
+	} else {
+		result, ok := runOnLoadPlugins(
+			args.options.Plugins,
+			args.fs,
+			&args.caches.FSCache,
+			args.log,
+			&source,
+			args.importSource,
+			args.importPathRange,
+			args.pluginData,
+			args.options.WatchMode,
+		)
+		if !ok {
+			if args.inject != nil {
+				args.inject <- config.InjectedFile{
+					Source: source,
+				}
+			}
+			args.results <- parseResult{}
+			return
+		}
+		loader = result.loader
+		absResolveDir = result.absResolveDir
+		pluginName = result.pluginName
+		pluginData = result.pluginData
+	}
+
+	_, base, ext := logger.PlatformIndependentPathDirBaseExt(source.KeyPath.Text)
+
+	// The special "default" loader determines the loader from the file path
+	if loader == config.LoaderDefault {
+		loader = loaderFromFileExtension(args.options.ExtensionToLoader, base+ext)
+	}
+
+	// Reject unsupported import attributes when the loader isn't "copy" (since
+	// "copy" is kind of like "external"). But only do this if this file was not
+	// loaded by a plugin. Plugins are allowed to assign whatever semantics they
+	// want to import attributes.
+	if loader != config.LoaderCopy && pluginName == "" {
+		for _, attr := range source.KeyPath.ImportAttributes.DecodeIntoArray() {
+			var errorText string
+			var errorRange js_lexer.KeyOrValue
+
+			// We only currently handle "type: json"
+			if attr.Key != "type" {
+				errorText = fmt.Sprintf("Importing with the %q attribute is not supported", attr.Key)
+				errorRange = js_lexer.KeyRange
+			} else if attr.Value == "json" {
+				loader = config.LoaderWithTypeJSON
+				continue
+			} else {
+				errorText = fmt.Sprintf("Importing with a type attribute of %q is not supported", attr.Value)
+				errorRange = js_lexer.ValueRange
+			}
+
+			// Everything else is an error
+			r := args.importPathRange
+			if args.importWith != nil {
+				r = js_lexer.RangeOfImportAssertOrWith(*args.importSource, *ast.FindAssertOrWithEntry(args.importWith.Entries, attr.Key), errorRange)
+			}
+			tracker := logger.MakeLineColumnTracker(args.importSource)
+			args.log.AddError(&tracker, r, errorText)
+			if args.inject != nil {
+				args.inject <- config.InjectedFile{
+					Source: source,
+				}
+			}
+			args.results <- parseResult{}
+			return
+		}
+	}
+
+	if loader == config.LoaderEmpty {
+		source.Contents = ""
+	}
+
+	result := parseResult{
+		file: scannerFile{
+			inputFile: graph.InputFile{
+				Source:      source,
+				Loader:      loader,
+				SideEffects: args.sideEffects,
+			},
+			pluginData: pluginData,
+		},
+	}
+
+	defer func() {
+		r := recover()
+		if r != nil {
+			args.log.AddErrorWithNotes(nil, logger.Range{},
+				fmt.Sprintf("panic: %v (while parsing %q)", r, source.PrettyPath),
+				[]logger.MsgData{{Text: helpers.PrettyPrintedStack()}})
+			args.results <- result
+		}
+	}()
+
+	switch loader {
+	case config.LoaderJS, config.LoaderEmpty:
+		ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
+		if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = ok
+
+	case config.LoaderJSX:
+		args.options.JSX.Parse = true
+		ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
+		if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = ok
+
+	case config.LoaderTS, config.LoaderTSNoAmbiguousLessThan:
+		args.options.TS.Parse = true
+		args.options.TS.NoAmbiguousLessThan = loader == config.LoaderTSNoAmbiguousLessThan
+		ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
+		if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = ok
+
+	case config.LoaderTSX:
+		args.options.TS.Parse = true
+		args.options.JSX.Parse = true
+		ast, ok := args.caches.JSCache.Parse(args.log, source, js_parser.OptionsFromConfig(&args.options))
+		if len(ast.Parts) <= 1 { // Ignore the implicitly-generated namespace export part
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_EmptyAST
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = ok
+
+	case config.LoaderCSS, config.LoaderGlobalCSS, config.LoaderLocalCSS:
+		ast := args.caches.CSSCache.Parse(args.log, source, css_parser.OptionsFromConfig(loader, &args.options))
+		result.file.inputFile.Repr = &graph.CSSRepr{AST: ast}
+		result.ok = true
+
+	case config.LoaderJSON, config.LoaderWithTypeJSON:
+		expr, ok := args.caches.JSONCache.Parse(args.log, source, js_parser.JSONOptions{
+			UnsupportedJSFeatures: args.options.UnsupportedJSFeatures,
+		})
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "")
+		if loader == config.LoaderWithTypeJSON {
+			// The exports kind defaults to "none", in which case the linker picks
+			// either ESM or CommonJS depending on the situation. Dynamic imports
+			// causes the linker to pick CommonJS which uses "require()" and then
+			// converts the return value to ESM, which adds extra properties that
+			// aren't supposed to be there when "{ with: { type: 'json' } }" is
+			// present. So if there's an import attribute, we force the type to
+			// be ESM to avoid this.
+			ast.ExportsKind = js_ast.ExportsESM
+		}
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = ok
+
+	case config.LoaderText:
+		encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
+		expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(source.Contents)}}
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "")
+		ast.URLForCSS = "data:text/plain;base64," + encoded
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = true
+
+	case config.LoaderBase64:
+		mimeType := guessMimeType(ext, source.Contents)
+		encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
+		expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(encoded)}}
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "")
+		ast.URLForCSS = "data:" + mimeType + ";base64," + encoded
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = true
+
+	case config.LoaderBinary:
+		encoded := base64.StdEncoding.EncodeToString([]byte(source.Contents))
+		expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(encoded)}}
+		helper := "__toBinary"
+		if args.options.Platform == config.PlatformNode {
+			helper = "__toBinaryNode"
+		}
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, helper)
+		ast.URLForCSS = "data:application/octet-stream;base64," + encoded
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = true
+
+	case config.LoaderDataURL:
+		mimeType := guessMimeType(ext, source.Contents)
+		url := helpers.EncodeStringAsShortestDataURL(mimeType, source.Contents)
+		expr := js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(url)}}
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "")
+		ast.URLForCSS = url
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = true
+
+	case config.LoaderFile:
+		uniqueKey := fmt.Sprintf("%sA%08d", args.uniqueKeyPrefix, args.sourceIndex)
+		uniqueKeyPath := uniqueKey + source.KeyPath.IgnoredSuffix
+		expr := js_ast.Expr{Data: &js_ast.EString{
+			Value:             helpers.StringToUTF16(uniqueKeyPath),
+			ContainsUniqueKey: true,
+		}}
+		ast := js_parser.LazyExportAST(args.log, source, js_parser.OptionsFromConfig(&args.options), expr, "")
+		ast.URLForCSS = uniqueKeyPath
+		if pluginName != "" {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData_FromPlugin
+		} else {
+			result.file.inputFile.SideEffects.Kind = graph.NoSideEffects_PureData
+		}
+		result.file.inputFile.Repr = &graph.JSRepr{AST: ast}
+		result.ok = true
+
+		// Mark that this file is from the "file" loader
+		result.file.inputFile.UniqueKeyForAdditionalFile = uniqueKey
+
+	case config.LoaderCopy:
+		uniqueKey := fmt.Sprintf("%sA%08d", args.uniqueKeyPrefix, args.sourceIndex)
+		uniqueKeyPath := uniqueKey + source.KeyPath.IgnoredSuffix
+		result.file.inputFile.Repr = &graph.CopyRepr{
+			URLForCode: uniqueKeyPath,
+		}
+		result.ok = true
+
+		// Mark that this file is from the "copy" loader
+		result.file.inputFile.UniqueKeyForAdditionalFile = uniqueKey
+
+	default:
+		var message string
+		if source.KeyPath.Namespace == "file" && ext != "" {
+			message = fmt.Sprintf("No loader is configured for %q files: %s", ext, source.PrettyPath)
+		} else {
+			message = fmt.Sprintf("Do not know how to load path: %s", source.PrettyPath)
+		}
+		tracker := logger.MakeLineColumnTracker(args.importSource)
+		args.log.AddError(&tracker, args.importPathRange, message)
+	}
+
+	// Only continue now if parsing was successful
+	if result.ok {
+		// Run the resolver on the parse thread so it's not run on the main thread.
+		// That way the main thread isn't blocked if the resolver takes a while.
+		if recordsPtr := result.file.inputFile.Repr.ImportRecords(); args.options.Mode == config.ModeBundle && !args.skipResolve && recordsPtr != nil {
+			// Clone the import records because they will be mutated later
+			records := append([]ast.ImportRecord{}, *recordsPtr...)
+			*recordsPtr = records
+			result.resolveResults = make([]*resolver.ResolveResult, len(records))
+
+			if len(records) > 0 {
+				type cacheEntry struct {
+					resolveResult *resolver.ResolveResult
+					debug         resolver.DebugMeta
+					didLogError   bool
+				}
+
+				type cacheKey struct {
+					kind  ast.ImportKind
+					path  string
+					attrs logger.ImportAttributes
+				}
+				resolverCache := make(map[cacheKey]cacheEntry)
+				tracker := logger.MakeLineColumnTracker(&source)
+
+				for importRecordIndex := range records {
+					// Don't try to resolve imports that are already resolved
+					record := &records[importRecordIndex]
+					if record.SourceIndex.IsValid() {
+						continue
+					}
+
+					// Encode the import attributes
+					var attrs logger.ImportAttributes
+					if record.AssertOrWith != nil && record.AssertOrWith.Keyword == ast.WithKeyword {
+						data := make(map[string]string, len(record.AssertOrWith.Entries))
+						for _, entry := range record.AssertOrWith.Entries {
+							data[helpers.UTF16ToString(entry.Key)] = helpers.UTF16ToString(entry.Value)
+						}
+						attrs = logger.EncodeImportAttributes(data)
+					}
+
+					// Special-case glob pattern imports
+					if record.GlobPattern != nil {
+						prettyPath := helpers.GlobPatternToString(record.GlobPattern.Parts)
+						switch record.GlobPattern.Kind {
+						case ast.ImportRequire:
+							prettyPath = fmt.Sprintf("require(%q)", prettyPath)
+						case ast.ImportDynamic:
+							prettyPath = fmt.Sprintf("import(%q)", prettyPath)
+						}
+						if results, msg := args.res.ResolveGlob(absResolveDir, record.GlobPattern.Parts, record.GlobPattern.Kind, prettyPath); results != nil {
+							if msg != nil {
+								args.log.AddID(msg.ID, msg.Kind, &tracker, record.Range, msg.Data.Text)
+							}
+							if result.globResolveResults == nil {
+								result.globResolveResults = make(map[uint32]globResolveResult)
+							}
+							for key, result := range results {
+								result.PathPair.Primary.ImportAttributes = attrs
+								if result.PathPair.HasSecondary() {
+									result.PathPair.Secondary.ImportAttributes = attrs
+								}
+								results[key] = result
+							}
+							result.globResolveResults[uint32(importRecordIndex)] = globResolveResult{
+								resolveResults: results,
+								absPath:        args.fs.Join(absResolveDir, "(glob)"),
+								prettyPath:     fmt.Sprintf("%s in %s", prettyPath, result.file.inputFile.Source.PrettyPath),
+								exportAlias:    record.GlobPattern.ExportAlias,
+							}
+						} else {
+							args.log.AddError(&tracker, record.Range, fmt.Sprintf("Could not resolve %s", prettyPath))
+						}
+						continue
+					}
+
+					// Ignore records that the parser has discarded. This is used to remove
+					// type-only imports in TypeScript files.
+					if record.Flags.Has(ast.IsUnused) {
+						continue
+					}
+
+					// Cache the path in case it's imported multiple times in this file
+					cacheKey := cacheKey{
+						kind:  record.Kind,
+						path:  record.Path.Text,
+						attrs: attrs,
+					}
+					entry, ok := resolverCache[cacheKey]
+					if ok {
+						result.resolveResults[importRecordIndex] = entry.resolveResult
+					} else {
+						// Run the resolver and log an error if the path couldn't be resolved
+						resolveResult, didLogError, debug := RunOnResolvePlugins(
+							args.options.Plugins,
+							args.res,
+							args.log,
+							args.fs,
+							&args.caches.FSCache,
+							&source,
+							record.Range,
+							source.KeyPath,
+							record.Path.Text,
+							attrs,
+							record.Kind,
+							absResolveDir,
+							pluginData,
+						)
+						if resolveResult != nil {
+							resolveResult.PathPair.Primary.ImportAttributes = attrs
+							if resolveResult.PathPair.HasSecondary() {
+								resolveResult.PathPair.Secondary.ImportAttributes = attrs
+							}
+						}
+						entry = cacheEntry{
+							resolveResult: resolveResult,
+							debug:         debug,
+							didLogError:   didLogError,
+						}
+						resolverCache[cacheKey] = entry
+
+						// All "require.resolve()" imports should be external because we don't
+						// want to waste effort traversing into them
+						if record.Kind == ast.ImportRequireResolve {
+							if resolveResult != nil && resolveResult.PathPair.IsExternal {
+								// Allow path substitution as long as the result is external
+								result.resolveResults[importRecordIndex] = resolveResult
+							} else if !record.Flags.Has(ast.HandlesImportErrors) {
+								args.log.AddID(logger.MsgID_Bundler_RequireResolveNotExternal, logger.Warning, &tracker, record.Range,
+									fmt.Sprintf("%q should be marked as external for use with \"require.resolve\"", record.Path.Text))
+							}
+							continue
+						}
+					}
+
+					// Check whether we should log an error every time the result is nil,
+					// even if it's from the cache. Do this because the error may not
+					// have been logged for nil entries if the previous instances had
+					// the "HandlesImportErrors" flag.
+					if entry.resolveResult == nil {
+						// Failed imports inside a try/catch are silently turned into
+						// external imports instead of causing errors. This matches a common
+						// code pattern for conditionally importing a module with a graceful
+						// fallback.
+						if !entry.didLogError && !record.Flags.Has(ast.HandlesImportErrors) {
+							// Report an error
+							text, suggestion, notes := ResolveFailureErrorTextSuggestionNotes(args.res, record.Path.Text, record.Kind,
+								pluginName, args.fs, absResolveDir, args.options.Platform, source.PrettyPath, entry.debug.ModifiedImportPath)
+							entry.debug.LogErrorMsg(args.log, &source, record.Range, text, suggestion, notes)
+
+							// Only report this error once per unique import path in the file
+							entry.didLogError = true
+							resolverCache[cacheKey] = entry
+						} else if !entry.didLogError && record.Flags.Has(ast.HandlesImportErrors) {
+							// Report a debug message about why there was no error
+							args.log.AddIDWithNotes(logger.MsgID_Bundler_IgnoredDynamicImport, logger.Debug, &tracker, record.Range,
+								fmt.Sprintf("Importing %q was allowed even though it could not be resolved because dynamic import failures appear to be handled here:",
+									record.Path.Text), []logger.MsgData{tracker.MsgData(js_lexer.RangeOfIdentifier(source, record.ErrorHandlerLoc),
+									"The handler for dynamic import failures is here:")})
+						}
+						continue
+					}
+
+					result.resolveResults[importRecordIndex] = entry.resolveResult
+				}
+			}
+		}
+
+		// Attempt to parse the source map if present
+		if loader.CanHaveSourceMap() && args.options.SourceMap != config.SourceMapNone {
+			var sourceMapComment logger.Span
+			switch repr := result.file.inputFile.Repr.(type) {
+			case *graph.JSRepr:
+				sourceMapComment = repr.AST.SourceMapComment
+			case *graph.CSSRepr:
+				sourceMapComment = repr.AST.SourceMapComment
+			}
+
+			if sourceMapComment.Text != "" {
+				tracker := logger.MakeLineColumnTracker(&source)
+
+				if path, contents := extractSourceMapFromComment(args.log, args.fs, &args.caches.FSCache,
+					&source, &tracker, sourceMapComment, absResolveDir); contents != nil {
+					prettyPath := resolver.PrettyPath(args.fs, path)
+					log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, args.log.Overrides)
+
+					sourceMap := js_parser.ParseSourceMap(log, logger.Source{
+						KeyPath:    path,
+						PrettyPath: prettyPath,
+						Contents:   *contents,
+					})
+
+					if msgs := log.Done(); len(msgs) > 0 {
+						var text string
+						if path.Namespace == "file" {
+							text = fmt.Sprintf("The source map %q was referenced by the file %q here:", prettyPath, args.prettyPath)
+						} else {
+							text = fmt.Sprintf("This source map came from the file %q here:", args.prettyPath)
+						}
+						note := tracker.MsgData(sourceMapComment.Range, text)
+						for _, msg := range msgs {
+							msg.Notes = append(msg.Notes, note)
+							args.log.AddMsg(msg)
+						}
+					}
+
+					// If "sourcesContent" entries aren't present, try filling them in
+					// using the file system. This includes both generating the entire
+					// "sourcesContent" array if it's absent as well as filling in
+					// individual null entries in the array if the array is present.
+					if sourceMap != nil && !args.options.ExcludeSourcesContent {
+						// Make sure "sourcesContent" is big enough
+						if len(sourceMap.SourcesContent) < len(sourceMap.Sources) {
+							slice := make([]sourcemap.SourceContent, len(sourceMap.Sources))
+							copy(slice, sourceMap.SourcesContent)
+							sourceMap.SourcesContent = slice
+						}
+
+						// Attempt to fill in null entries using the file system
+						for i, source := range sourceMap.Sources {
+							if sourceMap.SourcesContent[i].Value == nil {
+								var absPath string
+								if args.fs.IsAbs(source) {
+									absPath = source
+								} else if path.Namespace == "file" {
+									absPath = args.fs.Join(args.fs.Dir(path.Text), source)
+								} else {
+									continue
+								}
+								if contents, err, _ := args.caches.FSCache.ReadFile(args.fs, absPath); err == nil {
+									sourceMap.SourcesContent[i].Value = helpers.StringToUTF16(contents)
+								}
+							}
+						}
+					}
+
+					result.file.inputFile.InputSourceMap = sourceMap
+				}
+			}
+		}
+	}
+
+	// Note: We must always send on the "inject" channel before we send on the
+	// "results" channel to avoid deadlock
+	if args.inject != nil {
+		var exports []config.InjectableExport
+
+		if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok {
+			aliases := make([]string, 0, len(repr.AST.NamedExports))
+			for alias := range repr.AST.NamedExports {
+				aliases = append(aliases, alias)
+			}
+			sort.Strings(aliases) // Sort for determinism
+			exports = make([]config.InjectableExport, len(aliases))
+			for i, alias := range aliases {
+				exports[i] = config.InjectableExport{
+					Alias: alias,
+					Loc:   repr.AST.NamedExports[alias].AliasLoc,
+				}
+			}
+		}
+
+		// Once we send on the "inject" channel, the main thread may mutate the
+		// "options" object to populate the "InjectedFiles" field. So we must
+		// only send on the "inject" channel after we're done using the "options"
+		// object so we don't introduce a data race.
+		isCopyLoader := loader == config.LoaderCopy
+		if isCopyLoader && args.skipResolve {
+			// This is not allowed because the import path would have to be rewritten,
+			// but import paths are not rewritten when bundling isn't enabled.
+			args.log.AddError(nil, logger.Range{},
+				fmt.Sprintf("Cannot inject %q with the \"copy\" loader without bundling enabled", source.PrettyPath))
+		}
+		args.inject <- config.InjectedFile{
+			Source:       source,
+			Exports:      exports,
+			IsCopyLoader: isCopyLoader,
+		}
+	}
+
+	args.results <- result
+}
+
// ResolveFailureErrorTextSuggestionNotes builds the pieces of the diagnostic
// that is logged when an import path cannot be resolved: the primary error
// text, an optional JSON-quoted replacement path to suggest to the user, and
// supplementary notes with hints.
//
// The "hint" candidates below are computed in source order and deliberately
// overwrite each other: a later, more specific hint replaces an earlier, more
// generic one. Only the final hint (if any) is appended to the notes.
func ResolveFailureErrorTextSuggestionNotes(
	res *resolver.Resolver,
	path string,
	kind ast.ImportKind,
	pluginName string,
	fs fs.FS,
	absResolveDir string,
	platform config.Platform,
	originatingFilePath string,
	modifiedImportPath string,
) (text string, suggestion string, notes []logger.MsgData) {
	// If the path was rewritten by the alias feature, report both the original
	// and the remapped path so the user can tell which alias was applied
	if modifiedImportPath != "" {
		text = fmt.Sprintf("Could not resolve %q (originally %q)", modifiedImportPath, path)
		notes = append(notes, logger.MsgData{Text: fmt.Sprintf(
			"The path %q was remapped to %q using the alias feature, which then couldn't be resolved. "+
				"Keep in mind that import path aliases are resolved in the current working directory.",
			path, modifiedImportPath)})
		path = modifiedImportPath
	} else {
		text = fmt.Sprintf("Could not resolve %q", path)
	}
	hint := ""

	// Package-style paths (no leading "./" or "/") can always be marked external
	if resolver.IsPackagePath(path) && !fs.IsAbs(path) {
		hint = fmt.Sprintf("You can mark the path %q as external to exclude it from the bundle, which will remove this error and leave the unresolved path in the bundle.", path)
		if kind == ast.ImportRequire {
			hint += " You can also surround this \"require\" call with a try/catch block to handle this failure at run-time instead of bundle-time."
		} else if kind == ast.ImportDynamic {
			hint += " You can also add \".catch()\" here to handle this failure at run-time instead of bundle-time."
		}
		// If the same path would resolve when treated as a relative path,
		// suggest adding a leading "./" instead of marking it external
		if pluginName == "" && !fs.IsAbs(path) {
			if query, _ := res.ProbeResolvePackageAsRelative(absResolveDir, path, kind); query != nil {
				hint = fmt.Sprintf("Use the relative path %q to reference the file %q. "+
					"Without the leading \"./\", the path %q is being interpreted as a package path instead.",
					"./"+path, resolver.PrettyPath(fs, query.PathPair.Primary), path)
				suggestion = string(helpers.QuoteForJSON("./"+path, false))
			}
		}
	}

	// Importing a node built-in module on a non-node platform likely means the
	// user forgot to set the platform to "node"; phrase the fix for whichever
	// API surface (CLI, JS, Go) is in use
	if platform != config.PlatformNode {
		pkg := strings.TrimPrefix(path, "node:")
		if resolver.BuiltInNodeModules[pkg] {
			var how string
			switch logger.API {
			case logger.CLIAPI:
				how = "--platform=node"
			case logger.JSAPI:
				how = "platform: 'node'"
			case logger.GoAPI:
				how = "Platform: api.PlatformNode"
			}
			hint = fmt.Sprintf("The package %q wasn't found on the file system but is built into node. "+
				"Are you trying to bundle for node? You can use %q to do that, which will remove this error.", path, how)
		}
	}

	// A plugin that loads a module without setting a resolve directory prevents
	// any file system search from happening at all; call that out by name
	if absResolveDir == "" && pluginName != "" {
		where := ""
		if originatingFilePath != "" {
			where = fmt.Sprintf(" for the file %q", originatingFilePath)
		}
		hint = fmt.Sprintf("The plugin %q didn't set a resolve directory%s, "+
			"so esbuild did not search for %q on the file system.", pluginName, where, path)
	}

	if hint != "" {
		if modifiedImportPath != "" {
			// Add a newline if there's already a paragraph of text
			notes = append(notes, logger.MsgData{})

			// Don't add a suggestion if the path was rewritten using an alias
			suggestion = ""
		}
		notes = append(notes, logger.MsgData{Text: hint})
	}
	return
}
+
// isASCIIOnly reports whether every character of "text" is a printable ASCII
// character (in the inclusive range 0x20 to 0x7E). Control characters and
// anything outside 7-bit ASCII cause a false result.
func isASCIIOnly(text string) bool {
	// Byte iteration is equivalent to rune iteration here: any rune outside
	// the printable ASCII range is either itself > 0x7E or encoded using
	// bytes >= 0x80, both of which fail the check below.
	for i := 0; i < len(text); i++ {
		if b := text[i]; b < 0x20 || b > 0x7E {
			return false
		}
	}
	return true
}
+
+func guessMimeType(extension string, contents string) string {
+	mimeType := helpers.MimeTypeByExtension(extension)
+	if mimeType == "" {
+		mimeType = http.DetectContentType([]byte(contents))
+	}
+
+	// Turn "text/plain; charset=utf-8" into "text/plain;charset=utf-8"
+	return strings.ReplaceAll(mimeType, "; ", ";")
+}
+
// extractSourceMapFromComment loads the source map referenced by a
// "sourceMappingURL" comment. It returns the path to associate with the
// source map plus its contents, or a zero path and nil contents when the
// source map could not be loaded (any problem is logged here, so callers
// don't need to report anything on a nil result).
func extractSourceMapFromComment(
	log logger.Log,
	fs fs.FS,
	fsCache *cache.FSCache,
	source *logger.Source,
	tracker *logger.LineColumnTracker,
	comment logger.Span,
	absResolveDir string,
) (logger.Path, *string) {
	// Support data URLs
	if parsed, ok := resolver.ParseDataURL(comment.Text); ok {
		if contents, err := parsed.DecodeData(); err == nil {
			// An inline source map has no path of its own, so reuse the file's
			// pretty path with a marker suffix to distinguish it
			return logger.Path{Text: source.PrettyPath, IgnoredSuffix: "#sourceMappingURL"}, &contents
		} else {
			log.AddID(logger.MsgID_SourceMap_UnsupportedSourceMapComment, logger.Warning, tracker, comment.Range,
				fmt.Sprintf("Unsupported source map comment: %s", err.Error()))
			return logger.Path{}, nil
		}
	}

	// Relative path in a file with an absolute path
	if absResolveDir != "" {
		absPath := fs.Join(absResolveDir, comment.Text)
		path := logger.Path{Text: absPath, Namespace: "file"}
		// "originalError" carries the underlying OS error for debug logging,
		// separate from the possibly-wrapped "err" used for classification below
		contents, err, originalError := fsCache.ReadFile(fs, absPath)
		if log.Level <= logger.LevelDebug && originalError != nil {
			log.AddID(logger.MsgID_None, logger.Debug, tracker, comment.Range, fmt.Sprintf("Failed to read file %q: %s", resolver.PrettyPath(fs, path), originalError.Error()))
		}
		if err != nil {
			kind := logger.Warning
			if err == syscall.ENOENT {
				// Don't report a warning because this is likely unactionable
				kind = logger.Debug
			}
			log.AddID(logger.MsgID_SourceMap_MissingSourceMap, kind, tracker, comment.Range,
				fmt.Sprintf("Cannot read file %q: %s", resolver.PrettyPath(fs, path), err.Error()))
			return logger.Path{}, nil
		}
		return path, &contents
	}

	// Anything else is unsupported
	return logger.Path{}, nil
}
+
+func sanitizeLocation(fs fs.FS, loc *logger.MsgLocation) {
+	if loc != nil {
+		if loc.Namespace == "" {
+			loc.Namespace = "file"
+		}
+		if loc.File != "" {
+			loc.File = resolver.PrettyPath(fs, logger.Path{Text: loc.File, Namespace: loc.Namespace})
+		}
+	}
+}
+
// logPluginMessages forwards the messages returned by a single plugin
// callback into the main log, filling in missing metadata (plugin name and
// message location) and normalizing file paths. If the plugin callback
// itself failed ("thrown" is non-nil), that failure is also logged as an
// error. Returns true if at least one error-level message was logged from
// either source.
//
// Note that "range" over "msgs" yields copies, so the metadata fix-ups below
// mutate the copy that is passed to log.AddMsg, not the caller's slice.
func logPluginMessages(
	fs fs.FS,
	log logger.Log,
	name string,
	msgs []logger.Msg,
	thrown error,
	importSource *logger.Source,
	importPathRange logger.Range,
) bool {
	didLogError := false
	tracker := logger.MakeLineColumnTracker(importSource)

	// Report errors and warnings generated by the plugin
	for _, msg := range msgs {
		if msg.PluginName == "" {
			msg.PluginName = name
		}
		if msg.Kind == logger.Error {
			didLogError = true
		}

		// Sanitize the locations
		for _, note := range msg.Notes {
			sanitizeLocation(fs, note.Location)
		}
		if msg.Data.Location == nil {
			// Default to pointing at the import that triggered this plugin
			msg.Data.Location = tracker.MsgLocationOrNil(importPathRange)
		} else {
			sanitizeLocation(fs, msg.Data.Location)
			if importSource != nil {
				if msg.Data.Location.File == "" {
					msg.Data.Location.File = importSource.PrettyPath
				}
				// Attach a note tying this message back to the triggering import
				msg.Notes = append(msg.Notes, tracker.MsgData(importPathRange,
					fmt.Sprintf("The plugin %q was triggered by this import", name)))
			}
		}

		log.AddMsg(msg)
	}

	// Report errors thrown by the plugin itself
	if thrown != nil {
		didLogError = true
		text := thrown.Error()
		log.AddMsg(logger.Msg{
			PluginName: name,
			Kind:       logger.Error,
			Data: logger.MsgData{
				Text:       text,
				Location:   tracker.MsgLocationOrNil(importPathRange),
				UserDetail: thrown,
			},
		})
	}

	return didLogError
}
+
// RunOnResolvePlugins runs every plugin's "onResolve" callbacks in order
// until one of them resolves the path (or produces an error), then falls
// back to esbuild's built-in resolver if no plugin handled it. It returns
// the resolve result (nil if the path is unresolved), whether an error was
// already logged (so callers can avoid double-reporting), and resolver
// debug metadata used to build better error messages.
func RunOnResolvePlugins(
	plugins []config.Plugin,
	res *resolver.Resolver,
	log logger.Log,
	fs fs.FS,
	fsCache *cache.FSCache,
	importSource *logger.Source,
	importPathRange logger.Range,
	importer logger.Path,
	path string,
	importAttributes logger.ImportAttributes,
	kind ast.ImportKind,
	absResolveDir string,
	pluginData interface{},
) (*resolver.ResolveResult, bool, resolver.DebugMeta) {
	resolverArgs := config.OnResolveArgs{
		Path:       path,
		ResolveDir: absResolveDir,
		Kind:       kind,
		PluginData: pluginData,
		Importer:   importer,
		With:       importAttributes,
	}
	applyPath := logger.Path{
		Text:      path,
		Namespace: importer.Namespace,
	}
	tracker := logger.MakeLineColumnTracker(importSource)

	// Apply resolver plugins in order until one succeeds
	for _, plugin := range plugins {
		for _, onResolve := range plugin.OnResolve {
			// Skip callbacks whose filter/namespace don't match this path
			if !config.PluginAppliesToPath(applyPath, onResolve.Filter, onResolve.Namespace) {
				continue
			}

			result := onResolve.Callback(resolverArgs)
			pluginName := result.PluginName
			if pluginName == "" {
				pluginName = plugin.Name
			}
			didLogError := logPluginMessages(fs, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange)

			// Plugins can also provide additional file system paths to watch
			for _, file := range result.AbsWatchFiles {
				fsCache.ReadFile(fs, file)
			}
			for _, dir := range result.AbsWatchDirs {
				if entries, err, _ := fs.ReadDirectory(dir); err == nil {
					entries.SortedKeys()
				}
			}

			// Stop now if there was an error
			if didLogError {
				return nil, true, resolver.DebugMeta{}
			}

			// The "file" namespace is the default for non-external paths, but not
			// for external paths. External paths must explicitly specify the "file"
			// namespace.
			nsFromPlugin := result.Path.Namespace
			if result.Path.Namespace == "" && !result.External {
				result.Path.Namespace = "file"
			}

			// Otherwise, continue on to the next resolver if this loader didn't succeed
			if result.Path.Text == "" {
				if result.External {
					result.Path = logger.Path{Text: path}
				} else {
					continue
				}
			}

			// Paths in the file namespace must be absolute paths
			if result.Path.Namespace == "file" && !fs.IsAbs(result.Path.Text) {
				// Distinguish an explicit "file" namespace from the implicit default
				// so the error message can suggest the right fix
				if nsFromPlugin == "file" {
					log.AddError(&tracker, importPathRange,
						fmt.Sprintf("Plugin %q returned a path in the \"file\" namespace that is not an absolute path: %s", pluginName, result.Path.Text))
				} else {
					log.AddError(&tracker, importPathRange,
						fmt.Sprintf("Plugin %q returned a non-absolute path: %s (set a namespace if this is not a file path)", pluginName, result.Path.Text))
				}
				return nil, true, resolver.DebugMeta{}
			}

			var sideEffectsData *resolver.SideEffectsData
			if result.IsSideEffectFree {
				sideEffectsData = &resolver.SideEffectsData{
					PluginName: pluginName,
				}
			}

			return &resolver.ResolveResult{
				PathPair:               resolver.PathPair{Primary: result.Path, IsExternal: result.External},
				PluginData:             result.PluginData,
				PrimarySideEffectsData: sideEffectsData,
			}, false, resolver.DebugMeta{}
		}
	}

	// Resolve relative to the resolve directory by default. All paths in the
	// "file" namespace automatically have a resolve directory. Loader plugins
	// can also configure a custom resolve directory for files in other namespaces.
	result, debug := res.Resolve(absResolveDir, path, kind)

	// Warn when the case used for importing differs from the actual file name
	if result != nil && result.DifferentCase != nil && !helpers.IsInsideNodeModules(absResolveDir) {
		diffCase := *result.DifferentCase
		log.AddID(logger.MsgID_Bundler_DifferentPathCase, logger.Warning, &tracker, importPathRange, fmt.Sprintf(
			"Use %q instead of %q to avoid issues with case-sensitive file systems",
			resolver.PrettyPath(fs, logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Actual), Namespace: "file"}),
			resolver.PrettyPath(fs, logger.Path{Text: fs.Join(diffCase.Dir, diffCase.Query), Namespace: "file"}),
		))
	}

	return result, false, debug
}
+
// loaderPluginResult is the outcome of loading a single module, either via a
// plugin's "onLoad" callback or one of the built-in loaders (file system,
// data URL, disabled module).
type loaderPluginResult struct {
	pluginData    interface{}   // opaque plugin-provided data, forwarded to later resolve callbacks
	absResolveDir string        // directory for resolving this module's relative imports ("" if none)
	pluginName    string        // name of the plugin that loaded the module ("" for built-in loaders)
	loader        config.Loader // how the loaded contents should be interpreted (JS, CSS, JSON, ...)
}
+
+func runOnLoadPlugins(
+	plugins []config.Plugin,
+	fs fs.FS,
+	fsCache *cache.FSCache,
+	log logger.Log,
+	source *logger.Source,
+	importSource *logger.Source,
+	importPathRange logger.Range,
+	pluginData interface{},
+	isWatchMode bool,
+) (loaderPluginResult, bool) {
+	loaderArgs := config.OnLoadArgs{
+		Path:       source.KeyPath,
+		PluginData: pluginData,
+	}
+	tracker := logger.MakeLineColumnTracker(importSource)
+
+	// Apply loader plugins in order until one succeeds
+	for _, plugin := range plugins {
+		for _, onLoad := range plugin.OnLoad {
+			if !config.PluginAppliesToPath(source.KeyPath, onLoad.Filter, onLoad.Namespace) {
+				continue
+			}
+
+			result := onLoad.Callback(loaderArgs)
+			pluginName := result.PluginName
+			if pluginName == "" {
+				pluginName = plugin.Name
+			}
+			didLogError := logPluginMessages(fs, log, pluginName, result.Msgs, result.ThrownError, importSource, importPathRange)
+
+			// Plugins can also provide additional file system paths to watch
+			for _, file := range result.AbsWatchFiles {
+				fsCache.ReadFile(fs, file)
+			}
+			for _, dir := range result.AbsWatchDirs {
+				if entries, err, _ := fs.ReadDirectory(dir); err == nil {
+					entries.SortedKeys()
+				}
+			}
+
+			// Stop now if there was an error
+			if didLogError {
+				if isWatchMode && source.KeyPath.Namespace == "file" {
+					fsCache.ReadFile(fs, source.KeyPath.Text) // Read the file for watch mode tracking
+				}
+				return loaderPluginResult{}, false
+			}
+
+			// Otherwise, continue on to the next loader if this loader didn't succeed
+			if result.Contents == nil {
+				continue
+			}
+
+			source.Contents = *result.Contents
+			loader := result.Loader
+			if loader == config.LoaderNone {
+				loader = config.LoaderJS
+			}
+			if result.AbsResolveDir == "" && source.KeyPath.Namespace == "file" {
+				result.AbsResolveDir = fs.Dir(source.KeyPath.Text)
+			}
+			if isWatchMode && source.KeyPath.Namespace == "file" {
+				fsCache.ReadFile(fs, source.KeyPath.Text) // Read the file for watch mode tracking
+			}
+			return loaderPluginResult{
+				loader:        loader,
+				absResolveDir: result.AbsResolveDir,
+				pluginName:    pluginName,
+				pluginData:    result.PluginData,
+			}, true
+		}
+	}
+
+	// Force disabled modules to be empty
+	if source.KeyPath.IsDisabled() {
+		return loaderPluginResult{loader: config.LoaderEmpty}, true
+	}
+
+	// Read normal modules from disk
+	if source.KeyPath.Namespace == "file" {
+		if contents, err, originalError := fsCache.ReadFile(fs, source.KeyPath.Text); err == nil {
+			source.Contents = contents
+			return loaderPluginResult{
+				loader:        config.LoaderDefault,
+				absResolveDir: fs.Dir(source.KeyPath.Text),
+			}, true
+		} else {
+			if log.Level <= logger.LevelDebug && originalError != nil {
+				log.AddID(logger.MsgID_None, logger.Debug, nil, logger.Range{}, fmt.Sprintf("Failed to read file %q: %s", source.KeyPath.Text, originalError.Error()))
+			}
+			if err == syscall.ENOENT {
+				log.AddError(&tracker, importPathRange,
+					fmt.Sprintf("Could not read from file: %s", source.KeyPath.Text))
+				return loaderPluginResult{}, false
+			} else {
+				log.AddError(&tracker, importPathRange,
+					fmt.Sprintf("Cannot read file %q: %s", resolver.PrettyPath(fs, source.KeyPath), err.Error()))
+				return loaderPluginResult{}, false
+			}
+		}
+	}
+
+	// Native support for data URLs. This is supported natively by node:
+	// https://nodejs.org/docs/latest/api/esm.html#esm_data_imports
+	if source.KeyPath.Namespace == "dataurl" {
+		if parsed, ok := resolver.ParseDataURL(source.KeyPath.Text); ok {
+			if contents, err := parsed.DecodeData(); err != nil {
+				log.AddError(&tracker, importPathRange,
+					fmt.Sprintf("Could not load data URL: %s", err.Error()))
+				return loaderPluginResult{loader: config.LoaderNone}, true
+			} else {
+				source.Contents = contents
+				if mimeType := parsed.DecodeMIMEType(); mimeType != resolver.MIMETypeUnsupported {
+					switch mimeType {
+					case resolver.MIMETypeTextCSS:
+						return loaderPluginResult{loader: config.LoaderCSS}, true
+					case resolver.MIMETypeTextJavaScript:
+						return loaderPluginResult{loader: config.LoaderJS}, true
+					case resolver.MIMETypeApplicationJSON:
+						return loaderPluginResult{loader: config.LoaderJSON}, true
+					}
+				}
+			}
+		}
+	}
+
+	// Otherwise, fail to load the path
+	return loaderPluginResult{loader: config.LoaderNone}, true
+}
+
+func loaderFromFileExtension(extensionToLoader map[string]config.Loader, base string) config.Loader {
+	// Pick the loader with the longest matching extension. So if there's an
+	// extension for ".css" and for ".module.css", we want to match the one for
+	// ".module.css" before the one for ".css".
+	if i := strings.IndexByte(base, '.'); i != -1 {
+		for {
+			if loader, ok := extensionToLoader[base[i:]]; ok {
+				return loader
+			}
+			base = base[i+1:]
+			i = strings.IndexByte(base, '.')
+			if i == -1 {
+				break
+			}
+		}
+	} else {
+		// If there's no extension, explicitly check for an extensionless loader
+		if loader, ok := extensionToLoader[""]; ok {
+			return loader
+		}
+	}
+	return config.LoaderNone
+}
+
// canonicalFileSystemPathForWindows identifies a file by its lowercase
// absolute path with backslashes replaced by forward slashes. On Windows
// several spellings can refer to the same underlying file, so this canonical
// form is used as a map key to deduplicate them.
func canonicalFileSystemPathForWindows(absPath string) string {
	lowered := strings.ToLower(absPath)
	return strings.ReplaceAll(lowered, "\\", "/")
}
+
// HashForFileName renders the first 8 characters of the standard base32
// encoding of the given hash bytes, suitable for embedding in an output file
// name. The input must contain at least 5 bytes so that 8 characters exist.
func HashForFileName(hashBytes []byte) string {
	encoded := base32.StdEncoding.EncodeToString(hashBytes)
	return encoded[:8]
}
+
// scanner holds the state for the scan phase of a bundle: starting from the
// entry points it discovers files, assigns each one a source index, and
// launches one parse goroutine per file whose result arrives on
// "resultChannel".
type scanner struct {
	log             logger.Log
	fs              fs.FS
	res             *resolver.Resolver
	caches          *cache.CacheSet
	timer           *helpers.Timer
	uniqueKeyPrefix string

	// These are not guarded by a mutex because it's only ever modified by a single
	// thread. Note that not all results in the "results" array are necessarily
	// valid. Make sure to check the "ok" flag before using them.
	results       []parseResult
	visited       map[logger.Path]visitedFile
	resultChannel chan parseResult

	options config.Options

	// Also not guarded by a mutex for the same reason. Counts parse results
	// that have been scheduled but not yet received from "resultChannel".
	remaining int
}
+
// visitedFile records the source index assigned to a path that has already
// been scheduled for parsing, so each unique path is only parsed once.
type visitedFile struct {
	sourceIndex uint32
}
+
// EntryPoint describes one user-specified entry point of the bundle.
type EntryPoint struct {
	InputPath                string // the path as provided (file path, package path, or glob pattern)
	OutputPath               string // optional explicit output path; auto-generated when empty
	InputPathInFileNamespace bool   // set when InputPath was found to be a real file on disk
}
+
// Seed the shared math/rand source exactly once. The previous code called
// "rand.Seed(time.Now().UnixNano())" on every invocation, which clobbered the
// global generator's stream for all other users and made two calls within the
// same nanosecond tick return identical "unique" prefixes.
var uniqueKeySeedOnce sync.Once

// generateUniqueKeyPrefix returns a 16-character URL-safe base64 string
// (derived from 12 random bytes) that namespaces the placeholder keys of one
// bundling operation.
func generateUniqueKeyPrefix() (string, error) {
	uniqueKeySeedOnce.Do(func() {
		rand.Seed(time.Now().UnixNano())
	})

	var data [12]byte
	if _, err := rand.Read(data[:]); err != nil {
		return "", err
	}

	// This is 16 bytes and shouldn't generate escape characters when put into strings
	return base64.URLEncoding.EncodeToString(data[:]), nil
}
+
// This creates a bundle by scanning over the whole module graph starting from
// the entry points until all modules are reached. Each module has some number
// of import paths which are resolved to module identifiers (i.e. "onResolve"
// in the plugin API). Each unique module identifier is loaded once (i.e.
// "onLoad" in the plugin API).
func ScanBundle(
	call config.APICall,
	log logger.Log,
	fs fs.FS,
	caches *cache.CacheSet,
	entryPoints []EntryPoint,
	options config.Options,
	timer *helpers.Timer,
) Bundle {
	timer.Begin("Scan phase")
	defer timer.End("Scan phase")

	applyOptionDefaults(&options)

	// Run "onStart" plugins in parallel. IMPORTANT: We always need to run all
	// "onStart" callbacks even when the build is cancelled, because plugins may
	// rely on invariants that are started in "onStart" and ended in "onEnd".
	// This works because "onEnd" callbacks are always run as well.
	timer.Begin("On-start callbacks")
	onStartWaitGroup := sync.WaitGroup{}
	for _, plugin := range options.Plugins {
		for _, onStart := range plugin.OnStart {
			onStartWaitGroup.Add(1)
			// Pass "plugin" and "onStart" as arguments so each goroutine gets
			// its own copies rather than sharing the loop variables
			go func(plugin config.Plugin, onStart config.OnStart) {
				result := onStart.Callback()
				logPluginMessages(fs, log, plugin.Name, result.Msgs, result.ThrownError, nil, logger.Range{})
				onStartWaitGroup.Done()
			}(plugin, onStart)
		}
	}

	// Each bundling operation gets a separate unique key
	uniqueKeyPrefix, err := generateUniqueKeyPrefix()
	if err != nil {
		// Note: scanning still proceeds with an empty prefix after logging
		log.AddError(nil, logger.Range{}, fmt.Sprintf("Failed to read from randomness source: %s", err.Error()))
	}

	// This may mutate "options" by the "tsconfig.json" override settings
	res := resolver.NewResolver(call, fs, log, caches, &options)

	s := scanner{
		log:             log,
		fs:              fs,
		res:             res,
		caches:          caches,
		options:         options,
		timer:           timer,
		results:         make([]parseResult, 0, caches.SourceIndexCache.LenHint()),
		visited:         make(map[logger.Path]visitedFile),
		resultChannel:   make(chan parseResult),
		uniqueKeyPrefix: uniqueKeyPrefix,
	}

	// Always start by parsing the runtime file. The first slot of "results"
	// is reserved for it here; the parsed AST arrives asynchronously over the
	// result channel like any other file.
	s.results = append(s.results, parseResult{})
	s.remaining++
	go func() {
		source, ast, ok := globalRuntimeCache.parseRuntime(&options)
		s.resultChannel <- parseResult{
			file: scannerFile{
				inputFile: graph.InputFile{
					Source: source,
					Repr: &graph.JSRepr{
						AST: ast,
					},
					OmitFromSourceMapsAndMetafile: true,
				},
			},
			ok: ok,
		}
	}()

	// Wait for all "onStart" plugins here before continuing. People sometimes run
	// setup code in "onStart" that "onLoad" expects to be able to use without
	// "onLoad" needing to block on the completion of their "onStart" callback.
	//
	// We want to enable this:
	//
	//   let plugin = {
	//     name: 'example',
	//     setup(build) {
	//       let started = false
	//       build.onStart(() => started = true)
	//       build.onLoad({ filter: /.*/ }, () => {
	//         assert(started === true)
	//       })
	//     },
	//   }
	//
	// without people having to write something like this:
	//
	//   let plugin = {
	//     name: 'example',
	//     setup(build) {
	//       let started = {}
	//       started.promise = new Promise(resolve => {
	//         started.resolve = resolve
	//       })
	//       build.onStart(() => {
	//         started.resolve(true)
	//       })
	//       build.onLoad({ filter: /.*/ }, async () => {
	//         assert(await started.promise === true)
	//       })
	//     },
	//   }
	//
	onStartWaitGroup.Wait()
	timer.End("On-start callbacks")

	// We can check the cancel flag now that all "onStart" callbacks are done
	if options.CancelFlag.DidCancel() {
		return Bundle{options: options}
	}

	s.preprocessInjectedFiles()

	if options.CancelFlag.DidCancel() {
		return Bundle{options: options}
	}

	entryPointMeta := s.addEntryPoints(entryPoints)

	if options.CancelFlag.DidCancel() {
		return Bundle{options: options}
	}

	s.scanAllDependencies()

	if options.CancelFlag.DidCancel() {
		return Bundle{options: options}
	}

	files := s.processScannedFiles(entryPointMeta)

	if options.CancelFlag.DidCancel() {
		return Bundle{options: options}
	}

	return Bundle{
		fs:              fs,
		res:             s.res,
		files:           files,
		entryPoints:     entryPointMeta,
		uniqueKeyPrefix: uniqueKeyPrefix,
		options:         s.options,
	}
}
+
// inputKind classifies how a file entered the module graph.
type inputKind uint8

const (
	inputKindNormal inputKind = iota // discovered through an import record
	inputKindEntryPoint // an explicitly-listed entry point
	inputKindStdin // contents supplied via the stdin configuration
)
+
// This returns the source index of the resulting file
//
// If the path has already been visited, the previously-allocated source index
// is returned immediately. Otherwise a new source index is allocated and a
// "parseFile" goroutine is launched; its result later arrives on
// "s.resultChannel" and is accounted for by "s.remaining".
func (s *scanner) maybeParseFile(
	resolveResult resolver.ResolveResult,
	prettyPath string,
	importSource *logger.Source,
	importPathRange logger.Range,
	importWith *ast.ImportAssertOrWith,
	kind inputKind,
	inject chan config.InjectedFile,
) uint32 {
	path := resolveResult.PathPair.Primary
	visitedKey := path
	if visitedKey.Namespace == "file" {
		// Canonicalize file paths so different Windows spellings of the same
		// file collapse into a single "visited" map entry
		visitedKey.Text = canonicalFileSystemPathForWindows(visitedKey.Text)
	}

	// Only parse a given file path once
	visited, ok := s.visited[visitedKey]
	if ok {
		if inject != nil {
			// Unblock a waiting injector; this file was already processed
			inject <- config.InjectedFile{}
		}
		return visited.sourceIndex
	}

	visited = visitedFile{
		sourceIndex: s.allocateSourceIndex(visitedKey, cache.SourceIndexNormal),
	}
	s.visited[visitedKey] = visited
	s.remaining++
	optionsClone := s.options
	if kind != inputKindStdin {
		// Only the stdin entry point itself should see the stdin configuration
		optionsClone.Stdin = nil
	}

	// Allow certain properties to be overridden by "tsconfig.json"
	resolveResult.TSConfigJSX.ApplyTo(&optionsClone.JSX)
	if resolveResult.TSConfig != nil {
		optionsClone.TS.Config = *resolveResult.TSConfig
	}
	if resolveResult.TSAlwaysStrict != nil {
		optionsClone.TSAlwaysStrict = resolveResult.TSAlwaysStrict
	}

	// Set the module type preference using node's module type rules
	if strings.HasSuffix(path.Text, ".mjs") {
		optionsClone.ModuleTypeData.Type = js_ast.ModuleESM_MJS
	} else if strings.HasSuffix(path.Text, ".mts") {
		optionsClone.ModuleTypeData.Type = js_ast.ModuleESM_MTS
	} else if strings.HasSuffix(path.Text, ".cjs") {
		optionsClone.ModuleTypeData.Type = js_ast.ModuleCommonJS_CJS
	} else if strings.HasSuffix(path.Text, ".cts") {
		optionsClone.ModuleTypeData.Type = js_ast.ModuleCommonJS_CTS
	} else if strings.HasSuffix(path.Text, ".js") || strings.HasSuffix(path.Text, ".jsx") ||
		strings.HasSuffix(path.Text, ".ts") || strings.HasSuffix(path.Text, ".tsx") {
		optionsClone.ModuleTypeData = resolveResult.ModuleTypeData
	} else {
		// The "type" setting in "package.json" only applies to ".js" files
		optionsClone.ModuleTypeData.Type = js_ast.ModuleUnknown
	}

	// Enable bundling for injected files so we always do tree shaking. We
	// never want to include unnecessary code from injected files since they
	// are essentially bundled. However, if we do this we should skip the
	// resolving step when we're not bundling. It'd be strange to get
	// resolution errors when the top-level bundling controls are disabled.
	skipResolve := false
	if inject != nil && optionsClone.Mode != config.ModeBundle {
		optionsClone.Mode = config.ModeBundle
		skipResolve = true
	}

	// Special-case pretty-printed paths for data URLs: show at most 64
	// characters of the URL (with newlines escaped) plus "..." so log output
	// stays readable
	if path.Namespace == "dataurl" {
		if _, ok := resolver.ParseDataURL(path.Text); ok {
			prettyPath = path.Text
			if len(prettyPath) > 65 {
				prettyPath = prettyPath[:65]
			}
			prettyPath = strings.ReplaceAll(prettyPath, "\n", "\\n")
			if len(prettyPath) > 64 {
				prettyPath = prettyPath[:64] + "..."
			}
			prettyPath = fmt.Sprintf("<%s>", prettyPath)
		}
	}

	// Propagate "sideEffects: false" information from "package.json" if present
	var sideEffects graph.SideEffects
	if resolveResult.PrimarySideEffectsData != nil {
		sideEffects.Kind = graph.NoSideEffects_PackageJSON
		sideEffects.Data = resolveResult.PrimarySideEffectsData
	}

	// Parse on another goroutine; the result arrives on "s.resultChannel"
	go parseFile(parseArgs{
		fs:              s.fs,
		log:             s.log,
		res:             s.res,
		caches:          s.caches,
		keyPath:         path,
		prettyPath:      prettyPath,
		sourceIndex:     visited.sourceIndex,
		importSource:    importSource,
		sideEffects:     sideEffects,
		importPathRange: importPathRange,
		importWith:      importWith,
		pluginData:      resolveResult.PluginData,
		options:         optionsClone,
		results:         s.resultChannel,
		inject:          inject,
		skipResolve:     skipResolve,
		uniqueKeyPrefix: s.uniqueKeyPrefix,
	})

	return visited.sourceIndex
}
+
+func (s *scanner) allocateSourceIndex(path logger.Path, kind cache.SourceIndexKind) uint32 {
+	// Allocate a source index using the shared source index cache so that
+	// subsequent builds reuse the same source index and therefore use the
+	// cached parse results for increased speed.
+	sourceIndex := s.caches.SourceIndexCache.Get(path, kind)
+
+	// Grow the results array to fit this source index
+	if newLen := int(sourceIndex) + 1; len(s.results) < newLen {
+		// Reallocate to a bigger array
+		if cap(s.results) < newLen {
+			s.results = append(make([]parseResult, 0, 2*newLen), s.results...)
+		}
+
+		// Grow in place
+		s.results = s.results[:newLen]
+	}
+
+	return sourceIndex
+}
+
+func (s *scanner) allocateGlobSourceIndex(parentSourceIndex uint32, globIndex uint32) uint32 {
+	// Allocate a source index using the shared source index cache so that
+	// subsequent builds reuse the same source index and therefore use the
+	// cached parse results for increased speed.
+	sourceIndex := s.caches.SourceIndexCache.GetGlob(parentSourceIndex, globIndex)
+
+	// Grow the results array to fit this source index
+	if newLen := int(sourceIndex) + 1; len(s.results) < newLen {
+		// Reallocate to a bigger array
+		if cap(s.results) < newLen {
+			s.results = append(make([]parseResult, 0, 2*newLen), s.results...)
+		}
+
+		// Grow in place
+		s.results = s.results[:newLen]
+	}
+
+	return sourceIndex
+}
+
// preprocessInjectedFiles turns compound "--define" values and "--inject"
// paths into injected files before the main scan begins. Defines are
// generated inline from their already-parsed expressions; inject paths go
// through the resolver plugins and the normal parser in parallel. The result
// is stored in "s.options.InjectedFiles".
func (s *scanner) preprocessInjectedFiles() {
	s.timer.Begin("Preprocess injected files")
	defer s.timer.End("Preprocess injected files")

	injectedFiles := make([]config.InjectedFile, 0, len(s.options.InjectedDefines)+len(s.options.InjectPaths))

	// These are virtual paths that are generated for compound "--define" values.
	// They are special-cased and are not available for plugins to intercept.
	for _, define := range s.options.InjectedDefines {
		// These should be unique by construction so no need to check for collisions
		visitedKey := logger.Path{Text: fmt.Sprintf("<define:%s>", define.Name)}
		sourceIndex := s.allocateSourceIndex(visitedKey, cache.SourceIndexNormal)
		s.visited[visitedKey] = visitedFile{sourceIndex: sourceIndex}
		source := logger.Source{
			Index:          sourceIndex,
			KeyPath:        visitedKey,
			PrettyPath:     resolver.PrettyPath(s.fs, visitedKey),
			IdentifierName: js_ast.EnsureValidIdentifier(visitedKey.Text),
		}

		// The first "len(InjectedDefine)" injected files intentionally line up
		// with the injected defines by index. The index will be used to import
		// references to them in the parser.
		injectedFiles = append(injectedFiles, config.InjectedFile{
			Source:     source,
			DefineName: define.Name,
		})

		// Generate the file inline here since it has already been parsed
		expr := js_ast.Expr{Data: define.Data}
		ast := js_parser.LazyExportAST(s.log, source, js_parser.OptionsFromConfig(&s.options), expr, "")
		result := parseResult{
			ok: true,
			file: scannerFile{
				inputFile: graph.InputFile{
					Source: source,
					Repr:   &graph.JSRepr{AST: ast},
					Loader: config.LoaderJSON,
					SideEffects: graph.SideEffects{
						Kind: graph.NoSideEffects_PureData,
					},
				},
			},
		}

		// Append to the channel on a goroutine in case it blocks due to capacity
		s.remaining++
		go func() { s.resultChannel <- result }()
	}

	// Add user-specified injected files. Run resolver plugins on these files
	// so plugins can alter where they resolve to. These are run in parallel in
	// case any of these plugins block.
	injectResolveResults := make([]*resolver.ResolveResult, len(s.options.InjectPaths))
	injectAbsResolveDir := s.fs.Cwd()
	injectResolveWaitGroup := sync.WaitGroup{}
	injectResolveWaitGroup.Add(len(s.options.InjectPaths))
	for i, importPath := range s.options.InjectPaths {
		go func(i int, importPath string) {
			var importer logger.Path

			// Add a leading "./" if it's missing, similar to entry points
			absPath := importPath
			if !s.fs.IsAbs(absPath) {
				absPath = s.fs.Join(injectAbsResolveDir, absPath)
			}
			dir := s.fs.Dir(absPath)
			base := s.fs.Base(absPath)
			if entries, err, originalError := s.fs.ReadDirectory(dir); err == nil {
				if entry, _ := entries.Get(base); entry != nil && entry.Kind(s.fs) == fs.FileEntry {
					importer.Namespace = "file"
					if !s.fs.IsAbs(importPath) && resolver.IsPackagePath(importPath) {
						importPath = "./" + importPath
					}
				}
			} else if s.log.Level <= logger.LevelDebug && originalError != nil {
				s.log.AddID(logger.MsgID_None, logger.Debug, nil, logger.Range{}, fmt.Sprintf("Failed to read directory %q: %s", absPath, originalError.Error()))
			}

			// Run the resolver and log an error if the path couldn't be resolved
			resolveResult, didLogError, debug := RunOnResolvePlugins(
				s.options.Plugins,
				s.res,
				s.log,
				s.fs,
				&s.caches.FSCache,
				nil,
				logger.Range{},
				importer,
				importPath,
				logger.ImportAttributes{},
				ast.ImportEntryPoint,
				injectAbsResolveDir,
				nil,
			)
			if resolveResult != nil {
				if resolveResult.PathPair.IsExternal {
					s.log.AddError(nil, logger.Range{}, fmt.Sprintf("The injected path %q cannot be marked as external", importPath))
				} else {
					injectResolveResults[i] = resolveResult
				}
			} else if !didLogError {
				debug.LogErrorMsg(s.log, nil, logger.Range{}, fmt.Sprintf("Could not resolve %q", importPath), "", nil)
			}
			injectResolveWaitGroup.Done()
		}(i, importPath)
	}
	injectResolveWaitGroup.Wait()

	// Stop early if the build was cancelled while resolving
	if s.options.CancelFlag.DidCancel() {
		return
	}

	// Parse all entry points that were resolved successfully
	results := make([]config.InjectedFile, len(s.options.InjectPaths))
	j := 0
	var injectWaitGroup sync.WaitGroup
	for _, resolveResult := range injectResolveResults {
		if resolveResult != nil {
			channel := make(chan config.InjectedFile, 1)
			s.maybeParseFile(*resolveResult, resolver.PrettyPath(s.fs, resolveResult.PathPair.Primary), nil, logger.Range{}, nil, inputKindNormal, channel)
			injectWaitGroup.Add(1)

			// Wait for the results in parallel. The results slice is large enough so
			// it is not reallocated during the computations.
			go func(i int) {
				results[i] = <-channel
				injectWaitGroup.Done()
			}(j)
			j++
		}
	}
	injectWaitGroup.Wait()
	injectedFiles = append(injectedFiles, results[:j]...)

	// It's safe to mutate the options object to add the injected files here
	// because there aren't any concurrent "parseFile" goroutines at this point.
	// The only ones that were created by this point are the ones we created
	// above, and we've already waited for all of them to finish using the
	// "options" object.
	s.options.InjectedFiles = injectedFiles
}
+
// addEntryPoints resolves all user-specified entry points (plus an optional
// stdin entry point), schedules each resolved file for parsing, and returns
// one graph.EntryPoint per resolved path with its computed output path.
// Returns nil if the build was cancelled part-way through.
func (s *scanner) addEntryPoints(entryPoints []EntryPoint) []graph.EntryPoint {
	s.timer.Begin("Add entry points")
	defer s.timer.End("Add entry points")

	// Reserve a slot for each entry point
	entryMetas := make([]graph.EntryPoint, 0, len(entryPoints)+1)

	// Treat stdin as an extra entry point
	if stdin := s.options.Stdin; stdin != nil {
		stdinPath := logger.Path{Text: "<stdin>"}
		if stdin.SourceFile != "" {
			if stdin.AbsResolveDir == "" {
				stdinPath = logger.Path{Text: stdin.SourceFile}
			} else if s.fs.IsAbs(stdin.SourceFile) {
				stdinPath = logger.Path{Text: stdin.SourceFile, Namespace: "file"}
			} else {
				stdinPath = logger.Path{Text: s.fs.Join(stdin.AbsResolveDir, stdin.SourceFile), Namespace: "file"}
			}
		}
		resolveResult := resolver.ResolveResult{PathPair: resolver.PathPair{Primary: stdinPath}}
		sourceIndex := s.maybeParseFile(resolveResult, resolver.PrettyPath(s.fs, stdinPath), nil, logger.Range{}, nil, inputKindStdin, nil)
		entryMetas = append(entryMetas, graph.EntryPoint{
			OutputPath:  "stdin",
			SourceIndex: sourceIndex,
		})
	}

	if s.options.CancelFlag.DidCancel() {
		return nil
	}

	// Check each entry point ahead of time to see if it's a real file
	entryPointAbsResolveDir := s.fs.Cwd()
	for i := range entryPoints {
		entryPoint := &entryPoints[i]
		absPath := entryPoint.InputPath
		if strings.ContainsRune(absPath, '*') {
			continue // Ignore glob patterns
		}
		if !s.fs.IsAbs(absPath) {
			absPath = s.fs.Join(entryPointAbsResolveDir, absPath)
		}
		dir := s.fs.Dir(absPath)
		base := s.fs.Base(absPath)
		if entries, err, originalError := s.fs.ReadDirectory(dir); err == nil {
			if entry, _ := entries.Get(base); entry != nil && entry.Kind(s.fs) == fs.FileEntry {
				entryPoint.InputPathInFileNamespace = true

				// Entry point paths without a leading "./" are interpreted as package
				// paths. This happens because they go through general path resolution
				// like all other import paths so that plugins can run on them. Requiring
				// a leading "./" for a relative path simplifies writing plugins because
				// entry points aren't a special case.
				//
				// However, requiring a leading "./" also breaks backward compatibility
				// and makes working with the CLI more difficult. So attempt to insert
				// "./" automatically when needed. We don't want to unconditionally insert
				// a leading "./" because the path may not be a file system path. For
				// example, it may be a URL. So only insert a leading "./" when the path
				// is an exact match for an existing file.
				if !s.fs.IsAbs(entryPoint.InputPath) && resolver.IsPackagePath(entryPoint.InputPath) {
					entryPoint.InputPath = "./" + entryPoint.InputPath
				}
			}
		} else if s.log.Level <= logger.LevelDebug && originalError != nil {
			s.log.AddID(logger.MsgID_None, logger.Debug, nil, logger.Range{}, fmt.Sprintf("Failed to read directory %q: %s", absPath, originalError.Error()))
		}
	}

	if s.options.CancelFlag.DidCancel() {
		return nil
	}

	// Add any remaining entry points. Run resolver plugins on these entry points
	// so plugins can alter where they resolve to. These are run in parallel in
	// case any of these plugins block.
	type entryPointInfo struct {
		results []resolver.ResolveResult
		isGlob  bool
	}
	entryPointInfos := make([]entryPointInfo, len(entryPoints))
	entryPointWaitGroup := sync.WaitGroup{}
	entryPointWaitGroup.Add(len(entryPoints))
	for i, entryPoint := range entryPoints {
		go func(i int, entryPoint EntryPoint) {
			var importer logger.Path
			if entryPoint.InputPathInFileNamespace {
				importer.Namespace = "file"
			}

			// Special-case glob patterns here: a glob may expand into any
			// number of entry points
			if strings.ContainsRune(entryPoint.InputPath, '*') {
				if pattern := helpers.ParseGlobPattern(entryPoint.InputPath); len(pattern) > 1 {
					prettyPattern := fmt.Sprintf("%q", entryPoint.InputPath)
					if results, msg := s.res.ResolveGlob(entryPointAbsResolveDir, pattern, ast.ImportEntryPoint, prettyPattern); results != nil {
						// Sort the glob matches so the output is deterministic
						keys := make([]string, 0, len(results))
						for key := range results {
							keys = append(keys, key)
						}
						sort.Strings(keys)
						info := entryPointInfo{isGlob: true}
						for _, key := range keys {
							info.results = append(info.results, results[key])
						}
						entryPointInfos[i] = info
						if msg != nil {
							s.log.AddID(msg.ID, msg.Kind, nil, logger.Range{}, msg.Data.Text)
						}
					} else {
						s.log.AddError(nil, logger.Range{}, fmt.Sprintf("Could not resolve %q", entryPoint.InputPath))
					}
					entryPointWaitGroup.Done()
					return
				}
			}

			// Run the resolver and log an error if the path couldn't be resolved
			resolveResult, didLogError, debug := RunOnResolvePlugins(
				s.options.Plugins,
				s.res,
				s.log,
				s.fs,
				&s.caches.FSCache,
				nil,
				logger.Range{},
				importer,
				entryPoint.InputPath,
				logger.ImportAttributes{},
				ast.ImportEntryPoint,
				entryPointAbsResolveDir,
				nil,
			)
			if resolveResult != nil {
				if resolveResult.PathPair.IsExternal {
					s.log.AddError(nil, logger.Range{}, fmt.Sprintf("The entry point %q cannot be marked as external", entryPoint.InputPath))
				} else {
					entryPointInfos[i] = entryPointInfo{results: []resolver.ResolveResult{*resolveResult}}
				}
			} else if !didLogError {
				// Suggest the "./"-prefixed relative path when that would
				// have resolved to a real file
				var notes []logger.MsgData
				if !s.fs.IsAbs(entryPoint.InputPath) {
					if query, _ := s.res.ProbeResolvePackageAsRelative(entryPointAbsResolveDir, entryPoint.InputPath, ast.ImportEntryPoint); query != nil {
						notes = append(notes, logger.MsgData{
							Text: fmt.Sprintf("Use the relative path %q to reference the file %q. "+
								"Without the leading \"./\", the path %q is being interpreted as a package path instead.",
								"./"+entryPoint.InputPath, resolver.PrettyPath(s.fs, query.PathPair.Primary), entryPoint.InputPath),
						})
					}
				}
				debug.LogErrorMsg(s.log, nil, logger.Range{}, fmt.Sprintf("Could not resolve %q", entryPoint.InputPath), "", notes)
			}
			entryPointWaitGroup.Done()
		}(i, entryPoint)
	}
	entryPointWaitGroup.Wait()

	if s.options.CancelFlag.DidCancel() {
		return nil
	}

	// Parse all entry points that were resolved successfully
	for i, info := range entryPointInfos {
		if info.results == nil {
			continue
		}

		for _, resolveResult := range info.results {
			prettyPath := resolver.PrettyPath(s.fs, resolveResult.PathPair.Primary)
			sourceIndex := s.maybeParseFile(resolveResult, prettyPath, nil, logger.Range{}, nil, inputKindEntryPoint, nil)
			outputPath := entryPoints[i].OutputPath
			outputPathWasAutoGenerated := false

			// If the output path is missing, automatically generate one from the input path
			if outputPath == "" {
				if info.isGlob {
					outputPath = prettyPath
				} else {
					outputPath = entryPoints[i].InputPath
				}
				windowsVolumeLabel := ""

				// The ":" character is invalid in file paths on Windows except when
				// it's used as a volume separator. Special-case that here so volume
				// labels don't break on Windows.
				if s.fs.IsAbs(outputPath) && len(outputPath) >= 3 && outputPath[1] == ':' {
					if c := outputPath[0]; (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
						if c := outputPath[2]; c == '/' || c == '\\' {
							windowsVolumeLabel = outputPath[:3]
							outputPath = outputPath[3:]
						}
					}
				}

				// For cross-platform robustness, do not allow characters in the output
				// path that are invalid on Windows. This is especially relevant when
				// the input path is something other than a file path, such as a URL.
				outputPath = sanitizeFilePathForVirtualModulePath(outputPath)
				if windowsVolumeLabel != "" {
					outputPath = windowsVolumeLabel + outputPath
				}
				outputPathWasAutoGenerated = true
			}

			entryMetas = append(entryMetas, graph.EntryPoint{
				OutputPath:                 outputPath,
				SourceIndex:                sourceIndex,
				OutputPathWasAutoGenerated: outputPathWasAutoGenerated,
			})
		}
	}

	// Turn all automatically-generated output paths into absolute paths
	for i := range entryMetas {
		entryPoint := &entryMetas[i]
		if entryPoint.OutputPathWasAutoGenerated && !s.fs.IsAbs(entryPoint.OutputPath) {
			entryPoint.OutputPath = s.fs.Join(entryPointAbsResolveDir, entryPoint.OutputPath)
		}
	}

	// Automatically compute "outbase" if it wasn't provided
	if s.options.AbsOutputBase == "" {
		s.options.AbsOutputBase = lowestCommonAncestorDirectory(s.fs, entryMetas)
		if s.options.AbsOutputBase == "" {
			s.options.AbsOutputBase = entryPointAbsResolveDir
		}
	}

	// Turn all output paths back into relative paths, but this time relative to
	// the "outbase" value we computed above
	for i := range entryMetas {
		entryPoint := &entryMetas[i]
		if s.fs.IsAbs(entryPoint.OutputPath) {
			if !entryPoint.OutputPathWasAutoGenerated {
				// If an explicit absolute output path was specified, use the path
				// relative to the "outdir" directory
				if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, entryPoint.OutputPath); ok {
					entryPoint.OutputPath = relPath
				}
			} else {
				// Otherwise if the absolute output path was derived from the input
				// path, use the path relative to the "outbase" directory
				if relPath, ok := s.fs.Rel(s.options.AbsOutputBase, entryPoint.OutputPath); ok {
					entryPoint.OutputPath = relPath
				}

				// Strip the file extension from the output path if there is one so the
				// "out extension" setting is used instead
				if last := strings.LastIndexAny(entryPoint.OutputPath, "/.\\"); last != -1 && entryPoint.OutputPath[last] == '.' {
					entryPoint.OutputPath = entryPoint.OutputPath[:last]
				}
			}
		}
	}

	return entryMetas
}
+
// lowestCommonAncestorDirectory returns the deepest directory containing
// every automatically-generated entry point output path. Entry points with
// explicitly-specified output paths are ignored. Comparison is
// case-insensitive and treats both '/' and '\\' as separators so Windows
// paths behave sensibly. Returns "" when there are no auto-generated paths.
func lowestCommonAncestorDirectory(fs fs.FS, entryPoints []graph.EntryPoint) string {
	// Ignore any explicitly-specified output paths
	absPaths := make([]string, 0, len(entryPoints))
	for _, entryPoint := range entryPoints {
		if entryPoint.OutputPathWasAutoGenerated {
			absPaths = append(absPaths, entryPoint.OutputPath)
		}
	}

	if len(absPaths) == 0 {
		return ""
	}

	// Fold each remaining directory into the running common ancestor
	lowestAbsDir := fs.Dir(absPaths[0])

	for _, absPath := range absPaths[1:] {
		absDir := fs.Dir(absPath)
		lastSlash := 0
		a := 0
		b := 0

		// Walk both directory strings rune-by-rune in lock step, remembering
		// the byte offset of the last separator where both still agreed
		for {
			runeA, widthA := utf8.DecodeRuneInString(absDir[a:])
			runeB, widthB := utf8.DecodeRuneInString(lowestAbsDir[b:])
			// A width of 0 means the end of the string, which also counts as
			// a path component boundary
			boundaryA := widthA == 0 || runeA == '/' || runeA == '\\'
			boundaryB := widthB == 0 || runeB == '/' || runeB == '\\'

			if boundaryA && boundaryB {
				if widthA == 0 || widthB == 0 {
					// Truncate to the smaller path if one path is a prefix of the other
					lowestAbsDir = absDir[:a]
					break
				} else {
					// Track the longest common directory so far
					lastSlash = a
				}
			} else if boundaryA != boundaryB || unicode.ToLower(runeA) != unicode.ToLower(runeB) {
				// If we're at the top-level directory, then keep the slash
				if lastSlash < len(absDir) && !strings.ContainsAny(absDir[:lastSlash], "\\/") {
					lastSlash++
				}

				// If both paths are different at this point, stop and set the lowest so
				// far to the common parent directory. Compare using a case-insensitive
				// comparison to handle paths on Windows.
				lowestAbsDir = absDir[:lastSlash]
				break
			}

			a += widthA
			b += widthB
		}
	}

	return lowestAbsDir
}
+
+// scanAllDependencies drains the parser result channel until every discovered
+// file has been processed ("s.remaining" counts outstanding parses). For each
+// successfully parsed file it walks the import records: glob imports get a
+// synthetic generated module, bundled paths are scheduled for parsing via
+// maybeParseFile, and external "file"-namespace paths are rewritten to be
+// relative to the output directory. Scanning stops early on cancellation.
+func (s *scanner) scanAllDependencies() {
+	s.timer.Begin("Scan all dependencies")
+	defer s.timer.End("Scan all dependencies")
+
+	// Continue scanning until all dependencies have been discovered
+	for s.remaining > 0 {
+		if s.options.CancelFlag.DidCancel() {
+			return
+		}
+
+		result := <-s.resultChannel
+		s.remaining--
+		if !result.ok {
+			continue
+		}
+
+		// Don't try to resolve paths if we're not bundling
+		if recordsPtr := result.file.inputFile.Repr.ImportRecords(); s.options.Mode == config.ModeBundle && recordsPtr != nil {
+			records := *recordsPtr
+			for importRecordIndex := range records {
+				record := &records[importRecordIndex]
+
+				// This is used for error messages
+				var with *ast.ImportAssertOrWith
+				if record.AssertOrWith != nil && record.AssertOrWith.Keyword == ast.WithKeyword {
+					with = record.AssertOrWith
+				}
+
+				// Skip this import record if the previous resolver call failed
+				resolveResult := result.resolveResults[importRecordIndex]
+				if resolveResult == nil {
+					if globResults := result.globResolveResults[uint32(importRecordIndex)]; globResults.resolveResults != nil {
+						sourceIndex := s.allocateGlobSourceIndex(result.file.inputFile.Source.Index, uint32(importRecordIndex))
+						record.SourceIndex = ast.MakeIndex32(sourceIndex)
+						s.results[sourceIndex] = s.generateResultForGlobResolve(sourceIndex, globResults.absPath,
+							&result.file.inputFile.Source, record.Range, with, record.GlobPattern.Kind, globResults, record.AssertOrWith)
+					}
+					continue
+				}
+
+				path := resolveResult.PathPair.Primary
+				if !resolveResult.PathPair.IsExternal {
+					// Handle a path within the bundle
+					sourceIndex := s.maybeParseFile(*resolveResult, resolver.PrettyPath(s.fs, path),
+						&result.file.inputFile.Source, record.Range, with, inputKindNormal, nil)
+					record.SourceIndex = ast.MakeIndex32(sourceIndex)
+				} else {
+					// Allow this import statement to be removed if something marked it as "sideEffects: false"
+					if resolveResult.PrimarySideEffectsData != nil {
+						record.Flags |= ast.IsExternalWithoutSideEffects
+					}
+
+					// If the path to the external module is relative to the source
+					// file, rewrite the path to be relative to the working directory
+					if path.Namespace == "file" {
+						if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, path.Text); ok {
+							// Prevent issues with path separators being different on Windows
+							relPath = strings.ReplaceAll(relPath, "\\", "/")
+							if resolver.IsPackagePath(relPath) {
+								relPath = "./" + relPath
+							}
+							record.Path.Text = relPath
+						} else {
+							record.Path = path
+						}
+					} else {
+						record.Path = path
+					}
+				}
+			}
+		}
+
+		// Store the finished result under its source index for later processing
+		s.results[result.file.inputFile.Source.Index] = result
+	}
+}
+
+// generateResultForGlobResolve synthesizes a fake module for a glob import.
+// The module's AST exports an object literal mapping each matched path (in
+// sorted order) to an arrow function that imports or requires that path,
+// matching the glob's import kind. Non-external matches are scheduled for
+// parsing; "file"-namespace paths are rewritten relative to the output
+// directory. The result is returned as a normal parse result so the linker
+// can treat it like any other file.
+func (s *scanner) generateResultForGlobResolve(
+	sourceIndex uint32,
+	fakeSourcePath string,
+	importSource *logger.Source,
+	importRange logger.Range,
+	importWith *ast.ImportAssertOrWith,
+	kind ast.ImportKind,
+	result globResolveResult,
+	assertions *ast.ImportAssertOrWith,
+) parseResult {
+	// Sort the matched paths for deterministic output
+	keys := make([]string, 0, len(result.resolveResults))
+	for key := range result.resolveResults {
+		keys = append(keys, key)
+	}
+	sort.Strings(keys)
+
+	object := js_ast.EObject{Properties: make([]js_ast.Property, 0, len(result.resolveResults))}
+	importRecords := make([]ast.ImportRecord, 0, len(result.resolveResults))
+	resolveResults := make([]*resolver.ResolveResult, 0, len(result.resolveResults))
+
+	for _, key := range keys {
+		resolveResult := result.resolveResults[key]
+		var value js_ast.Expr
+
+		importRecordIndex := uint32(len(importRecords))
+		// Per-record source index (shadows the outer "sourceIndex" parameter)
+		var sourceIndex ast.Index32
+
+		if !resolveResult.PathPair.IsExternal {
+			sourceIndex = ast.MakeIndex32(s.maybeParseFile(
+				resolveResult,
+				resolver.PrettyPath(s.fs, resolveResult.PathPair.Primary),
+				importSource,
+				importRange,
+				importWith,
+				inputKindNormal,
+				nil,
+			))
+		}
+
+		path := resolveResult.PathPair.Primary
+
+		// If the path to the external module is relative to the source
+		// file, rewrite the path to be relative to the working directory
+		if path.Namespace == "file" {
+			if relPath, ok := s.fs.Rel(s.options.AbsOutputDir, path.Text); ok {
+				// Prevent issues with path separators being different on Windows
+				relPath = strings.ReplaceAll(relPath, "\\", "/")
+				if resolver.IsPackagePath(relPath) {
+					relPath = "./" + relPath
+				}
+				path.Text = relPath
+			}
+		}
+
+		resolveResults = append(resolveResults, &resolveResult)
+		importRecords = append(importRecords, ast.ImportRecord{
+			Path:         path,
+			SourceIndex:  sourceIndex,
+			AssertOrWith: assertions,
+			Kind:         kind,
+		})
+
+		// Generate "() => import(...)" or "() => require(...)" for this match
+		switch kind {
+		case ast.ImportDynamic:
+			value.Data = &js_ast.EImportString{ImportRecordIndex: importRecordIndex}
+		case ast.ImportRequire:
+			value.Data = &js_ast.ERequireString{ImportRecordIndex: importRecordIndex}
+		default:
+			panic("Internal error")
+		}
+
+		object.Properties = append(object.Properties, js_ast.Property{
+			Key: js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(key)}},
+			ValueOrNil: js_ast.Expr{Data: &js_ast.EArrow{
+				Body:       js_ast.FnBody{Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Data: &js_ast.SReturn{ValueOrNil: value}}}}},
+				PreferExpr: true,
+			}},
+		})
+	}
+
+	source := logger.Source{
+		KeyPath:    logger.Path{Text: fakeSourcePath, Namespace: "file"},
+		PrettyPath: result.prettyPath,
+		Index:      sourceIndex,
+	}
+	// Note: this local "ast" shadows the imported "ast" package from here on
+	ast := js_parser.GlobResolveAST(s.log, source, importRecords, &object, result.exportAlias)
+
+	// Fill out "nil" for any additional imports (i.e. from the runtime)
+	for len(resolveResults) < len(ast.ImportRecords) {
+		resolveResults = append(resolveResults, nil)
+	}
+
+	return parseResult{
+		resolveResults: resolveResults,
+		file: scannerFile{
+			inputFile: graph.InputFile{
+				Source: source,
+				Repr: &graph.JSRepr{
+					AST: ast,
+				},
+				OmitFromSourceMapsAndMetafile: true,
+			},
+		},
+		ok: true,
+	}
+}
+
+// processScannedFiles finalizes all scanned files after scanning completes:
+// it disambiguates pretty-printed paths that collide due to import
+// attributes, rewrites import records (dual-package hazard avoidance, CSS
+// stub generation for JS-to-CSS imports, "copy" loader redirection),
+// validates import assertions and CSS import kinds, emits the per-file
+// metafile JSON chunks, and generates additional output files for the
+// "file"/"copy" loaders. It returns the final array of files for the linker,
+// indexed by source index.
+func (s *scanner) processScannedFiles(entryPointMeta []graph.EntryPoint) []scannerFile {
+	s.timer.Begin("Process scanned files")
+	defer s.timer.End("Process scanned files")
+
+	// Build a set of entry point source indices for quick lookup
+	entryPointSourceIndexToMetaIndex := make(map[uint32]uint32, len(entryPointMeta))
+	for i, meta := range entryPointMeta {
+		entryPointSourceIndexToMetaIndex[meta.SourceIndex] = uint32(i)
+	}
+
+	// Check for pretty-printed path collisions
+	importAttributeNameCollisions := make(map[string][]uint32)
+	for sourceIndex := range s.results {
+		if result := &s.results[sourceIndex]; result.ok {
+			prettyPath := result.file.inputFile.Source.PrettyPath
+			importAttributeNameCollisions[prettyPath] = append(importAttributeNameCollisions[prettyPath], uint32(sourceIndex))
+		}
+	}
+
+	// Import attributes can result in the same file being imported multiple
+	// times in different ways. If that happens, append the import attributes
+	// to the pretty-printed file names to disambiguate them. This renaming
+	// must happen before we construct the metafile JSON chunks below.
+	for _, sourceIndices := range importAttributeNameCollisions {
+		if len(sourceIndices) == 1 {
+			continue
+		}
+
+		for _, sourceIndex := range sourceIndices {
+			source := &s.results[sourceIndex].file.inputFile.Source
+			attrs := source.KeyPath.ImportAttributes.DecodeIntoArray()
+			if len(attrs) == 0 {
+				continue
+			}
+
+			// Render the attributes as " with { key: 'value', ... }"
+			var sb strings.Builder
+			sb.WriteString(" with {")
+			for i, attr := range attrs {
+				if i > 0 {
+					sb.WriteByte(',')
+				}
+				sb.WriteByte(' ')
+				if js_ast.IsIdentifier(attr.Key) {
+					sb.WriteString(attr.Key)
+				} else {
+					sb.Write(helpers.QuoteSingle(attr.Key, false))
+				}
+				sb.WriteString(": ")
+				sb.Write(helpers.QuoteSingle(attr.Value, false))
+			}
+			sb.WriteString(" }")
+			source.PrettyPath += sb.String()
+		}
+	}
+
+	// Now that all files have been scanned, process the final file import records
+	for sourceIndex, result := range s.results {
+		if !result.ok {
+			continue
+		}
+
+		sb := strings.Builder{}
+		isFirstImport := true
+
+		// Begin the metadata chunk
+		if s.options.NeedsMetafile {
+			sb.Write(helpers.QuoteForJSON(result.file.inputFile.Source.PrettyPath, s.options.ASCIIOnly))
+			sb.WriteString(fmt.Sprintf(": {\n      \"bytes\": %d,\n      \"imports\": [", len(result.file.inputFile.Source.Contents)))
+		}
+
+		// Don't try to resolve paths if we're not bundling
+		if recordsPtr := result.file.inputFile.Repr.ImportRecords(); s.options.Mode == config.ModeBundle && recordsPtr != nil {
+			records := *recordsPtr
+			tracker := logger.MakeLineColumnTracker(&result.file.inputFile.Source)
+
+			for importRecordIndex := range records {
+				record := &records[importRecordIndex]
+
+				// Save the import attributes to the metafile
+				var metafileWith string
+				if s.options.NeedsMetafile {
+					if with := record.AssertOrWith; with != nil && with.Keyword == ast.WithKeyword && len(with.Entries) > 0 {
+						data := strings.Builder{}
+						data.WriteString(",\n          \"with\": {")
+						for i, entry := range with.Entries {
+							if i > 0 {
+								data.WriteByte(',')
+							}
+							data.WriteString("\n            ")
+							data.Write(helpers.QuoteForJSON(helpers.UTF16ToString(entry.Key), s.options.ASCIIOnly))
+							data.WriteString(": ")
+							data.Write(helpers.QuoteForJSON(helpers.UTF16ToString(entry.Value), s.options.ASCIIOnly))
+						}
+						data.WriteString("\n          }")
+						metafileWith = data.String()
+					}
+				}
+
+				// Skip this import record if the previous resolver call failed
+				resolveResult := result.resolveResults[importRecordIndex]
+				if resolveResult == nil || !record.SourceIndex.IsValid() {
+					if s.options.NeedsMetafile {
+						if isFirstImport {
+							isFirstImport = false
+							sb.WriteString("\n        ")
+						} else {
+							sb.WriteString(",\n        ")
+						}
+						sb.WriteString(fmt.Sprintf("{\n          \"path\": %s,\n          \"kind\": %s,\n          \"external\": true%s\n        }",
+							helpers.QuoteForJSON(record.Path.Text, s.options.ASCIIOnly),
+							helpers.QuoteForJSON(record.Kind.StringForMetafile(), s.options.ASCIIOnly),
+							metafileWith))
+					}
+					continue
+				}
+
+				// Now that all files have been scanned, look for packages that are imported
+				// both with "import" and "require". Rewrite any imports that reference the
+				// "module" package.json field to the "main" package.json field instead.
+				//
+				// This attempts to automatically avoid the "dual package hazard" where a
+				// package has both a CommonJS module version and an ECMAScript module
+				// version and exports a non-object in CommonJS (often a function). If we
+				// pick the "module" field and the package is imported with "require" then
+				// code expecting a function will crash.
+				if resolveResult.PathPair.HasSecondary() {
+					secondaryKey := resolveResult.PathPair.Secondary
+					if secondaryKey.Namespace == "file" {
+						secondaryKey.Text = canonicalFileSystemPathForWindows(secondaryKey.Text)
+					}
+					if secondaryVisited, ok := s.visited[secondaryKey]; ok {
+						record.SourceIndex = ast.MakeIndex32(secondaryVisited.sourceIndex)
+					}
+				}
+
+				// Generate metadata about each import
+				otherResult := &s.results[record.SourceIndex.GetIndex()]
+				otherFile := &otherResult.file
+				if s.options.NeedsMetafile {
+					if isFirstImport {
+						isFirstImport = false
+						sb.WriteString("\n        ")
+					} else {
+						sb.WriteString(",\n        ")
+					}
+					sb.WriteString(fmt.Sprintf("{\n          \"path\": %s,\n          \"kind\": %s,\n          \"original\": %s%s\n        }",
+						helpers.QuoteForJSON(otherFile.inputFile.Source.PrettyPath, s.options.ASCIIOnly),
+						helpers.QuoteForJSON(record.Kind.StringForMetafile(), s.options.ASCIIOnly),
+						helpers.QuoteForJSON(record.Path.Text, s.options.ASCIIOnly),
+						metafileWith))
+				}
+
+				// Validate that imports with "assert { type: 'json' }" were imported
+				// with the JSON loader. This is done to match the behavior of these
+				// import assertions in a real JavaScript runtime. In addition, we also
+				// allow the copy loader since this is sort of like marking the path
+				// as external (the import assertions are kept and the real JavaScript
+				// runtime evaluates them, not us).
+				if record.Flags.Has(ast.AssertTypeJSON) && otherResult.ok && otherFile.inputFile.Loader != config.LoaderJSON && otherFile.inputFile.Loader != config.LoaderCopy {
+					s.log.AddErrorWithNotes(&tracker, record.Range,
+						fmt.Sprintf("The file %q was loaded with the %q loader", otherFile.inputFile.Source.PrettyPath, config.LoaderToString[otherFile.inputFile.Loader]),
+						[]logger.MsgData{
+							tracker.MsgData(js_lexer.RangeOfImportAssertOrWith(result.file.inputFile.Source,
+								*ast.FindAssertOrWithEntry(record.AssertOrWith.Entries, "type"), js_lexer.KeyAndValueRange),
+								"This import assertion requires the loader to be \"json\" instead:"),
+							{Text: "You need to either reconfigure esbuild to ensure that the loader for this file is \"json\" or you need to remove this import assertion."}})
+				}
+
+				switch record.Kind {
+				case ast.ImportComposesFrom:
+					// Using a JavaScript file with CSS "composes" is not allowed
+					if _, ok := otherFile.inputFile.Repr.(*graph.JSRepr); ok && otherFile.inputFile.Loader != config.LoaderEmpty {
+						s.log.AddErrorWithNotes(&tracker, record.Range,
+							fmt.Sprintf("Cannot use \"composes\" with %q", otherFile.inputFile.Source.PrettyPath),
+							[]logger.MsgData{{Text: fmt.Sprintf(
+								"You can only use \"composes\" with CSS files and %q is not a CSS file (it was loaded with the %q loader).",
+								otherFile.inputFile.Source.PrettyPath, config.LoaderToString[otherFile.inputFile.Loader])}})
+					}
+
+				case ast.ImportAt:
+					// Using a JavaScript file with CSS "@import" is not allowed
+					if _, ok := otherFile.inputFile.Repr.(*graph.JSRepr); ok && otherFile.inputFile.Loader != config.LoaderEmpty {
+						s.log.AddErrorWithNotes(&tracker, record.Range,
+							fmt.Sprintf("Cannot import %q into a CSS file", otherFile.inputFile.Source.PrettyPath),
+							[]logger.MsgData{{Text: fmt.Sprintf(
+								"An \"@import\" rule can only be used to import another CSS file and %q is not a CSS file (it was loaded with the %q loader).",
+								otherFile.inputFile.Source.PrettyPath, config.LoaderToString[otherFile.inputFile.Loader])}})
+					}
+
+				case ast.ImportURL:
+					// Using a JavaScript or CSS file with CSS "url()" is not allowed
+					switch otherRepr := otherFile.inputFile.Repr.(type) {
+					case *graph.CSSRepr:
+						s.log.AddErrorWithNotes(&tracker, record.Range,
+							fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath),
+							[]logger.MsgData{{Text: fmt.Sprintf(
+								"You can't use a \"url()\" token to reference a CSS file, and %q is a CSS file (it was loaded with the %q loader).",
+								otherFile.inputFile.Source.PrettyPath, config.LoaderToString[otherFile.inputFile.Loader])}})
+
+					case *graph.JSRepr:
+						if otherRepr.AST.URLForCSS == "" && otherFile.inputFile.Loader != config.LoaderEmpty {
+							s.log.AddErrorWithNotes(&tracker, record.Range,
+								fmt.Sprintf("Cannot use %q as a URL", otherFile.inputFile.Source.PrettyPath),
+								[]logger.MsgData{{Text: fmt.Sprintf(
+									"You can't use a \"url()\" token to reference the file %q because it was loaded with the %q loader, which doesn't provide a URL to embed in the resulting CSS.",
+									otherFile.inputFile.Source.PrettyPath, config.LoaderToString[otherFile.inputFile.Loader])}})
+						}
+					}
+				}
+
+				// If the imported file uses the "copy" loader, then move it from
+				// "SourceIndex" to "CopySourceIndex" so we don't end up bundling it.
+				if _, ok := otherFile.inputFile.Repr.(*graph.CopyRepr); ok {
+					record.CopySourceIndex = record.SourceIndex
+					record.SourceIndex = ast.Index32{}
+					continue
+				}
+
+				// If an import from a JavaScript file targets a CSS file, generate a
+				// JavaScript stub to ensure that JavaScript files only ever import
+				// other JavaScript files.
+				if _, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok {
+					if css, ok := otherFile.inputFile.Repr.(*graph.CSSRepr); ok {
+						if s.options.WriteToStdout {
+							s.log.AddError(&tracker, record.Range,
+								fmt.Sprintf("Cannot import %q into a JavaScript file without an output path configured", otherFile.inputFile.Source.PrettyPath))
+						} else if !css.JSSourceIndex.IsValid() {
+							stubKey := otherFile.inputFile.Source.KeyPath
+							if stubKey.Namespace == "file" {
+								stubKey.Text = canonicalFileSystemPathForWindows(stubKey.Text)
+							}
+							sourceIndex := s.allocateSourceIndex(stubKey, cache.SourceIndexJSStubForCSS)
+							source := otherFile.inputFile.Source
+							source.Index = sourceIndex
+							s.results[sourceIndex] = parseResult{
+								file: scannerFile{
+									inputFile: graph.InputFile{
+										Source: source,
+										Loader: otherFile.inputFile.Loader,
+										Repr: &graph.JSRepr{
+											// Note: The actual export object will be filled in by the linker
+											AST: js_parser.LazyExportAST(s.log, source,
+												js_parser.OptionsFromConfig(&s.options), js_ast.Expr{Data: js_ast.ENullShared}, ""),
+											CSSSourceIndex: ast.MakeIndex32(record.SourceIndex.GetIndex()),
+										},
+									},
+								},
+								ok: true,
+							}
+							css.JSSourceIndex = ast.MakeIndex32(sourceIndex)
+						}
+						record.SourceIndex = css.JSSourceIndex
+						if !css.JSSourceIndex.IsValid() {
+							continue
+						}
+					}
+				}
+
+				// Warn about this import if it's a bare import statement without any
+				// imported names (i.e. a side-effect-only import) and the module has
+				// been marked as having no side effects.
+				//
+				// Except don't do this if this file is inside "node_modules" since
+				// it's a bug in the package and the user won't be able to do anything
+				// about it. Note that this can result in esbuild silently generating
+				// broken code. If this actually happens for people, it's probably worth
+				// re-enabling the warning about code inside "node_modules".
+				if record.Flags.Has(ast.WasOriginallyBareImport) && !s.options.IgnoreDCEAnnotations &&
+					!helpers.IsInsideNodeModules(result.file.inputFile.Source.KeyPath.Text) {
+					if otherModule := &s.results[record.SourceIndex.GetIndex()].file.inputFile; otherModule.SideEffects.Kind != graph.HasSideEffects &&
+						// Do not warn if this is from a plugin, since removing the import
+						// would cause the plugin to not run, and running a plugin is a side
+						// effect.
+						otherModule.SideEffects.Kind != graph.NoSideEffects_PureData_FromPlugin &&
+
+						// Do not warn if this has no side effects because the parsed AST
+						// is empty. This is the case for ".d.ts" files, for example.
+						otherModule.SideEffects.Kind != graph.NoSideEffects_EmptyAST {
+
+						var notes []logger.MsgData
+						var by string
+						if data := otherModule.SideEffects.Data; data != nil {
+							if data.PluginName != "" {
+								by = fmt.Sprintf(" by plugin %q", data.PluginName)
+							} else {
+								var text string
+								if data.IsSideEffectsArrayInJSON {
+									text = "It was excluded from the \"sideEffects\" array in the enclosing \"package.json\" file:"
+								} else {
+									text = "\"sideEffects\" is false in the enclosing \"package.json\" file:"
+								}
+								tracker := logger.MakeLineColumnTracker(data.Source)
+								notes = append(notes, tracker.MsgData(data.Range, text))
+							}
+						}
+						s.log.AddIDWithNotes(logger.MsgID_Bundler_IgnoredBareImport, logger.Warning, &tracker, record.Range,
+							fmt.Sprintf("Ignoring this import because %q was marked as having no side effects%s",
+								otherModule.Source.PrettyPath, by), notes)
+					}
+				}
+			}
+		}
+
+		// End the metadata chunk
+		if s.options.NeedsMetafile {
+			if !isFirstImport {
+				sb.WriteString("\n      ")
+			}
+			if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok &&
+				(repr.AST.ExportsKind == js_ast.ExportsCommonJS || repr.AST.ExportsKind == js_ast.ExportsESM) {
+				format := "cjs"
+				if repr.AST.ExportsKind == js_ast.ExportsESM {
+					format = "esm"
+				}
+				sb.WriteString(fmt.Sprintf("],\n      \"format\": %q", format))
+			} else {
+				sb.WriteString("]")
+			}
+			if attrs := result.file.inputFile.Source.KeyPath.ImportAttributes.DecodeIntoArray(); len(attrs) > 0 {
+				sb.WriteString(",\n      \"with\": {")
+				for i, attr := range attrs {
+					if i > 0 {
+						sb.WriteByte(',')
+					}
+					sb.WriteString(fmt.Sprintf("\n        %s: %s",
+						helpers.QuoteForJSON(attr.Key, s.options.ASCIIOnly),
+						helpers.QuoteForJSON(attr.Value, s.options.ASCIIOnly),
+					))
+				}
+				sb.WriteString("\n      }")
+			}
+			sb.WriteString("\n    }")
+		}
+
+		// Save the metadata chunk assembled above
+		result.file.jsonMetadataChunk = sb.String()
+
+		// If this file is from the "file" or "copy" loaders, generate an additional file
+		if result.file.inputFile.UniqueKeyForAdditionalFile != "" {
+			bytes := []byte(result.file.inputFile.Source.Contents)
+			template := s.options.AssetPathTemplate
+
+			// Use the entry path template instead of the asset path template if this
+			// file is an entry point and uses the "copy" loader. With the "file" loader
+			// the JS stub is the entry point, but with the "copy" loader the file is
+			// the entry point itself.
+			customFilePath := ""
+			useOutputFile := false
+			if result.file.inputFile.Loader == config.LoaderCopy {
+				if metaIndex, ok := entryPointSourceIndexToMetaIndex[uint32(sourceIndex)]; ok {
+					template = s.options.EntryPathTemplate
+					customFilePath = entryPointMeta[metaIndex].OutputPath
+					useOutputFile = s.options.AbsOutputFile != ""
+				}
+			}
+
+			// Add a hash to the file name to prevent multiple files with the same name
+			// but different contents from colliding
+			var hash string
+			if config.HasPlaceholder(template, config.HashPlaceholder) {
+				h := xxhash.New()
+				h.Write(bytes)
+				hash = HashForFileName(h.Sum(nil))
+			}
+
+			// This should use similar logic to how the linker computes output paths
+			var dir, base, ext string
+			if useOutputFile {
+				// If the output path was configured explicitly, use it verbatim
+				dir = "/"
+				base = s.fs.Base(s.options.AbsOutputFile)
+				ext = s.fs.Ext(base)
+				base = base[:len(base)-len(ext)]
+			} else {
+				// Otherwise, derive the output path from the input path
+				// Generate the input for the template
+				_, _, originalExt := logger.PlatformIndependentPathDirBaseExt(result.file.inputFile.Source.KeyPath.Text)
+				dir, base = PathRelativeToOutbase(
+					&result.file.inputFile,
+					&s.options,
+					s.fs,
+					/* avoidIndex */ false,
+					customFilePath,
+				)
+				ext = originalExt
+			}
+
+			// Apply the path template
+			templateExt := strings.TrimPrefix(ext, ".")
+			relPath := config.TemplateToString(config.SubstituteTemplate(template, config.PathPlaceholders{
+				Dir:  &dir,
+				Name: &base,
+				Hash: &hash,
+				Ext:  &templateExt,
+			})) + ext
+
+			// Optionally add metadata about the file
+			var jsonMetadataChunk string
+			if s.options.NeedsMetafile {
+				inputs := fmt.Sprintf("{\n        %s: {\n          \"bytesInOutput\": %d\n        }\n      }",
+					helpers.QuoteForJSON(result.file.inputFile.Source.PrettyPath, s.options.ASCIIOnly),
+					len(bytes),
+				)
+				jsonMetadataChunk = fmt.Sprintf(
+					"{\n      \"imports\": [],\n      \"exports\": [],\n      \"inputs\": %s,\n      \"bytes\": %d\n    }",
+					inputs,
+					len(bytes),
+				)
+			}
+
+			// Generate the additional file to copy into the output directory
+			result.file.inputFile.AdditionalFiles = []graph.OutputFile{{
+				AbsPath:           s.fs.Join(s.options.AbsOutputDir, relPath),
+				Contents:          bytes,
+				JSONMetadataChunk: jsonMetadataChunk,
+			}}
+		}
+
+		// Write the modified copy back since "result" is a per-iteration copy
+		s.results[sourceIndex] = result
+	}
+
+	// The linker operates on an array of files, so construct that now. This
+	// can't be constructed earlier because we generate new parse results for
+	// JavaScript stub files for CSS imports above.
+	files := make([]scannerFile, len(s.results))
+	for sourceIndex := range s.results {
+		if result := &s.results[sourceIndex]; result.ok {
+			s.validateTLA(uint32(sourceIndex))
+			files[sourceIndex] = result.file
+		}
+	}
+
+	return files
+}
+
+// validateTLA recursively checks the import graph rooted at sourceIndex for
+// top-level await. It records, per file, the nearest dependency chain that
+// leads to a top-level await (tlaCheck), reports an error for any "require"
+// of a file whose transitive dependencies contain top-level await (building
+// a chain of notes that walks the import path to the offending file), and
+// marks modules that need an async wrapper closure. A non-zero depth doubles
+// as a "visited" marker so each file is processed only once.
+func (s *scanner) validateTLA(sourceIndex uint32) tlaCheck {
+	result := &s.results[sourceIndex]
+
+	if result.ok && result.tlaCheck.depth == 0 {
+		if repr, ok := result.file.inputFile.Repr.(*graph.JSRepr); ok {
+			result.tlaCheck.depth = 1
+			if repr.AST.LiveTopLevelAwaitKeyword.Len > 0 {
+				result.tlaCheck.parent = ast.MakeIndex32(sourceIndex)
+			}
+
+			for importRecordIndex, record := range repr.AST.ImportRecords {
+				if record.SourceIndex.IsValid() && (record.Kind == ast.ImportRequire || record.Kind == ast.ImportStmt) {
+					parent := s.validateTLA(record.SourceIndex.GetIndex())
+					if !parent.parent.IsValid() {
+						continue
+					}
+
+					// Follow any import chains
+					if record.Kind == ast.ImportStmt && (!result.tlaCheck.parent.IsValid() || parent.depth < result.tlaCheck.depth) {
+						result.tlaCheck.depth = parent.depth + 1
+						result.tlaCheck.parent = record.SourceIndex
+						result.tlaCheck.importRecordIndex = uint32(importRecordIndex)
+						continue
+					}
+
+					// Require of a top-level await chain is forbidden
+					if record.Kind == ast.ImportRequire {
+						var notes []logger.MsgData
+						var tlaPrettyPath string
+						otherSourceIndex := record.SourceIndex.GetIndex()
+
+						// Build up a chain of relevant notes for all of the imports
+						for {
+							parentResult := &s.results[otherSourceIndex]
+							parentRepr := parentResult.file.inputFile.Repr.(*graph.JSRepr)
+
+							if parentRepr.AST.LiveTopLevelAwaitKeyword.Len > 0 {
+								tlaPrettyPath = parentResult.file.inputFile.Source.PrettyPath
+								tracker := logger.MakeLineColumnTracker(&parentResult.file.inputFile.Source)
+								notes = append(notes, tracker.MsgData(parentRepr.AST.LiveTopLevelAwaitKeyword,
+									fmt.Sprintf("The top-level await in %q is here:", tlaPrettyPath)))
+								break
+							}
+
+							if !parentResult.tlaCheck.parent.IsValid() {
+								notes = append(notes, logger.MsgData{Text: "unexpected invalid index"})
+								break
+							}
+
+							otherSourceIndex = parentResult.tlaCheck.parent.GetIndex()
+
+							tracker := logger.MakeLineColumnTracker(&parentResult.file.inputFile.Source)
+							notes = append(notes, tracker.MsgData(
+								parentRepr.AST.ImportRecords[parentResult.tlaCheck.importRecordIndex].Range,
+								fmt.Sprintf("The file %q imports the file %q here:",
+									parentResult.file.inputFile.Source.PrettyPath, s.results[otherSourceIndex].file.inputFile.Source.PrettyPath)))
+						}
+
+						var text string
+						importedPrettyPath := s.results[record.SourceIndex.GetIndex()].file.inputFile.Source.PrettyPath
+
+						if importedPrettyPath == tlaPrettyPath {
+							text = fmt.Sprintf("This require call is not allowed because the imported file %q contains a top-level await",
+								importedPrettyPath)
+						} else {
+							text = fmt.Sprintf("This require call is not allowed because the transitive dependency %q contains a top-level await",
+								tlaPrettyPath)
+						}
+
+						tracker := logger.MakeLineColumnTracker(&result.file.inputFile.Source)
+						s.log.AddErrorWithNotes(&tracker, record.Range, text, notes)
+					}
+				}
+			}
+
+			// Make sure that if we wrap this module in a closure, the closure is also
+			// async. This happens when you call "import()" on this module and code
+			// splitting is off.
+			if result.tlaCheck.parent.IsValid() {
+				repr.Meta.IsAsyncOrHasAsyncDependency = true
+			}
+		}
+	}
+
+	return result.tlaCheck
+}
+
+// DefaultExtensionToLoaderMap returns the default mapping from file name
+// extensions to content loaders, used when no explicit loader configuration
+// is supplied. The empty-string key covers files without an extension.
+func DefaultExtensionToLoaderMap() map[string]config.Loader {
+	loaders := make(map[string]config.Loader, 13)
+
+	// JavaScript family ("" represents files without an extension)
+	for _, ext := range []string{"", ".js", ".mjs", ".cjs"} {
+		loaders[ext] = config.LoaderJS
+	}
+	loaders[".jsx"] = config.LoaderJSX
+
+	// TypeScript family
+	loaders[".ts"] = config.LoaderTS
+	loaders[".cts"] = config.LoaderTSNoAmbiguousLessThan
+	loaders[".mts"] = config.LoaderTSNoAmbiguousLessThan
+	loaders[".tsx"] = config.LoaderTSX
+
+	// CSS and data formats
+	loaders[".css"] = config.LoaderCSS
+	loaders[".module.css"] = config.LoaderLocalCSS
+	loaders[".json"] = config.LoaderJSON
+	loaders[".txt"] = config.LoaderText
+
+	return loaders
+}
+
+// applyOptionDefaults fills in any unset options with their defaults (loader
+// map, output extensions, entry/chunk/asset path templates), derives
+// ProfilerNames from the identifier-minification setting, propagates implied
+// unsupported-feature overrides, and disables inline </script>/</style>
+// support for non-browser platforms unless explicitly overridden.
+func applyOptionDefaults(options *config.Options) {
+	if options.ExtensionToLoader == nil {
+		options.ExtensionToLoader = DefaultExtensionToLoaderMap()
+	}
+	if options.OutputExtensionJS == "" {
+		options.OutputExtensionJS = ".js"
+	}
+	if options.OutputExtensionCSS == "" {
+		options.OutputExtensionCSS = ".css"
+	}
+
+	// Configure default path templates
+	if len(options.EntryPathTemplate) == 0 {
+		options.EntryPathTemplate = []config.PathTemplate{
+			{Data: "./", Placeholder: config.DirPlaceholder},
+			{Data: "/", Placeholder: config.NamePlaceholder},
+		}
+	}
+	if len(options.ChunkPathTemplate) == 0 {
+		options.ChunkPathTemplate = []config.PathTemplate{
+			{Data: "./", Placeholder: config.NamePlaceholder},
+			{Data: "-", Placeholder: config.HashPlaceholder},
+		}
+	}
+	if len(options.AssetPathTemplate) == 0 {
+		options.AssetPathTemplate = []config.PathTemplate{
+			{Data: "./", Placeholder: config.NamePlaceholder},
+			{Data: "-", Placeholder: config.HashPlaceholder},
+		}
+	}
+
+	// Keep profiler-friendly names unless identifiers are being minified
+	options.ProfilerNames = !options.MinifyIdentifiers
+
+	// Automatically fix invalid configurations of unsupported features
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.AsyncAwait, compat.AsyncGenerator|compat.ForAwait|compat.TopLevelAwait)
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.Generator, compat.AsyncGenerator)
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.ObjectAccessors, compat.ClassPrivateAccessor|compat.ClassPrivateStaticAccessor)
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.ClassField, compat.ClassPrivateField)
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.ClassStaticField, compat.ClassPrivateStaticField)
+	fixInvalidUnsupportedJSFeatureOverrides(options, compat.Class,
+		compat.ClassField|compat.ClassPrivateAccessor|compat.ClassPrivateBrandCheck|compat.ClassPrivateField|
+			compat.ClassPrivateMethod|compat.ClassPrivateStaticAccessor|compat.ClassPrivateStaticField|
+			compat.ClassPrivateStaticMethod|compat.ClassStaticBlocks|compat.ClassStaticField)
+
+	// If we're not building for the browser, automatically disable support for
+	// inline </script> and </style> tags if there aren't currently any overrides
+	if options.Platform != config.PlatformBrowser {
+		if !options.UnsupportedJSFeatureOverridesMask.Has(compat.InlineScript) {
+			options.UnsupportedJSFeatures |= compat.InlineScript
+		}
+		if !options.UnsupportedCSSFeatureOverridesMask.Has(compat.InlineStyle) {
+			options.UnsupportedCSSFeatures |= compat.InlineStyle
+		}
+	}
+}
+
+// fixInvalidUnsupportedJSFeatureOverrides keeps the unsupported-feature sets
+// internally consistent: when the "implies" feature has been overridden as
+// unsupported, every feature in "implied" is also marked as unsupported in
+// the feature set, the override set, and the override mask.
+func fixInvalidUnsupportedJSFeatureOverrides(options *config.Options, implies compat.JSFeature, implied compat.JSFeature) {
+	// Nothing to propagate unless the implying feature was overridden as unsupported
+	if !options.UnsupportedJSFeatureOverrides.Has(implies) {
+		return
+	}
+	options.UnsupportedJSFeatures |= implied
+	options.UnsupportedJSFeatureOverrides |= implied
+	options.UnsupportedJSFeatureOverridesMask |= implied
+}
+
+// Linker is the signature of the link-phase entry point. Compile receives it
+// as a parameter, which keeps this package decoupled from the concrete linker
+// implementation. It consumes the scanned input files, entry points, and
+// source-map data and produces the final output files.
+type Linker func(
+	options *config.Options,
+	timer *helpers.Timer,
+	log logger.Log,
+	fs fs.FS,
+	res *resolver.Resolver,
+	inputFiles []graph.InputFile,
+	entryPoints []graph.EntryPoint,
+	uniqueKeyPrefix string,
+	reachableFiles []uint32,
+	dataForSourceMaps func() []DataForSourceMap,
+) []graph.OutputFile
+
+// Compile runs the link phase for this bundle using the provided "link"
+// implementation and returns the generated output files plus the metafile
+// JSON (empty unless options.NeedsMetafile is set). It returns (nil, "")
+// when the build has been canceled.
+func (b *Bundle) Compile(log logger.Log, timer *helpers.Timer, mangleCache map[string]interface{}, link Linker) ([]graph.OutputFile, string) {
+	timer.Begin("Compile phase")
+	defer timer.End("Compile phase")
+
+	if b.options.CancelFlag.DidCancel() {
+		return nil, ""
+	}
+
+	options := b.options
+
+	// In most cases we don't need synchronized access to the mangle cache
+	cssUsedLocalNames := make(map[string]bool)
+	options.ExclusiveMangleCacheUpdate = func(cb func(
+		mangleCache map[string]interface{},
+		cssUsedLocalNames map[string]bool,
+	)) {
+		cb(mangleCache, cssUsedLocalNames)
+	}
+
+	files := make([]graph.InputFile, len(b.files))
+	for i, file := range b.files {
+		files[i] = file.inputFile
+	}
+
+	// Get the base path from the options or choose the lowest common ancestor of all entry points
+	allReachableFiles := findReachableFiles(files, b.entryPoints)
+
+	// Compute source map data in parallel with linking
+	timer.Begin("Spawn source map tasks")
+	dataForSourceMaps := b.computeDataForSourceMapsInParallel(&options, allReachableFiles)
+	timer.End("Spawn source map tasks")
+
+	var resultGroups [][]graph.OutputFile
+	if options.CodeSplitting || len(b.entryPoints) == 1 {
+		// If code splitting is enabled or if there's only one entry point, link all entry points together
+		resultGroups = [][]graph.OutputFile{link(&options, timer, log, b.fs, b.res,
+			files, b.entryPoints, b.uniqueKeyPrefix, allReachableFiles, dataForSourceMaps)}
+	} else {
+		// Otherwise, link each entry point with the runtime file separately
+		waitGroup := sync.WaitGroup{}
+		resultGroups = make([][]graph.OutputFile, len(b.entryPoints))
+		serializer := helpers.MakeSerializer(len(b.entryPoints))
+		for i, entryPoint := range b.entryPoints {
+			waitGroup.Add(1)
+			go func(i int, entryPoint graph.EntryPoint) {
+				entryPoints := []graph.EntryPoint{entryPoint}
+				forked := timer.Fork()
+
+				// Each goroutine needs a separate options object
+				optionsClone := options
+				optionsClone.ExclusiveMangleCacheUpdate = func(cb func(
+					mangleCache map[string]interface{},
+					cssUsedLocalNames map[string]bool,
+				)) {
+					// Serialize all accesses to the mangle cache in entry point order for determinism
+					serializer.Enter(i)
+					defer serializer.Leave(i)
+					cb(mangleCache, cssUsedLocalNames)
+				}
+
+				// Each goroutine writes only to its own slot in "resultGroups",
+				// so no additional synchronization beyond the WaitGroup is needed
+				resultGroups[i] = link(&optionsClone, forked, log, b.fs, b.res, files, entryPoints,
+					b.uniqueKeyPrefix, findReachableFiles(files, entryPoints), dataForSourceMaps)
+				timer.Join(forked)
+				waitGroup.Done()
+			}(i, entryPoint)
+		}
+		waitGroup.Wait()
+	}
+
+	// Join the results in entry point order for determinism
+	var outputFiles []graph.OutputFile
+	for _, group := range resultGroups {
+		outputFiles = append(outputFiles, group...)
+	}
+
+	// Also generate the metadata file if necessary
+	var metafileJSON string
+	if options.NeedsMetafile {
+		timer.Begin("Generate metadata JSON")
+		metafileJSON = b.generateMetadataJSON(outputFiles, allReachableFiles, options.ASCIIOnly)
+		timer.End("Generate metadata JSON")
+	}
+
+	if !options.WriteToStdout {
+		// Make sure an output file never overwrites an input file
+		if !options.AllowOverwrite {
+			sourceAbsPaths := make(map[string]uint32)
+			for _, sourceIndex := range allReachableFiles {
+				keyPath := b.files[sourceIndex].inputFile.Source.KeyPath
+				if keyPath.Namespace == "file" {
+					absPathKey := canonicalFileSystemPathForWindows(keyPath.Text)
+					sourceAbsPaths[absPathKey] = sourceIndex
+				}
+			}
+			for _, outputFile := range outputFiles {
+				absPathKey := canonicalFileSystemPathForWindows(outputFile.AbsPath)
+				if sourceIndex, ok := sourceAbsPaths[absPathKey]; ok {
+					hint := ""
+					switch logger.API {
+					case logger.CLIAPI:
+						hint = " (use \"--allow-overwrite\" to allow this)"
+					case logger.JSAPI:
+						hint = " (use \"allowOverwrite: true\" to allow this)"
+					case logger.GoAPI:
+						hint = " (use \"AllowOverwrite: true\" to allow this)"
+					}
+					log.AddError(nil, logger.Range{},
+						fmt.Sprintf("Refusing to overwrite input file %q%s",
+							b.files[sourceIndex].inputFile.Source.PrettyPath, hint))
+				}
+			}
+		}
+
+		// Make sure an output file never overwrites another output file. This
+		// is almost certainly unintentional and would otherwise happen silently.
+		//
+		// Make an exception for files that have identical contents. In that case
+		// the duplicate is just silently filtered out. This can happen with the
+		// "file" loader, for example.
+		outputFileMap := make(map[string][]byte)
+		// In-place filter: unique output files are compacted to the front of
+		// the slice, which is then truncated to "end" afterwards
+		end := 0
+		for _, outputFile := range outputFiles {
+			absPathKey := canonicalFileSystemPathForWindows(outputFile.AbsPath)
+			contents, ok := outputFileMap[absPathKey]
+
+			// If this isn't a duplicate, keep the output file
+			if !ok {
+				outputFileMap[absPathKey] = outputFile.Contents
+				outputFiles[end] = outputFile
+				end++
+				continue
+			}
+
+			// If the names and contents are both the same, only keep the first one
+			if bytes.Equal(contents, outputFile.Contents) {
+				continue
+			}
+
+			// Otherwise, generate an error
+			outputPath := outputFile.AbsPath
+			if relPath, ok := b.fs.Rel(b.fs.Cwd(), outputPath); ok {
+				outputPath = relPath
+			}
+			log.AddError(nil, logger.Range{}, "Two output files share the same path but have different contents: "+outputPath)
+		}
+		outputFiles = outputFiles[:end]
+	}
+
+	return outputFiles, metafileJSON
+}
+
+// Find all files reachable from all entry points. This order should be
+// deterministic given that the entry point order is deterministic, since the
+// returned order is the postorder of the graph traversal and import record
+// order within a given file is deterministic.
+func findReachableFiles(files []graph.InputFile, entryPoints []graph.EntryPoint) []uint32 {
+	visited := make(map[uint32]bool)
+	var order []uint32
+	var visit func(uint32)
+
+	// Include this file and all files it imports
+	visit = func(sourceIndex uint32) {
+		if !visited[sourceIndex] {
+			visited[sourceIndex] = true
+			file := &files[sourceIndex]
+			// A JS file with an associated CSS file also reaches that CSS file
+			if repr, ok := file.Repr.(*graph.JSRepr); ok && repr.CSSSourceIndex.IsValid() {
+				visit(repr.CSSSourceIndex.GetIndex())
+			}
+			if recordsPtr := file.Repr.ImportRecords(); recordsPtr != nil {
+				for _, record := range *recordsPtr {
+					if record.SourceIndex.IsValid() {
+						visit(record.SourceIndex.GetIndex())
+					} else if record.CopySourceIndex.IsValid() {
+						visit(record.CopySourceIndex.GetIndex())
+					}
+				}
+			}
+
+			// Each file must come after its dependencies
+			order = append(order, sourceIndex)
+		}
+	}
+
+	// The runtime is always included in case it's needed
+	visit(runtime.SourceIndex)
+
+	// Include all files reachable from any entry point
+	for _, entryPoint := range entryPoints {
+		visit(entryPoint.SourceIndex)
+	}
+
+	return order
+}
+
+// This is done in parallel with linking because linking is a mostly serial
+// phase and there are extra resources for parallelism. This could also be done
+// during parsing but that would slow down parsing and delay the start of the
+// linking phase, which then delays the whole bundling process.
+//
+// However, doing this during parsing would allow it to be cached along with
+// the parsed ASTs which would then speed up incremental builds. In the future
+// it could be good to optionally have this be computed during the parsing
+// phase when incremental builds are active but otherwise still have it be
+// computed during linking for optimal speed during non-incremental builds.
+func (b *Bundle) computeDataForSourceMapsInParallel(options *config.Options, reachableFiles []uint32) func() []DataForSourceMap {
+	if options.SourceMap == config.SourceMapNone {
+		return func() []DataForSourceMap {
+			return nil
+		}
+	}
+
+	var waitGroup sync.WaitGroup
+	results := make([]DataForSourceMap, len(b.files))
+
+	for _, sourceIndex := range reachableFiles {
+		// Only spawn work for files whose loader supports source maps
+		if f := &b.files[sourceIndex]; f.inputFile.Loader.CanHaveSourceMap() {
+			var approximateLineCount int32
+			switch repr := f.inputFile.Repr.(type) {
+			case *graph.JSRepr:
+				approximateLineCount = repr.AST.ApproximateLineCount
+			case *graph.CSSRepr:
+				approximateLineCount = repr.AST.ApproximateLineCount
+			}
+			waitGroup.Add(1)
+			go func(sourceIndex uint32, f *scannerFile, approximateLineCount int32) {
+				// Each goroutine writes only to its own slot in "results"
+				result := &results[sourceIndex]
+				result.LineOffsetTables = sourcemap.GenerateLineOffsetTables(f.inputFile.Source.Contents, approximateLineCount)
+				sm := f.inputFile.InputSourceMap
+				if !options.ExcludeSourcesContent {
+					if sm == nil {
+						// Simple case: no nested source map
+						result.QuotedContents = [][]byte{helpers.QuoteForJSON(f.inputFile.Source.Contents, options.ASCIIOnly)}
+					} else {
+						// Complex case: nested source map
+						result.QuotedContents = make([][]byte, len(sm.Sources))
+						nullContents := []byte("null")
+						for i := range sm.Sources {
+							// Missing contents become a "null" literal
+							quotedContents := nullContents
+							if i < len(sm.SourcesContent) {
+								if value := sm.SourcesContent[i]; value.Quoted != "" && (!options.ASCIIOnly || !isASCIIOnly(value.Quoted)) {
+									// Just use the value directly from the input file
+									quotedContents = []byte(value.Quoted)
+								} else if value.Value != nil {
+									// Re-quote non-ASCII values if output is ASCII-only.
+									// Also quote values that haven't been quoted yet
+									// (happens when the entire "sourcesContent" array is
+									// absent and the source has been found on the file
+									// system using the "sources" array).
+									quotedContents = helpers.QuoteForJSON(helpers.UTF16ToString(value.Value), options.ASCIIOnly)
+								}
+							}
+							result.QuotedContents[i] = quotedContents
+						}
+					}
+				}
+				waitGroup.Done()
+			}(sourceIndex, f, approximateLineCount)
+		}
+	}
+
+	// The returned function blocks until all spawned tasks have finished
+	return func() []DataForSourceMap {
+		waitGroup.Wait()
+		return results
+	}
+}
+
+// generateMetadataJSON builds the metafile JSON describing all inputs and
+// outputs for this build. Inputs flagged as omitted from the metafile are
+// skipped, as are duplicate output paths (which can happen with the "file"
+// loader).
+func (b *Bundle) generateMetadataJSON(results []graph.OutputFile, allReachableFiles []uint32, asciiOnly bool) string {
+	sb := strings.Builder{}
+	sb.WriteString("{\n  \"inputs\": {")
+
+	// Write inputs
+	isFirst := true
+	for _, sourceIndex := range allReachableFiles {
+		if b.files[sourceIndex].inputFile.OmitFromSourceMapsAndMetafile {
+			continue
+		}
+		if file := &b.files[sourceIndex]; len(file.jsonMetadataChunk) > 0 {
+			if isFirst {
+				isFirst = false
+				sb.WriteString("\n    ")
+			} else {
+				sb.WriteString(",\n    ")
+			}
+			sb.WriteString(file.jsonMetadataChunk)
+		}
+	}
+
+	sb.WriteString("\n  },\n  \"outputs\": {")
+
+	// Write outputs
+	isFirst = true
+	paths := make(map[string]bool)
+	for _, result := range results {
+		if len(result.JSONMetadataChunk) > 0 {
+			path := resolver.PrettyPath(b.fs, logger.Path{Text: result.AbsPath, Namespace: "file"})
+			if paths[path] {
+				// Don't write out the same path twice (can happen with the "file" loader)
+				continue
+			}
+			if isFirst {
+				isFirst = false
+				sb.WriteString("\n    ")
+			} else {
+				sb.WriteString(",\n    ")
+			}
+			paths[path] = true
+			sb.WriteString(fmt.Sprintf("%s: ", helpers.QuoteForJSON(path, asciiOnly)))
+			sb.WriteString(result.JSONMetadataChunk)
+		}
+	}
+
+	sb.WriteString("\n  }\n}\n")
+	return sb.String()
+}
+
+// runtimeCacheKey captures every option the parsed runtime AST depends on;
+// two builds with equal keys can safely share the same cached AST.
+type runtimeCacheKey struct {
+	unsupportedJSFeatures compat.JSFeature
+	minifySyntax          bool
+	minifyIdentifiers     bool
+}
+
+// runtimeCache memoizes parsed runtime ASTs keyed by runtimeCacheKey.
+// Access to astMap is guarded by astMutex.
+type runtimeCache struct {
+	astMap   map[runtimeCacheKey]js_ast.AST
+	astMutex sync.Mutex
+}
+
+// globalRuntimeCache is shared by all builds in this process.
+var globalRuntimeCache runtimeCache
+
+// parseRuntime returns the source and parsed AST of the runtime code for the
+// given options, reusing a previously-parsed AST when one exists for an
+// identical cache key. It panics if the runtime itself fails to parse, which
+// would be an internal error.
+func (cache *runtimeCache) parseRuntime(options *config.Options) (source logger.Source, runtimeAST js_ast.AST, ok bool) {
+	key := runtimeCacheKey{
+		// All configuration options that the runtime code depends on must go here
+		unsupportedJSFeatures: options.UnsupportedJSFeatures,
+		minifySyntax:          options.MinifySyntax,
+		minifyIdentifiers:     options.MinifyIdentifiers,
+	}
+
+	// Determine which source to use
+	source = runtime.Source(key.unsupportedJSFeatures)
+
+	// Cache hit?
+	(func() {
+		cache.astMutex.Lock()
+		defer cache.astMutex.Unlock()
+		if cache.astMap != nil {
+			runtimeAST, ok = cache.astMap[key]
+		}
+	})()
+	if ok {
+		return
+	}
+
+	// Cache miss
+	log := logger.NewDeferLog(logger.DeferLogAll, nil)
+	runtimeAST, ok = js_parser.Parse(log, source, js_parser.OptionsFromConfig(&config.Options{
+		// These configuration options must only depend on the key
+		UnsupportedJSFeatures: key.unsupportedJSFeatures,
+		MinifySyntax:          key.minifySyntax,
+		MinifyIdentifiers:     key.minifyIdentifiers,
+
+		// Always do tree shaking for the runtime because we never want to
+		// include unnecessary runtime code
+		TreeShaking: true,
+	}))
+	if log.HasErrors() {
+		msgs := "Internal error: failed to parse runtime:\n"
+		for _, msg := range log.Done() {
+			msgs += msg.String(logger.OutputOptions{IncludeSource: true}, logger.TerminalInfo{})
+		}
+		panic(msgs[:len(msgs)-1])
+	}
+
+	// Cache for next time. Concurrent cache misses may each parse the runtime;
+	// whichever finishes last overwrites the entry, which is harmless because
+	// results for equal keys are interchangeable.
+	if ok {
+		cache.astMutex.Lock()
+		defer cache.astMutex.Unlock()
+		if cache.astMap == nil {
+			cache.astMap = make(map[runtimeCacheKey]js_ast.AST)
+		}
+		cache.astMap[key] = runtimeAST
+	}
+	return
+}
+
+// Returns the path of this file relative to "outbase", which is then ready to
+// be joined with the absolute output directory path. The directory and name
+// components are returned separately for convenience.
+func PathRelativeToOutbase(
+	inputFile *graph.InputFile,
+	options *config.Options,
+	fs fs.FS,
+	avoidIndex bool,
+	customFilePath string,
+) (relDir string, baseName string) {
+	relDir = "/"
+	absPath := inputFile.Source.KeyPath.Text
+
+	if customFilePath != "" {
+		// Use the configured output path if present
+		absPath = customFilePath
+		if !fs.IsAbs(absPath) {
+			absPath = fs.Join(options.AbsOutputBase, absPath)
+		}
+	} else if inputFile.Source.KeyPath.Namespace != "file" {
+		// Come up with a path for virtual paths (i.e. non-file-system paths)
+		dir, base, _ := logger.PlatformIndependentPathDirBaseExt(absPath)
+		if avoidIndex && base == "index" {
+			_, base, _ = logger.PlatformIndependentPathDirBaseExt(dir)
+		}
+		baseName = sanitizeFilePathForVirtualModulePath(base)
+		return
+	} else {
+		// Heuristic: If the file is named something like "index.js", then use
+		// the name of the parent directory instead. This helps avoid the
+		// situation where many chunks are named "index" because of people
+		// dynamically-importing npm packages that make use of node's implicit
+		// "index" file name feature.
+		if avoidIndex {
+			base := fs.Base(absPath)
+			base = base[:len(base)-len(fs.Ext(base))]
+			if base == "index" {
+				absPath = fs.Dir(absPath)
+			}
+		}
+	}
+
+	// Try to get a relative path to the base directory
+	relPath, ok := fs.Rel(options.AbsOutputBase, absPath)
+	if !ok {
+		// This can fail in some situations such as on different drives on
+		// Windows. In that case we just use the file name.
+		baseName = fs.Base(absPath)
+	} else {
+		// Now we finally have a relative path
+		relDir = fs.Dir(relPath) + "/"
+		baseName = fs.Base(relPath)
+
+		// Use platform-independent slashes
+		relDir = strings.ReplaceAll(relDir, "\\", "/")
+
+		// Replace leading "../" so we don't try to write outside of the output
+		// directory. This normally can't happen because "AbsOutputBase" is
+		// automatically computed to contain all entry point files, but it can
+		// happen if someone sets it manually via the "outbase" API option.
+		//
+		// Note that we can't just strip any leading "../" because that could
+		// cause two separate entry point paths to collide. For example, there
+		// could be both "src/index.js" and "../src/index.js" as entry points.
+		//
+		// Count the number of leading "../" segments (each is 3 bytes long)
+		dotDotCount := 0
+		for strings.HasPrefix(relDir[dotDotCount*3:], "../") {
+			dotDotCount++
+		}
+		if dotDotCount > 0 {
+			// The use of "_.._" here is somewhat arbitrary but it is unlikely to
+			// collide with a folder named by a human and it works on Windows
+			// (Windows doesn't like names that end with a "."). And not starting
+			// with a "." means that it will not be hidden on Unix.
+			relDir = strings.Repeat("_.._/", dotDotCount) + relDir[dotDotCount*3:]
+		}
+		// Normalize: drop trailing slashes, ensure a single leading slash,
+		// and strip a trailing "/." left over from fs.Dir
+		for strings.HasSuffix(relDir, "/") {
+			relDir = relDir[:len(relDir)-1]
+		}
+		relDir = "/" + relDir
+		if strings.HasSuffix(relDir, "/.") {
+			relDir = relDir[:len(relDir)-1]
+		}
+	}
+
+	// Strip the file extension if the output path is an input file
+	if customFilePath == "" {
+		ext := fs.Ext(baseName)
+		baseName = baseName[:len(baseName)-len(ext)]
+	}
+	return
+}
+
+// sanitizeFilePathForVirtualModulePath converts an arbitrary virtual module
+// path into a string that is safe to use as a file name on both Unix and
+// Windows. Interior runs of forbidden characters collapse into a single "_",
+// and an empty or all-forbidden input becomes "_".
+func sanitizeFilePathForVirtualModulePath(path string) string {
+	// Convert it to a safe file path. See: https://stackoverflow.com/a/31976060
+	sb := strings.Builder{}
+	needsGap := false
+	for _, c := range path {
+		switch c {
+		case 0:
+			// These characters are forbidden on Unix and Windows
+
+		case '<', '>', ':', '"', '|', '?', '*':
+			// These characters are forbidden on Windows
+
+		default:
+			if c < 0x20 {
+				// These characters are forbidden on Windows
+				break
+			}
+
+			// Turn runs of invalid characters into a '_'
+			if needsGap {
+				sb.WriteByte('_')
+				needsGap = false
+			}
+
+			sb.WriteRune(c)
+			continue
+		}
+
+		// Only request a gap once something valid has been written, so
+		// leading forbidden characters don't produce a leading "_"
+		if sb.Len() > 0 {
+			needsGap = true
+		}
+	}
+
+	// Make sure the name isn't empty
+	if sb.Len() == 0 {
+		return "_"
+	}
+
+	// Note: An extension will be added to this base name, so there is no need to
+	// avoid forbidden file names such as ".." since ".js" is a valid file name.
+	return sb.String()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/cache/cache.go b/source/vendor/github.com/evanw/esbuild/internal/cache/cache.go
new file mode 100644
index 0000000..8b1dd8c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/cache/cache.go
@@ -0,0 +1,115 @@
+package cache
+
+import (
+	"sync"
+
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/runtime"
+)
+
+// This is a cache of the parsed contents of a set of files. The idea is to be
+// able to reuse the results of parsing between builds and make subsequent
+// builds faster by avoiding redundant parsing work. This only works if:
+//
+//   - The AST information in the cache must be considered immutable. There is
+//     no way to enforce this in Go, but please be disciplined about this. The
+//     ASTs are shared in between builds. Any information that must be mutated
+//     in the AST during a build must be done on a shallow clone of the data if
+//     the mutation happens after parsing (i.e. a clone that clones everything
+//     that will be mutated and shares only the parts that won't be mutated).
+//
+//   - The information in the cache must not depend at all on the contents of
+//     any file other than the file being cached. Invalidating an entry in the
+//     cache does not also invalidate any entries that depend on that file, so
+//     caching information that depends on other files can result in incorrect
+//     results due to reusing stale data. For example, do not "bake in" some
+//     value imported from another file.
+//
+//   - Cached ASTs must only be reused if the parsing options are identical
+//     between builds. For example, it would be bad if the AST parser depended
+//     on options inherited from a nearby "package.json" file but those options
+//     were not part of the cache key. Then the cached AST could incorrectly be
+//     reused even if the contents of that "package.json" file have changed.
+type CacheSet struct {
+	FSCache          FSCache          // file contents keyed by path, validated via fs.ModKey
+	CSSCache         CSSCache         // parsed CSS ASTs keyed by source path
+	JSONCache        JSONCache        // parsed JSON expressions keyed by source path
+	JSCache          JSCache          // parsed JS ASTs keyed by source path
+	SourceIndexCache SourceIndexCache // stable source index assignment per path
+}
+
+// MakeCacheSet returns a CacheSet with every sub-cache initialized and with
+// source index allocation starting just past the reserved runtime index.
+func MakeCacheSet() *CacheSet {
+	return &CacheSet{
+		SourceIndexCache: SourceIndexCache{
+			globEntries:     make(map[uint64]uint32),
+			entries:         make(map[sourceIndexKey]uint32),
+			nextSourceIndex: runtime.SourceIndex + 1,
+		},
+		FSCache: FSCache{
+			entries: make(map[string]*fsEntry),
+		},
+		CSSCache: CSSCache{
+			entries: make(map[logger.Path]*cssCacheEntry),
+		},
+		JSONCache: JSONCache{
+			entries: make(map[logger.Path]*jsonCacheEntry),
+		},
+		JSCache: JSCache{
+			entries: make(map[logger.Path]*jsCacheEntry),
+		},
+	}
+}
+
+// SourceIndexCache hands out stable uint32 source indexes for file paths so
+// the same path receives the same index across builds. All fields are
+// guarded by "mutex"; the cache is safe for concurrent use.
+type SourceIndexCache struct {
+	globEntries     map[uint64]uint32
+	entries         map[sourceIndexKey]uint32
+	mutex           sync.Mutex
+	nextSourceIndex uint32
+}
+
+// SourceIndexKind distinguishes multiple source indexes for the same path.
+type SourceIndexKind uint8
+
+const (
+	SourceIndexNormal SourceIndexKind = iota
+	SourceIndexJSStubForCSS // index for the JS stub that represents a CSS file
+)
+
+// sourceIndexKey is the lookup key for "entries": a path plus its kind.
+type sourceIndexKey struct {
+	path logger.Path
+	kind SourceIndexKind
+}
+
+// LenHint returns an upper-bound estimate of how many source indexes are in
+// use, with a little headroom so callers can pre-size slices without an
+// immediate reallocation.
+func (c *SourceIndexCache) LenHint() uint32 {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	// Add some extra room at the end for a new file or two without reallocating
+	const someExtraRoom = 16
+	return c.nextSourceIndex + someExtraRoom
+}
+
+// Get returns the stable source index for the given path/kind pair,
+// allocating a fresh index on first use. Safe for concurrent use.
+func (c *SourceIndexCache) Get(path logger.Path, kind SourceIndexKind) uint32 {
+	key := sourceIndexKey{path: path, kind: kind}
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	if sourceIndex, ok := c.entries[key]; ok {
+		return sourceIndex
+	}
+	sourceIndex := c.nextSourceIndex
+	c.nextSourceIndex++
+	c.entries[key] = sourceIndex
+	return sourceIndex
+}
+
+// GetGlob is like Get but for glob expansion results, keyed by the importing
+// file's source index combined with a per-file glob index packed into a
+// single uint64. Safe for concurrent use.
+func (c *SourceIndexCache) GetGlob(parentSourceIndex uint32, globIndex uint32) uint32 {
+	key := (uint64(parentSourceIndex) << 32) | uint64(globIndex)
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	if sourceIndex, ok := c.globEntries[key]; ok {
+		return sourceIndex
+	}
+	sourceIndex := c.nextSourceIndex
+	c.nextSourceIndex++
+	c.globEntries[key] = sourceIndex
+	return sourceIndex
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go b/source/vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go
new file mode 100644
index 0000000..c976f89
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/cache/cache_ast.go
@@ -0,0 +1,190 @@
+package cache
+
+import (
+	"sync"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_parser"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// This cache intends to avoid unnecessarily re-parsing files in subsequent
+// builds. For a given path, parsing can be avoided if the contents of the file
+// and the options for the parser are the same as last time. Even if the
+// contents of the file are the same, the options for the parser may have
+// changed if they depend on some other file ("package.json" for example).
+//
+// This cache checks if the file contents have changed even though we have
+// the ability to detect if a file has changed on the file system by reading
+// its metadata. First of all, if the file contents are cached then they should
+// be the same pointer, which makes the comparison trivial. Also we want to
+// cache the AST for plugins in the common case that the plugin output stays
+// the same.
+
+////////////////////////////////////////////////////////////////////////////////
+// CSS
+
+// CSSCache memoizes parsed CSS ASTs keyed by source path. An entry is reused
+// only when both the source and the parser options are unchanged.
+type CSSCache struct {
+	entries map[logger.Path]*cssCacheEntry
+	mutex   sync.Mutex
+}
+
+// cssCacheEntry stores the parse result together with the log messages it
+// produced so that cache hits can replay the same diagnostics.
+type cssCacheEntry struct {
+	source  logger.Source
+	msgs    []logger.Msg
+	ast     css_ast.AST
+	options css_parser.Options
+}
+
+// Parse returns the CSS AST for "source", reusing the cached result when the
+// source and options match the previous parse and re-parsing (and updating
+// the cache) otherwise. Messages from the parse are forwarded to "log".
+func (c *CSSCache) Parse(log logger.Log, source logger.Source, options css_parser.Options) css_ast.AST {
+	// Check the cache
+	entry := func() *cssCacheEntry {
+		c.mutex.Lock()
+		defer c.mutex.Unlock()
+		return c.entries[source.KeyPath]
+	}()
+
+	// Cache hit
+	if entry != nil && entry.source == source && entry.options.Equal(&options) {
+		for _, msg := range entry.msgs {
+			log.AddMsg(msg)
+		}
+		return entry.ast
+	}
+
+	// Cache miss: parse into a deferred log so the messages can be captured
+	// for replay on future cache hits
+	tempLog := logger.NewDeferLog(logger.DeferLogAll, log.Overrides)
+	ast := css_parser.Parse(tempLog, source, options)
+	msgs := tempLog.Done()
+	for _, msg := range msgs {
+		log.AddMsg(msg)
+	}
+
+	// Create the cache entry
+	entry = &cssCacheEntry{
+		source:  source,
+		options: options,
+		ast:     ast,
+		msgs:    msgs,
+	}
+
+	// Save for next time
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.entries[source.KeyPath] = entry
+	return ast
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// JSON
+
+// JSONCache memoizes parsed JSON expressions keyed by source path. An entry
+// is reused only when both the source and the parser options are unchanged.
+type JSONCache struct {
+	entries map[logger.Path]*jsonCacheEntry
+	mutex   sync.Mutex
+}
+
+// jsonCacheEntry stores the parse result together with the log messages it
+// produced so that cache hits can replay the same diagnostics.
+type jsonCacheEntry struct {
+	expr    js_ast.Expr
+	msgs    []logger.Msg
+	source  logger.Source
+	options js_parser.JSONOptions
+	ok      bool
+}
+
+// Parse returns the JSON expression for "source" plus a success flag,
+// reusing the cached result when the source and options match the previous
+// parse and re-parsing (and updating the cache) otherwise. Messages from the
+// parse are forwarded to "log".
+func (c *JSONCache) Parse(log logger.Log, source logger.Source, options js_parser.JSONOptions) (js_ast.Expr, bool) {
+	// Check the cache
+	entry := func() *jsonCacheEntry {
+		c.mutex.Lock()
+		defer c.mutex.Unlock()
+		return c.entries[source.KeyPath]
+	}()
+
+	// Cache hit
+	if entry != nil && entry.source == source && entry.options == options {
+		for _, msg := range entry.msgs {
+			log.AddMsg(msg)
+		}
+		return entry.expr, entry.ok
+	}
+
+	// Cache miss: parse into a deferred log so the messages can be captured
+	// for replay on future cache hits
+	tempLog := logger.NewDeferLog(logger.DeferLogAll, log.Overrides)
+	expr, ok := js_parser.ParseJSON(tempLog, source, options)
+	msgs := tempLog.Done()
+	for _, msg := range msgs {
+		log.AddMsg(msg)
+	}
+
+	// Create the cache entry
+	entry = &jsonCacheEntry{
+		source:  source,
+		options: options,
+		expr:    expr,
+		ok:      ok,
+		msgs:    msgs,
+	}
+
+	// Save for next time
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.entries[source.KeyPath] = entry
+	return expr, ok
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// JS
+
+// JSCache memoizes parsed JS ASTs keyed by source path. An entry is reused
+// only when both the source and the parser options are unchanged.
+type JSCache struct {
+	entries map[logger.Path]*jsCacheEntry
+	mutex   sync.Mutex
+}
+
+// jsCacheEntry stores the parse result together with the log messages it
+// produced so that cache hits can replay the same diagnostics.
+type jsCacheEntry struct {
+	source  logger.Source
+	msgs    []logger.Msg
+	options js_parser.Options
+	ast     js_ast.AST
+	ok      bool
+}
+
+// Parse returns the JS AST for "source" plus a success flag, reusing the
+// cached result when the source and options match the previous parse and
+// re-parsing (and updating the cache) otherwise. Messages from the parse are
+// forwarded to "log".
+func (c *JSCache) Parse(log logger.Log, source logger.Source, options js_parser.Options) (js_ast.AST, bool) {
+	// Check the cache
+	entry := func() *jsCacheEntry {
+		c.mutex.Lock()
+		defer c.mutex.Unlock()
+		return c.entries[source.KeyPath]
+	}()
+
+	// Cache hit
+	if entry != nil && entry.source == source && entry.options.Equal(&options) {
+		for _, msg := range entry.msgs {
+			log.AddMsg(msg)
+		}
+		return entry.ast, entry.ok
+	}
+
+	// Cache miss: parse into a deferred log so the messages can be captured
+	// for replay on future cache hits
+	tempLog := logger.NewDeferLog(logger.DeferLogAll, log.Overrides)
+	ast, ok := js_parser.Parse(tempLog, source, options)
+	msgs := tempLog.Done()
+	for _, msg := range msgs {
+		log.AddMsg(msg)
+	}
+
+	// Create the cache entry
+	entry = &jsCacheEntry{
+		source:  source,
+		options: options,
+		ast:     ast,
+		ok:      ok,
+		msgs:    msgs,
+	}
+
+	// Save for next time
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.entries[source.KeyPath] = entry
+	return ast, ok
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go b/source/vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go
new file mode 100644
index 0000000..ab4d08e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/cache/cache_fs.go
@@ -0,0 +1,52 @@
+package cache
+
+import (
+	"sync"
+
+	"github.com/evanw/esbuild/internal/fs"
+)
+
+// This cache uses information from the "stat" syscall to try to avoid re-
+// reading files from the file system during subsequent builds if the file
+// hasn't changed. The assumption is reading the file metadata is faster than
+// reading the file contents.
+
+// FSCache memoizes file contents keyed by path, guarded by "mutex".
+type FSCache struct {
+	entries map[string]*fsEntry
+	mutex   sync.Mutex
+}
+
+type fsEntry struct {
+	contents       string
+	modKey         fs.ModKey // modification key captured when the file was read
+	isModKeyUsable bool      // false when ModKey failed at read time
+}
+
+// ReadFile returns the contents of "path", skipping the actual read when the
+// file's current modification key matches the cached entry's. On a read
+// failure the (canonical, original) error pair from the file system is
+// returned unchanged; successful reads update the cache.
+func (c *FSCache) ReadFile(fs fs.FS, path string) (contents string, canonicalError error, originalError error) {
+	entry := func() *fsEntry {
+		c.mutex.Lock()
+		defer c.mutex.Unlock()
+		return c.entries[path]
+	}()
+
+	// If the file's modification key hasn't changed since it was cached, assume
+	// the contents of the file are also the same and skip reading the file.
+	modKey, modKeyErr := fs.ModKey(path)
+	if entry != nil && entry.isModKeyUsable && modKeyErr == nil && entry.modKey == modKey {
+		return entry.contents, nil, nil
+	}
+
+	contents, err, originalError := fs.ReadFile(path)
+	if err != nil {
+		return "", err, originalError
+	}
+
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+	c.entries[path] = &fsEntry{
+		contents:       contents,
+		modKey:         modKey,
+		isModKeyUsable: modKeyErr == nil,
+	}
+	return contents, nil, nil
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/compat/compat.go b/source/vendor/github.com/evanw/esbuild/internal/compat/compat.go
new file mode 100644
index 0000000..bd2d0ff
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/compat/compat.go
@@ -0,0 +1,92 @@
+package compat
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+)
+
+// v is a compact version triple used by the compatibility tables.
+type v struct {
+	major uint16
+	minor uint8
+	patch uint8
+}
+
+type Semver struct {
+	// "1.2.3-alpha" => { Parts: {1, 2, 3}, PreRelease: "-alpha" }
+	Parts      []int
+	PreRelease string
+}
+
+// String formats the version as dot-separated parts followed by any
+// pre-release suffix, e.g. "1.2.3-alpha".
+func (v Semver) String() string {
+	b := strings.Builder{}
+	for _, part := range v.Parts {
+		if b.Len() > 0 {
+			b.WriteRune('.')
+		}
+		b.WriteString(strconv.Itoa(part))
+	}
+	b.WriteString(v.PreRelease)
+	return b.String()
+}
+
+// Returns <0 if "a < b"
+// Returns 0 if "a == b"
+// Returns >0 if "a > b"
+//
+// Components missing from "b" are treated as zero, and a pre-release of an
+// otherwise-equal version compares as older (e.g. "1.0.0" > "1.0.0-alpha").
+func compareVersions(a v, b Semver) int {
+	diff := int(a.major)
+	if len(b.Parts) > 0 {
+		diff -= b.Parts[0]
+	}
+	if diff == 0 {
+		diff = int(a.minor)
+		if len(b.Parts) > 1 {
+			diff -= b.Parts[1]
+		}
+	}
+	if diff == 0 {
+		diff = int(a.patch)
+		if len(b.Parts) > 2 {
+			diff -= b.Parts[2]
+		}
+	}
+	if diff == 0 && len(b.PreRelease) != 0 {
+		return 1 // "1.0.0" > "1.0.0-alpha"
+	}
+	return diff
+}
+
+// The start is inclusive and the end is exclusive
+type versionRange struct {
+	start v
+	end   v // Use 0.0.0 for "no end"
+}
+
+// isVersionSupported reports whether "version" lies inside any of the given
+// ranges. A zero "end" marks a range with no upper bound.
+func isVersionSupported(ranges []versionRange, version Semver) bool {
+	for _, r := range ranges {
+		if compareVersions(r.start, version) <= 0 && (r.end == (v{}) || compareVersions(r.end, version) > 0) {
+			return true
+		}
+	}
+	return false
+}
+
+// SymbolFeature returns the JSFeature corresponding to the given
+// private-name symbol kind, or 0 when the kind has no associated feature.
+func SymbolFeature(kind ast.SymbolKind) JSFeature {
+	switch kind {
+	case ast.SymbolPrivateField:
+		return ClassPrivateField
+	case ast.SymbolPrivateMethod:
+		return ClassPrivateMethod
+	case ast.SymbolPrivateGet, ast.SymbolPrivateSet, ast.SymbolPrivateGetSetPair:
+		return ClassPrivateAccessor
+	case ast.SymbolPrivateStaticField:
+		return ClassPrivateStaticField
+	case ast.SymbolPrivateStaticMethod:
+		return ClassPrivateStaticMethod
+	case ast.SymbolPrivateStaticGet, ast.SymbolPrivateStaticSet, ast.SymbolPrivateStaticGetSetPair:
+		return ClassPrivateStaticAccessor
+	default:
+		return 0
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/compat/css_table.go b/source/vendor/github.com/evanw/esbuild/internal/compat/css_table.go
new file mode 100644
index 0000000..1cd717e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/compat/css_table.go
@@ -0,0 +1,361 @@
+// This file was automatically generated by "css_table.ts"
+
+package compat
+
+import (
+	"github.com/evanw/esbuild/internal/css_ast"
+)
+
+// CSSFeature is a bit set of CSS language features. Each constant below is a
+// single bit so features can be combined and tested with bitwise operations.
+type CSSFeature uint16
+
+const (
+	ColorFunctions CSSFeature = 1 << iota
+	GradientDoublePosition
+	GradientInterpolation
+	GradientMidpoints
+	HWB
+	HexRGBA
+	InlineStyle
+	InsetProperty
+	IsPseudoClass
+	Modern_RGB_HSL
+	Nesting
+	RebeccaPurple
+)
+
+// StringToCSSFeature maps the user-facing kebab-case feature names to their
+// corresponding CSSFeature bit flags.
+var StringToCSSFeature = map[string]CSSFeature{
+	"color-functions":          ColorFunctions,
+	"gradient-double-position": GradientDoublePosition,
+	"gradient-interpolation":   GradientInterpolation,
+	"gradient-midpoints":       GradientMidpoints,
+	"hwb":                      HWB,
+	"hex-rgba":                 HexRGBA,
+	"inline-style":             InlineStyle,
+	"inset-property":           InsetProperty,
+	"is-pseudo-class":          IsPseudoClass,
+	"modern-rgb-hsl":           Modern_RGB_HSL,
+	"nesting":                  Nesting,
+	"rebecca-purple":           RebeccaPurple,
+}
+
+// Has reports whether any of the bits in "feature" are set in "features".
+func (features CSSFeature) Has(feature CSSFeature) bool {
+	return (features & feature) != 0
+}
+
+// ApplyOverrides replaces the bits selected by "mask" with the corresponding
+// bits from "overrides", leaving all other bits unchanged.
+func (features CSSFeature) ApplyOverrides(overrides CSSFeature, mask CSSFeature) CSSFeature {
+	return (features & ^mask) | (overrides & mask)
+}
+
+// cssTable lists, for each CSS feature, the version ranges in which each
+// browser engine natively supports it. An engine missing from a feature's map
+// is treated as not supporting that feature (see UnsupportedCSSFeatures).
+var cssTable = map[CSSFeature]map[Engine][]versionRange{
+	ColorFunctions: {
+		Chrome:  {{start: v{111, 0, 0}}},
+		Edge:    {{start: v{111, 0, 0}}},
+		Firefox: {{start: v{113, 0, 0}}},
+		IOS:     {{start: v{15, 4, 0}}},
+		Opera:   {{start: v{97, 0, 0}}},
+		Safari:  {{start: v{15, 4, 0}}},
+	},
+	GradientDoublePosition: {
+		Chrome:  {{start: v{72, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		Firefox: {{start: v{83, 0, 0}}},
+		IOS:     {{start: v{12, 2, 0}}},
+		Opera:   {{start: v{60, 0, 0}}},
+		Safari:  {{start: v{12, 1, 0}}},
+	},
+	GradientInterpolation: {
+		Chrome: {{start: v{111, 0, 0}}},
+		Edge:   {{start: v{111, 0, 0}}},
+		IOS:    {{start: v{16, 2, 0}}},
+		Opera:  {{start: v{97, 0, 0}}},
+		Safari: {{start: v{16, 2, 0}}},
+	},
+	GradientMidpoints: {
+		Chrome:  {{start: v{40, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		Firefox: {{start: v{36, 0, 0}}},
+		IOS:     {{start: v{7, 0, 0}}},
+		Opera:   {{start: v{27, 0, 0}}},
+		Safari:  {{start: v{7, 0, 0}}},
+	},
+	HWB: {
+		Chrome:  {{start: v{101, 0, 0}}},
+		Edge:    {{start: v{101, 0, 0}}},
+		Firefox: {{start: v{96, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Opera:   {{start: v{87, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	HexRGBA: {
+		Chrome:  {{start: v{62, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		Firefox: {{start: v{49, 0, 0}}},
+		IOS:     {{start: v{9, 3, 0}}},
+		Opera:   {{start: v{49, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	InlineStyle: {},
+	InsetProperty: {
+		Chrome:  {{start: v{87, 0, 0}}},
+		Edge:    {{start: v{87, 0, 0}}},
+		Firefox: {{start: v{66, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Opera:   {{start: v{73, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	IsPseudoClass: {
+		Chrome:  {{start: v{88, 0, 0}}},
+		Edge:    {{start: v{88, 0, 0}}},
+		Firefox: {{start: v{78, 0, 0}}},
+		IOS:     {{start: v{14, 0, 0}}},
+		Opera:   {{start: v{75, 0, 0}}},
+		Safari:  {{start: v{14, 0, 0}}},
+	},
+	Modern_RGB_HSL: {
+		Chrome:  {{start: v{66, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		Firefox: {{start: v{52, 0, 0}}},
+		IOS:     {{start: v{12, 2, 0}}},
+		Opera:   {{start: v{53, 0, 0}}},
+		Safari:  {{start: v{12, 1, 0}}},
+	},
+	Nesting: {
+		Chrome:  {{start: v{120, 0, 0}}},
+		Edge:    {{start: v{120, 0, 0}}},
+		Firefox: {{start: v{117, 0, 0}}},
+		IOS:     {{start: v{17, 2, 0}}},
+		Opera:   {{start: v{106, 0, 0}}},
+		Safari:  {{start: v{17, 2, 0}}},
+	},
+	RebeccaPurple: {
+		Chrome:  {{start: v{38, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		Firefox: {{start: v{33, 0, 0}}},
+		IE:      {{start: v{11, 0, 0}}},
+		IOS:     {{start: v{8, 0, 0}}},
+		Opera:   {{start: v{25, 0, 0}}},
+		Safari:  {{start: v{9, 0, 0}}},
+	},
+}
+
+// Return all features that are not available in at least one environment
+//
+// A feature is considered unsupported for an engine when the engine is either
+// absent from cssTable for that feature or the constrained version is outside
+// every supported range. Non-browser engines (e.g. "es", "node") are ignored.
+func UnsupportedCSSFeatures(constraints map[Engine]Semver) (unsupported CSSFeature) {
+	for feature, engines := range cssTable {
+		if feature == InlineStyle {
+			continue // This is purely user-specified
+		}
+		for engine, version := range constraints {
+			if !engine.IsBrowser() {
+				// Specifying "--target=es2020" shouldn't affect CSS
+				continue
+			}
+			if versionRanges, ok := engines[engine]; !ok || !isVersionSupported(versionRanges, version) {
+				unsupported |= feature
+			}
+		}
+	}
+	return
+}
+
+// CSSPrefix is a bit set of CSS vendor prefixes. NoPrefix (zero) means no
+// prefix is required.
+type CSSPrefix uint8
+
+const (
+	KhtmlPrefix CSSPrefix = 1 << iota
+	MozPrefix
+	MsPrefix
+	OPrefix
+	WebkitPrefix
+
+	NoPrefix CSSPrefix = 0
+)
+
+// prefixData records which vendor prefix a single engine needs for a CSS
+// declaration, and from which engine version the prefix becomes unnecessary.
+type prefixData struct {
+	// Note: In some cases, earlier versions did not require a prefix but later
+	// ones do. This is the case for Microsoft Edge for example, which switched
+	// the underlying browser engine from a custom one to the one from Chrome.
+	// However, we assume that users specifying a browser version for CSS mean
+	// "works in this version or newer", so we still add a prefix when a target
+	// is an old Edge version.
+	engine        Engine
+	withoutPrefix v // Zero value means the prefix is always required for this engine (see CSSPrefixData)
+	prefix        CSSPrefix
+}
+
+// cssPrefixTable lists, for each CSS declaration, the vendor prefixes that
+// individual engines require and the versions at which each prefix can be
+// dropped. An engine may appear more than once when it needs multiple
+// prefixes (e.g. Safari for "user-select").
+var cssPrefixTable = map[css_ast.D][]prefixData{
+	css_ast.DAppearance: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{84, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{84, 0, 0}},
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{80, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{73, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DBackdropFilter: {
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{18, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{18, 0, 0}},
+	},
+	css_ast.DBackgroundClip: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: MsPrefix, withoutPrefix: v{15, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{14, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{14, 0, 0}},
+	},
+	css_ast.DBoxDecorationBreak: {
+		{engine: Chrome, prefix: WebkitPrefix},
+		{engine: Edge, prefix: WebkitPrefix},
+		{engine: IOS, prefix: WebkitPrefix},
+		{engine: Opera, prefix: WebkitPrefix},
+		{engine: Safari, prefix: WebkitPrefix},
+	},
+	css_ast.DClipPath: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{55, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{13, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{42, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{13, 1, 0}},
+	},
+	css_ast.DFontKerning: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{33, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{12, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{20, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{9, 1, 0}},
+	},
+	css_ast.DHyphens: {
+		{engine: Edge, prefix: MsPrefix, withoutPrefix: v{79, 0, 0}},
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{43, 0, 0}},
+		{engine: IE, prefix: MsPrefix},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{17, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{17, 0, 0}},
+	},
+	css_ast.DInitialLetter: {
+		{engine: IOS, prefix: WebkitPrefix},
+		{engine: Safari, prefix: WebkitPrefix},
+	},
+	css_ast.DMaskComposite: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DMaskImage: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DMaskOrigin: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DMaskPosition: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DMaskRepeat: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DMaskSize: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{120, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{106, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DPosition: {
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{13, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{13, 0, 0}},
+	},
+	css_ast.DPrintColorAdjust: {
+		{engine: Chrome, prefix: WebkitPrefix},
+		{engine: Edge, prefix: WebkitPrefix},
+		{engine: Opera, prefix: WebkitPrefix},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{15, 4, 0}},
+	},
+	css_ast.DTabSize: {
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{91, 0, 0}},
+		{engine: Opera, prefix: OPrefix, withoutPrefix: v{15, 0, 0}},
+	},
+	css_ast.DTextDecorationColor: {
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{36, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{12, 2, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{12, 1, 0}},
+	},
+	css_ast.DTextDecorationLine: {
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{36, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{12, 2, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{12, 1, 0}},
+	},
+	css_ast.DTextDecorationSkip: {
+		{engine: IOS, prefix: WebkitPrefix, withoutPrefix: v{12, 2, 0}},
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{12, 1, 0}},
+	},
+	css_ast.DTextEmphasisColor: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{85, 0, 0}},
+	},
+	css_ast.DTextEmphasisPosition: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{85, 0, 0}},
+	},
+	css_ast.DTextEmphasisStyle: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Edge, prefix: WebkitPrefix, withoutPrefix: v{99, 0, 0}},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{85, 0, 0}},
+	},
+	css_ast.DTextOrientation: {
+		{engine: Safari, prefix: WebkitPrefix, withoutPrefix: v{14, 0, 0}},
+	},
+	css_ast.DTextSizeAdjust: {
+		{engine: Edge, prefix: MsPrefix, withoutPrefix: v{79, 0, 0}},
+		{engine: IOS, prefix: WebkitPrefix},
+	},
+	css_ast.DUserSelect: {
+		{engine: Chrome, prefix: WebkitPrefix, withoutPrefix: v{54, 0, 0}},
+		{engine: Edge, prefix: MsPrefix, withoutPrefix: v{79, 0, 0}},
+		{engine: Firefox, prefix: MozPrefix, withoutPrefix: v{69, 0, 0}},
+		{engine: IE, prefix: MsPrefix},
+		{engine: IOS, prefix: WebkitPrefix},
+		{engine: Opera, prefix: WebkitPrefix, withoutPrefix: v{41, 0, 0}},
+		{engine: Safari, prefix: KhtmlPrefix, withoutPrefix: v{3, 0, 0}},
+		{engine: Safari, prefix: WebkitPrefix},
+	},
+}
+
+// CSSPrefixData returns, for each CSS property that needs one, the set of
+// vendor prefixes required to satisfy the given engine version constraints.
+// Non-browser engines are ignored. The result is nil when no property needs
+// any prefix.
+func CSSPrefixData(constraints map[Engine]Semver) (entries map[css_ast.D]CSSPrefix) {
+	for property, items := range cssPrefixTable {
+		prefixes := NoPrefix
+		for engine, version := range constraints {
+			if !engine.IsBrowser() {
+				// Specifying "--target=es2020" shouldn't affect CSS
+				continue
+			}
+			for _, item := range items {
+				// A zero "withoutPrefix" means the prefix is always required;
+				// otherwise it's required only below that engine version
+				if item.engine == engine && (item.withoutPrefix == v{} || compareVersions(item.withoutPrefix, version) > 0) {
+					prefixes |= item.prefix
+				}
+			}
+		}
+		if prefixes != NoPrefix {
+			if entries == nil {
+				entries = make(map[css_ast.D]CSSPrefix)
+			}
+			entries[property] = prefixes
+		}
+	}
+	return
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/compat/js_table.go b/source/vendor/github.com/evanw/esbuild/internal/compat/js_table.go
new file mode 100644
index 0000000..89180c3
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/compat/js_table.go
@@ -0,0 +1,910 @@
+// This file was automatically generated by "js_table.ts"
+
+package compat
+
+// Engine identifies a JavaScript runtime, browser engine, or language level
+// ("es") that can be used as a compilation target.
+type Engine uint8
+
+const (
+	Chrome Engine = iota
+	Deno
+	Edge
+	ES
+	Firefox
+	Hermes
+	IE
+	IOS
+	Node
+	Opera
+	Rhino
+	Safari
+)
+
+// String returns the lowercase name of the engine as used on the command
+// line, or the empty string for values outside the declared constants.
+func (e Engine) String() string {
+	switch e {
+	case Chrome:
+		return "chrome"
+	case Deno:
+		return "deno"
+	case Edge:
+		return "edge"
+	case ES:
+		return "es"
+	case Firefox:
+		return "firefox"
+	case Hermes:
+		return "hermes"
+	case IE:
+		return "ie"
+	case IOS:
+		return "ios"
+	case Node:
+		return "node"
+	case Opera:
+		return "opera"
+	case Rhino:
+		return "rhino"
+	case Safari:
+		return "safari"
+	}
+	return ""
+}
+
+// IsBrowser reports whether the engine is a web browser (as opposed to a
+// standalone runtime like Node or a language level like "es").
+func (e Engine) IsBrowser() bool {
+	switch e {
+	case Chrome, Edge, Firefox, IE, IOS, Opera, Safari:
+		return true
+	}
+	return false
+}
+
+// JSFeature is a bit set of JavaScript language features. Each constant below
+// is a single bit so features can be combined and tested with bitwise
+// operations.
+type JSFeature uint64
+
+const (
+	ArbitraryModuleNamespaceNames JSFeature = 1 << iota
+	ArraySpread
+	Arrow
+	AsyncAwait
+	AsyncGenerator
+	Bigint
+	Class
+	ClassField
+	ClassPrivateAccessor
+	ClassPrivateBrandCheck
+	ClassPrivateField
+	ClassPrivateMethod
+	ClassPrivateStaticAccessor
+	ClassPrivateStaticField
+	ClassPrivateStaticMethod
+	ClassStaticBlocks
+	ClassStaticField
+	ConstAndLet
+	Decorators
+	DefaultArgument
+	Destructuring
+	DynamicImport
+	ExponentOperator
+	ExportStarAs
+	ForAwait
+	ForOf
+	FunctionNameConfigurable
+	FunctionOrClassPropertyAccess
+	Generator
+	Hashbang
+	ImportAssertions
+	ImportAttributes
+	ImportMeta
+	InlineScript
+	LogicalAssignment
+	NestedRestBinding
+	NewTarget
+	NodeColonPrefixImport
+	NodeColonPrefixRequire
+	NullishCoalescing
+	ObjectAccessors
+	ObjectExtensions
+	ObjectRestSpread
+	OptionalCatchBinding
+	OptionalChain
+	RegexpDotAllFlag
+	RegexpLookbehindAssertions
+	RegexpMatchIndices
+	RegexpNamedCaptureGroups
+	RegexpSetNotation
+	RegexpStickyAndUnicodeFlags
+	RegexpUnicodePropertyEscapes
+	RestArgument
+	TemplateLiteral
+	TopLevelAwait
+	TypeofExoticObjectIsObject
+	UnicodeEscapes
+	Using
+)
+
+// StringToJSFeature maps the user-facing kebab-case feature names to their
+// corresponding JSFeature bit flags.
+var StringToJSFeature = map[string]JSFeature{
+	"arbitrary-module-namespace-names":  ArbitraryModuleNamespaceNames,
+	"array-spread":                      ArraySpread,
+	"arrow":                             Arrow,
+	"async-await":                       AsyncAwait,
+	"async-generator":                   AsyncGenerator,
+	"bigint":                            Bigint,
+	"class":                             Class,
+	"class-field":                       ClassField,
+	"class-private-accessor":            ClassPrivateAccessor,
+	"class-private-brand-check":         ClassPrivateBrandCheck,
+	"class-private-field":               ClassPrivateField,
+	"class-private-method":              ClassPrivateMethod,
+	"class-private-static-accessor":     ClassPrivateStaticAccessor,
+	"class-private-static-field":        ClassPrivateStaticField,
+	"class-private-static-method":       ClassPrivateStaticMethod,
+	"class-static-blocks":               ClassStaticBlocks,
+	"class-static-field":                ClassStaticField,
+	"const-and-let":                     ConstAndLet,
+	"decorators":                        Decorators,
+	"default-argument":                  DefaultArgument,
+	"destructuring":                     Destructuring,
+	"dynamic-import":                    DynamicImport,
+	"exponent-operator":                 ExponentOperator,
+	"export-star-as":                    ExportStarAs,
+	"for-await":                         ForAwait,
+	"for-of":                            ForOf,
+	"function-name-configurable":        FunctionNameConfigurable,
+	"function-or-class-property-access": FunctionOrClassPropertyAccess,
+	"generator":                         Generator,
+	"hashbang":                          Hashbang,
+	"import-assertions":                 ImportAssertions,
+	"import-attributes":                 ImportAttributes,
+	"import-meta":                       ImportMeta,
+	"inline-script":                     InlineScript,
+	"logical-assignment":                LogicalAssignment,
+	"nested-rest-binding":               NestedRestBinding,
+	"new-target":                        NewTarget,
+	"node-colon-prefix-import":          NodeColonPrefixImport,
+	"node-colon-prefix-require":         NodeColonPrefixRequire,
+	"nullish-coalescing":                NullishCoalescing,
+	"object-accessors":                  ObjectAccessors,
+	"object-extensions":                 ObjectExtensions,
+	"object-rest-spread":                ObjectRestSpread,
+	"optional-catch-binding":            OptionalCatchBinding,
+	"optional-chain":                    OptionalChain,
+	"regexp-dot-all-flag":               RegexpDotAllFlag,
+	"regexp-lookbehind-assertions":      RegexpLookbehindAssertions,
+	"regexp-match-indices":              RegexpMatchIndices,
+	"regexp-named-capture-groups":       RegexpNamedCaptureGroups,
+	"regexp-set-notation":               RegexpSetNotation,
+	"regexp-sticky-and-unicode-flags":   RegexpStickyAndUnicodeFlags,
+	"regexp-unicode-property-escapes":   RegexpUnicodePropertyEscapes,
+	"rest-argument":                     RestArgument,
+	"template-literal":                  TemplateLiteral,
+	"top-level-await":                   TopLevelAwait,
+	"typeof-exotic-object-is-object":    TypeofExoticObjectIsObject,
+	"unicode-escapes":                   UnicodeEscapes,
+	"using":                             Using,
+}
+
+// Has reports whether any of the bits in "feature" are set in "features".
+func (features JSFeature) Has(feature JSFeature) bool {
+	return (features & feature) != 0
+}
+
+// ApplyOverrides replaces the bits selected by "mask" with the corresponding
+// bits from "overrides", leaving all other bits unchanged.
+func (features JSFeature) ApplyOverrides(overrides JSFeature, mask JSFeature) JSFeature {
+	return (features & ^mask) | (overrides & mask)
+}
+
+var jsTable = map[JSFeature]map[Engine][]versionRange{
+	ArbitraryModuleNamespaceNames: {
+		Chrome:  {{start: v{90, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{87, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Node:    {{start: v{16, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	ArraySpread: {
+		// Note: The latest version of "IE" failed 15 tests including: spread syntax for iterable objects: spreading non-iterables is a runtime error
+		// Note: The latest version of "Rhino" failed 15 tests including: spread syntax for iterable objects: spreading non-iterables is a runtime error
+		Chrome:  {{start: v{46, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{36, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{5, 0, 0}}},
+		Opera:   {{start: v{33, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	Arrow: {
+		// Note: The latest version of "Hermes" failed 3 tests including: arrow functions: lexical "super" binding in constructors
+		// Note: The latest version of "IE" failed 13 tests including: arrow functions: "this" unchanged by call or apply
+		// Note: The latest version of "Rhino" failed 3 tests including: arrow functions: lexical "new.target" binding
+		Chrome:  {{start: v{49, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{45, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{36, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	AsyncAwait: {
+		// Note: The latest version of "Hermes" failed 4 tests including: async functions: async arrow functions
+		// Note: The latest version of "IE" failed 16 tests including: async functions: async arrow functions
+		// Note: The latest version of "Rhino" failed 16 tests including: async functions: async arrow functions
+		Chrome:  {{start: v{55, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{15, 0, 0}}},
+		ES:      {{start: v{2017, 0, 0}}},
+		Firefox: {{start: v{52, 0, 0}}},
+		IOS:     {{start: v{11, 0, 0}}},
+		Node:    {{start: v{7, 6, 0}}},
+		Opera:   {{start: v{42, 0, 0}}},
+		Safari:  {{start: v{11, 0, 0}}},
+	},
+	AsyncGenerator: {
+		// Note: The latest version of "Hermes" failed this test: Asynchronous Iterators: async generators
+		// Note: The latest version of "IE" failed this test: Asynchronous Iterators: async generators
+		// Note: The latest version of "Rhino" failed this test: Asynchronous Iterators: async generators
+		Chrome:  {{start: v{63, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{57, 0, 0}}},
+		IOS:     {{start: v{12, 0, 0}}},
+		Node:    {{start: v{10, 0, 0}}},
+		Opera:   {{start: v{50, 0, 0}}},
+		Safari:  {{start: v{12, 0, 0}}},
+	},
+	Bigint: {
+		// Note: The latest version of "IE" failed this test: BigInt: basic functionality
+		Chrome:  {{start: v{67, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{68, 0, 0}}},
+		Hermes:  {{start: v{0, 12, 0}}},
+		IOS:     {{start: v{14, 0, 0}}},
+		Node:    {{start: v{10, 4, 0}}},
+		Opera:   {{start: v{54, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 14}}},
+		Safari:  {{start: v{14, 0, 0}}},
+	},
+	Class: {
+		// Note: The latest version of "Hermes" failed 24 tests including: class: accessor properties
+		// Note: The latest version of "IE" failed 24 tests including: class: accessor properties
+		// Note: The latest version of "Rhino" failed 24 tests including: class: accessor properties
+		Chrome:  {{start: v{49, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{45, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{36, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	ClassField: {
+		// Note: The latest version of "Hermes" failed 2 tests including: instance class fields: computed instance class fields
+		// Note: The latest version of "IE" failed 2 tests including: instance class fields: computed instance class fields
+		// Note: The latest version of "Rhino" failed 2 tests including: instance class fields: computed instance class fields
+		Chrome:  {{start: v{73, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{69, 0, 0}}},
+		IOS:     {{start: v{14, 0, 0}}},
+		Node:    {{start: v{12, 0, 0}}},
+		Opera:   {{start: v{60, 0, 0}}},
+		Safari:  {{start: v{14, 0, 0}}},
+	},
+	ClassPrivateAccessor: {
+		// Note: The latest version of "Hermes" failed this test: private class methods: private accessor properties
+		// Note: The latest version of "IE" failed this test: private class methods: private accessor properties
+		// Note: The latest version of "Rhino" failed this test: private class methods: private accessor properties
+		Chrome:  {{start: v{84, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{84, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{14, 6, 0}}},
+		Opera:   {{start: v{70, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	ClassPrivateBrandCheck: {
+		// Note: The latest version of "Hermes" failed this test: Ergonomic brand checks for private fields
+		// Note: The latest version of "IE" failed this test: Ergonomic brand checks for private fields
+		// Note: The latest version of "Rhino" failed this test: Ergonomic brand checks for private fields
+		Chrome:  {{start: v{91, 0, 0}}},
+		Deno:    {{start: v{1, 9, 0}}},
+		Edge:    {{start: v{91, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{16, 4, 0}}},
+		Opera:   {{start: v{77, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	ClassPrivateField: {
+		// Note: The latest version of "Hermes" failed 4 tests including: instance class fields: optional deep private instance class fields access
+		// Note: The latest version of "IE" failed 4 tests including: instance class fields: optional deep private instance class fields access
+		// Note: The latest version of "Rhino" failed 4 tests including: instance class fields: optional deep private instance class fields access
+		Chrome:  {{start: v{84, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{84, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Node:    {{start: v{14, 6, 0}}},
+		Opera:   {{start: v{70, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	ClassPrivateMethod: {
+		// Note: The latest version of "Hermes" failed this test: private class methods: private instance methods
+		// Note: The latest version of "IE" failed this test: private class methods: private instance methods
+		// Note: The latest version of "Rhino" failed this test: private class methods: private instance methods
+		Chrome:  {{start: v{84, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{84, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{14, 6, 0}}},
+		Opera:   {{start: v{70, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	ClassPrivateStaticAccessor: {
+		// Note: The latest version of "Hermes" failed this test: private class methods: private static accessor properties
+		// Note: The latest version of "IE" failed this test: private class methods: private static accessor properties
+		// Note: The latest version of "Rhino" failed this test: private class methods: private static accessor properties
+		Chrome:  {{start: v{84, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{84, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{14, 6, 0}}},
+		Opera:   {{start: v{70, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	ClassPrivateStaticField: {
+		// Note: The latest version of "Hermes" failed this test: static class fields: private static class fields
+		// Note: The latest version of "IE" failed this test: static class fields: private static class fields
+		// Note: The latest version of "Rhino" failed this test: static class fields: private static class fields
+		Chrome:  {{start: v{74, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Node:    {{start: v{12, 0, 0}}},
+		Opera:   {{start: v{62, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	ClassPrivateStaticMethod: {
+		// Note: The latest version of "Hermes" failed this test: private class methods: private static methods
+		// Note: The latest version of "IE" failed this test: private class methods: private static methods
+		// Note: The latest version of "Rhino" failed this test: private class methods: private static methods
+		Chrome:  {{start: v{84, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{84, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{90, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{14, 6, 0}}},
+		Opera:   {{start: v{70, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	ClassStaticBlocks: {
+		Chrome:  {{start: v{91, 0, 0}}},
+		Deno:    {{start: v{1, 14, 0}}},
+		Edge:    {{start: v{94, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{93, 0, 0}}},
+		IOS:     {{start: v{16, 4, 0}}},
+		Node:    {{start: v{16, 11, 0}}},
+		Opera:   {{start: v{80, 0, 0}}},
+		Safari:  {{start: v{16, 4, 0}}},
+	},
+	ClassStaticField: {
+		// Note: The latest version of "Hermes" failed 2 tests including: static class fields: computed static class fields
+		// Note: The latest version of "IE" failed 2 tests including: static class fields: computed static class fields
+		// Note: The latest version of "Rhino" failed 2 tests including: static class fields: computed static class fields
+		Chrome:  {{start: v{73, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{75, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Node:    {{start: v{12, 0, 0}}},
+		Opera:   {{start: v{60, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	ConstAndLet: {
+		// Note: The latest version of "Hermes" failed 20 tests including: const: for loop statement scope
+		// Note: The latest version of "IE" failed 6 tests including: const: for-in loop iteration scope
+		// Note: The latest version of "Rhino" failed 22 tests including: const: cannot be in statements
+		Chrome:  {{start: v{49, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{14, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{51, 0, 0}}},
+		IOS:     {{start: v{11, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{36, 0, 0}}},
+		Safari:  {{start: v{11, 0, 0}}},
+	},
+	Decorators: {},
+	DefaultArgument: {
+		// Note: The latest version of "Hermes" failed 2 tests including: default function parameters: separate scope
+		// Note: The latest version of "IE" failed 7 tests including: default function parameters: arguments object interaction
+		// Note: The latest version of "Rhino" failed 7 tests including: default function parameters: arguments object interaction
+		Chrome:  {{start: v{49, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{14, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{53, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{36, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	Destructuring: {
+		// Note: The latest version of "Hermes" failed 3 tests including: destructuring, declarations: defaults, let temporal dead zone
+		// Note: The latest version of "IE" failed 71 tests including: destructuring, assignment: chained iterable destructuring
+		// Note: The latest version of "Rhino" failed 33 tests including: destructuring, assignment: computed properties
+		Chrome:  {{start: v{51, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{18, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{53, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 5, 0}}},
+		Opera:   {{start: v{38, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	DynamicImport: {
+		Chrome:  {{start: v{63, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{67, 0, 0}}},
+		IOS:     {{start: v{11, 0, 0}}},
+		Node:    {{start: v{12, 20, 0}, end: v{13, 0, 0}}, {start: v{13, 2, 0}}},
+		Opera:   {{start: v{50, 0, 0}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	ExponentOperator: {
+		// Note: The latest version of "IE" failed 3 tests including: exponentiation (**) operator: assignment
+		Chrome:  {{start: v{52, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{14, 0, 0}}},
+		ES:      {{start: v{2016, 0, 0}}},
+		Firefox: {{start: v{52, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 3, 0}}},
+		Node:    {{start: v{7, 0, 0}}},
+		Opera:   {{start: v{39, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 14}}},
+		Safari:  {{start: v{10, 1, 0}}},
+	},
+	ExportStarAs: {
+		Chrome:  {{start: v{72, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{80, 0, 0}}},
+		IOS:     {{start: v{14, 5, 0}}},
+		Node:    {{start: v{13, 2, 0}}},
+		Opera:   {{start: v{60, 0, 0}}},
+		Safari:  {{start: v{14, 1, 0}}},
+	},
+	ForAwait: {
+		// Note: The latest version of "Hermes" failed this test: Asynchronous Iterators: for-await-of loops
+		// Note: The latest version of "IE" failed this test: Asynchronous Iterators: for-await-of loops
+		// Note: The latest version of "Rhino" failed this test: Asynchronous Iterators: for-await-of loops
+		Chrome:  {{start: v{63, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{57, 0, 0}}},
+		IOS:     {{start: v{12, 0, 0}}},
+		Node:    {{start: v{10, 0, 0}}},
+		Opera:   {{start: v{50, 0, 0}}},
+		Safari:  {{start: v{12, 0, 0}}},
+	},
+	ForOf: {
+		// Note: The latest version of "IE" failed 9 tests including: for..of loops: iterator closing, break
+		// Note: The latest version of "Rhino" failed 2 tests including: for..of loops: iterator closing, break
+		Chrome:  {{start: v{51, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{15, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{53, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 5, 0}}},
+		Opera:   {{start: v{38, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	FunctionNameConfigurable: {
+		// Note: The latest version of "IE" failed this test: function "name" property: isn't writable, is configurable
+		Chrome:  {{start: v{43, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{38, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{4, 0, 0}}},
+		Opera:   {{start: v{30, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 15}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	FunctionOrClassPropertyAccess: {
+		Chrome:  {{start: v{0, 0, 0}}},
+		Deno:    {{start: v{0, 0, 0}}},
+		Edge:    {{start: v{0, 0, 0}}},
+		ES:      {{start: v{0, 0, 0}}},
+		Firefox: {{start: v{0, 0, 0}}},
+		Hermes:  {{start: v{0, 0, 0}}},
+		IE:      {{start: v{0, 0, 0}}},
+		IOS:     {{start: v{0, 0, 0}}},
+		Node:    {{start: v{0, 0, 0}}},
+		Opera:   {{start: v{0, 0, 0}}},
+		Rhino:   {{start: v{0, 0, 0}}},
+		Safari:  {{start: v{16, 3, 0}}},
+	},
+	Generator: {
+		// Note: The latest version of "Hermes" failed 3 tests including: generators: computed shorthand generators, classes
+		// Note: The latest version of "IE" failed 27 tests including: generators: %GeneratorPrototype%
+		// Note: The latest version of "Rhino" failed 11 tests including: generators: %GeneratorPrototype%
+		Chrome:  {{start: v{50, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{53, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{37, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	Hashbang: {
+		// Note: The latest version of "IE" failed this test: Hashbang Grammar
+		Chrome:  {{start: v{74, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2023, 0, 0}}},
+		Firefox: {{start: v{67, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{13, 4, 0}}},
+		Node:    {{start: v{12, 5, 0}}},
+		Opera:   {{start: v{62, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 15}}},
+		Safari:  {{start: v{13, 1, 0}}},
+	},
+	ImportAssertions: {
+		Chrome: {{start: v{91, 0, 0}}},
+		Deno:   {{start: v{1, 17, 0}}},
+		Edge:   {{start: v{91, 0, 0}}},
+		Node:   {{start: v{16, 14, 0}, end: v{22, 0, 0}}},
+	},
+	ImportAttributes: {
+		Chrome: {{start: v{123, 0, 0}}},
+		Deno:   {{start: v{1, 37, 0}}},
+		Edge:   {{start: v{123, 0, 0}}},
+		IOS:    {{start: v{17, 2, 0}}},
+		Node:   {{start: v{18, 20, 0}, end: v{19, 0, 0}}, {start: v{20, 10, 0}}},
+		Opera:  {{start: v{109, 0, 0}}},
+		Safari: {{start: v{17, 2, 0}}},
+	},
+	ImportMeta: {
+		Chrome:  {{start: v{64, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{62, 0, 0}}},
+		IOS:     {{start: v{12, 0, 0}}},
+		Node:    {{start: v{10, 4, 0}}},
+		Opera:   {{start: v{51, 0, 0}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	InlineScript: {},
+	LogicalAssignment: {
+		// Note: The latest version of "IE" failed 9 tests including: Logical Assignment: &&= basic support
+		// Note: The latest version of "Rhino" failed 9 tests including: Logical Assignment: &&= basic support
+		Chrome:  {{start: v{85, 0, 0}}},
+		Deno:    {{start: v{1, 2, 0}}},
+		Edge:    {{start: v{85, 0, 0}}},
+		ES:      {{start: v{2021, 0, 0}}},
+		Firefox: {{start: v{79, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{14, 0, 0}}},
+		Node:    {{start: v{15, 0, 0}}},
+		Opera:   {{start: v{71, 0, 0}}},
+		Safari:  {{start: v{14, 0, 0}}},
+	},
+	NestedRestBinding: {
+		// Note: The latest version of "IE" failed 2 tests including: nested rest destructuring, declarations
+		// Note: The latest version of "Rhino" failed 2 tests including: nested rest destructuring, declarations
+		Chrome:  {{start: v{49, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{14, 0, 0}}},
+		ES:      {{start: v{2016, 0, 0}}},
+		Firefox: {{start: v{47, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 3, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{36, 0, 0}}},
+		Safari:  {{start: v{10, 1, 0}}},
+	},
+	NewTarget: {
+		// Note: The latest version of "IE" failed 2 tests including: new.target: assignment is an early error
+		// Note: The latest version of "Rhino" failed 2 tests including: new.target: assignment is an early error
+		Chrome:  {{start: v{46, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{14, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{41, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{5, 0, 0}}},
+		Opera:   {{start: v{33, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	NodeColonPrefixImport: {
+		ES:   {{start: v{0, 0, 0}}},
+		Node: {{start: v{12, 20, 0}, end: v{13, 0, 0}}, {start: v{14, 13, 1}}},
+	},
+	NodeColonPrefixRequire: {
+		ES:   {{start: v{0, 0, 0}}},
+		Node: {{start: v{14, 18, 0}, end: v{15, 0, 0}}, {start: v{16, 0, 0}}},
+	},
+	NullishCoalescing: {
+		// Note: The latest version of "IE" failed this test: nullish coalescing operator (??)
+		// Note: The latest version of "Rhino" failed this test: nullish coalescing operator (??)
+		Chrome:  {{start: v{80, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{80, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{72, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{13, 4, 0}}},
+		Node:    {{start: v{14, 0, 0}}},
+		Opera:   {{start: v{67, 0, 0}}},
+		Safari:  {{start: v{13, 1, 0}}},
+	},
+	ObjectAccessors: {
+		Chrome:  {{start: v{5, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		ES:      {{start: v{5, 0, 0}}},
+		Firefox: {{start: v{2, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IE:      {{start: v{9, 0, 0}}},
+		IOS:     {{start: v{6, 0, 0}}},
+		Node:    {{start: v{0, 4, 0}}},
+		Opera:   {{start: v{10, 10, 0}}},
+		Rhino:   {{start: v{1, 7, 13}}},
+		Safari:  {{start: v{3, 1, 0}}},
+	},
+	ObjectExtensions: {
+		// Note: The latest version of "IE" failed 6 tests including: object literal extensions: computed accessors
+		// Note: The latest version of "Rhino" failed 3 tests including: object literal extensions: computed accessors
+		Chrome:  {{start: v{44, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{34, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{4, 0, 0}}},
+		Opera:   {{start: v{31, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	ObjectRestSpread: {
+		// Note: The latest version of "IE" failed 2 tests including: object rest/spread properties: object rest properties
+		// Note: The latest version of "Rhino" failed 2 tests including: object rest/spread properties: object rest properties
+		Chrome:  {{start: v{60, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{55, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{11, 3, 0}}},
+		Node:    {{start: v{8, 3, 0}}},
+		Opera:   {{start: v{47, 0, 0}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	OptionalCatchBinding: {
+		// Note: The latest version of "IE" failed 3 tests including: optional catch binding: await
+		// Note: The latest version of "Rhino" failed this test: optional catch binding: await
+		Chrome:  {{start: v{66, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2019, 0, 0}}},
+		Firefox: {{start: v{58, 0, 0}}},
+		Hermes:  {{start: v{0, 12, 0}}},
+		IOS:     {{start: v{11, 3, 0}}},
+		Node:    {{start: v{10, 0, 0}}},
+		Opera:   {{start: v{53, 0, 0}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	OptionalChain: {
+		// Note: The latest version of "IE" failed 5 tests including: optional chaining operator (?.): optional bracket access
+		// Note: The latest version of "Rhino" failed 5 tests including: optional chaining operator (?.): optional bracket access
+		Chrome:  {{start: v{91, 0, 0}}},
+		Deno:    {{start: v{1, 9, 0}}},
+		Edge:    {{start: v{91, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{74, 0, 0}}},
+		Hermes:  {{start: v{0, 12, 0}}},
+		IOS:     {{start: v{13, 4, 0}}},
+		Node:    {{start: v{16, 1, 0}}},
+		Opera:   {{start: v{77, 0, 0}}},
+		Safari:  {{start: v{13, 1, 0}}},
+	},
+	RegexpDotAllFlag: {
+		// Note: The latest version of "IE" failed this test: s (dotAll) flag for regular expressions
+		Chrome:  {{start: v{62, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{78, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{11, 3, 0}}},
+		Node:    {{start: v{8, 10, 0}}},
+		Opera:   {{start: v{49, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 15}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	RegexpLookbehindAssertions: {
+		// Note: The latest version of "IE" failed this test: RegExp Lookbehind Assertions
+		// Note: The latest version of "Rhino" failed this test: RegExp Lookbehind Assertions
+		Chrome:  {{start: v{62, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{78, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{16, 4, 0}}},
+		Node:    {{start: v{8, 10, 0}}},
+		Opera:   {{start: v{49, 0, 0}}},
+		Safari:  {{start: v{16, 4, 0}}},
+	},
+	RegexpMatchIndices: {
+		Chrome:  {{start: v{90, 0, 0}}},
+		Deno:    {{start: v{1, 8, 0}}},
+		Edge:    {{start: v{90, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{88, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{16, 0, 0}}},
+		Opera:   {{start: v{76, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	RegexpNamedCaptureGroups: {
+		// Note: The latest version of "Hermes" failed this test: RegExp named capture groups
+		// Note: The latest version of "IE" failed this test: RegExp named capture groups
+		// Note: The latest version of "Rhino" failed this test: RegExp named capture groups
+		Chrome:  {{start: v{64, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{79, 0, 0}}},
+		ES:      {{start: v{2018, 0, 0}}},
+		Firefox: {{start: v{78, 0, 0}}},
+		IOS:     {{start: v{11, 3, 0}}},
+		Node:    {{start: v{10, 0, 0}}},
+		Opera:   {{start: v{51, 0, 0}}},
+		Safari:  {{start: v{11, 1, 0}}},
+	},
+	RegexpSetNotation: {
+		ES: {{start: v{2024, 0, 0}}},
+	},
+	RegexpStickyAndUnicodeFlags: {
+		// Note: The latest version of "IE" failed 6 tests including: RegExp "y" and "u" flags: "u" flag
+		// Note: The latest version of "Rhino" failed 4 tests including: RegExp "y" and "u" flags: "u" flag
+		Chrome:  {{start: v{50, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{46, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{12, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{37, 0, 0}}},
+		Safari:  {{start: v{12, 0, 0}}},
+	},
+	RegexpUnicodePropertyEscapes: {
+		// Note: The latest version of "Chrome" failed this test: RegExp Unicode Property Escapes: Unicode 16.0
+		// Note: The latest version of "Edge" failed this test: RegExp Unicode Property Escapes: Unicode 16.0
+		// Note: The latest version of "Firefox" failed 2 tests including: RegExp Unicode Property Escapes: Unicode 15.1
+		// Note: The latest version of "Hermes" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11
+		// Note: The latest version of "IE" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11
+		// Note: The latest version of "IOS" failed this test: RegExp Unicode Property Escapes: Unicode 16.0
+		// Note: The latest version of "Node" failed this test: RegExp Unicode Property Escapes: Unicode 16.0
+		// Note: The latest version of "Rhino" failed 8 tests including: RegExp Unicode Property Escapes: Unicode 11
+		// Note: The latest version of "Safari" failed this test: RegExp Unicode Property Escapes: Unicode 16.0
+		ES:    {{start: v{2018, 0, 0}}},
+		Opera: {{start: v{111, 0, 0}}},
+	},
+	RestArgument: {
+		// Note: The latest version of "Hermes" failed this test: rest parameters: function 'length' property
+		// Note: The latest version of "IE" failed 5 tests including: rest parameters: arguments object interaction
+		// Note: The latest version of "Rhino" failed 2 tests including: rest parameters: arguments object interaction
+		Chrome:  {{start: v{47, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{43, 0, 0}}},
+		IOS:     {{start: v{10, 0, 0}}},
+		Node:    {{start: v{6, 0, 0}}},
+		Opera:   {{start: v{34, 0, 0}}},
+		Safari:  {{start: v{10, 0, 0}}},
+	},
+	TemplateLiteral: {
+		// Note: The latest version of "Hermes" failed this test: template literals: TemplateStrings call site caching
+		// Note: The latest version of "IE" failed 7 tests including: template literals: TemplateStrings call site caching
+		// Note: The latest version of "Rhino" failed this test: template literals: toString conversion
+		Chrome:  {{start: v{41, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{13, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{34, 0, 0}}},
+		IOS:     {{start: v{13, 0, 0}}},
+		Node:    {{start: v{10, 0, 0}}},
+		Opera:   {{start: v{28, 0, 0}}},
+		Safari:  {{start: v{13, 0, 0}}},
+	},
+	TopLevelAwait: {
+		Chrome:  {{start: v{89, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{89, 0, 0}}},
+		ES:      {{start: v{2022, 0, 0}}},
+		Firefox: {{start: v{89, 0, 0}}},
+		IOS:     {{start: v{15, 0, 0}}},
+		Node:    {{start: v{14, 8, 0}}},
+		Opera:   {{start: v{75, 0, 0}}},
+		Safari:  {{start: v{15, 0, 0}}},
+	},
+	TypeofExoticObjectIsObject: {
+		Chrome:  {{start: v{0, 0, 0}}},
+		Deno:    {{start: v{0, 0, 0}}},
+		Edge:    {{start: v{0, 0, 0}}},
+		ES:      {{start: v{2020, 0, 0}}},
+		Firefox: {{start: v{0, 0, 0}}},
+		Hermes:  {{start: v{0, 0, 0}}},
+		IOS:     {{start: v{0, 0, 0}}},
+		Node:    {{start: v{0, 0, 0}}},
+		Opera:   {{start: v{0, 0, 0}}},
+		Rhino:   {{start: v{0, 0, 0}}},
+		Safari:  {{start: v{0, 0, 0}}},
+	},
+	UnicodeEscapes: {
+		// Note: The latest version of "IE" failed 2 tests including: Unicode code point escapes: in identifiers
+		Chrome:  {{start: v{44, 0, 0}}},
+		Deno:    {{start: v{1, 0, 0}}},
+		Edge:    {{start: v{12, 0, 0}}},
+		ES:      {{start: v{2015, 0, 0}}},
+		Firefox: {{start: v{53, 0, 0}}},
+		Hermes:  {{start: v{0, 7, 0}}},
+		IOS:     {{start: v{9, 0, 0}}},
+		Node:    {{start: v{4, 0, 0}}},
+		Opera:   {{start: v{31, 0, 0}}},
+		Rhino:   {{start: v{1, 7, 15}}},
+		Safari:  {{start: v{9, 0, 0}}},
+	},
+	Using: {},
+}
+
+// Return all features that are not available in at least one environment
+func UnsupportedJSFeatures(constraints map[Engine]Semver) (unsupported JSFeature) {
+	for feature, engines := range jsTable {
+		if feature == InlineScript {
+			continue // This is purely user-specified
+		}
+		for engine, version := range constraints {
+			if versionRanges, ok := engines[engine]; !ok || !isVersionSupported(versionRanges, version) {
+				unsupported |= feature
+			}
+		}
+	}
+	return
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/config/config.go b/source/vendor/github.com/evanw/esbuild/internal/config/config.go
new file mode 100644
index 0000000..615d688
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/config/config.go
@@ -0,0 +1,842 @@
+package config
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"sync"
+	"sync/atomic"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type JSXOptions struct {
+	Factory          DefineExpr
+	Fragment         DefineExpr
+	Parse            bool
+	Preserve         bool
+	AutomaticRuntime bool
+	ImportSource     string
+	Development      bool
+	SideEffects      bool
+}
+
+type TSJSX uint8
+
+const (
+	TSJSXNone TSJSX = iota
+	TSJSXPreserve
+	TSJSXReactNative
+	TSJSXReact
+	TSJSXReactJSX
+	TSJSXReactJSXDev
+)
+
+type TSOptions struct {
+	Config              TSConfig
+	Parse               bool
+	NoAmbiguousLessThan bool
+}
+
+type TSConfigJSX struct {
+	// If not empty, these should override the default values
+	JSXFactory         []string // Default if empty: "React.createElement"
+	JSXFragmentFactory []string // Default if empty: "React.Fragment"
+	JSXImportSource    *string  // Default if empty: "react"
+	JSX                TSJSX
+}
+
+// This is used for "extends" in "tsconfig.json"
+func (derived *TSConfigJSX) ApplyExtendedConfig(base TSConfigJSX) {
+	if base.JSXFactory != nil {
+		derived.JSXFactory = base.JSXFactory
+	}
+	if base.JSXFragmentFactory != nil {
+		derived.JSXFragmentFactory = base.JSXFragmentFactory
+	}
+	if base.JSXImportSource != nil {
+		derived.JSXImportSource = base.JSXImportSource
+	}
+	if base.JSX != TSJSXNone {
+		derived.JSX = base.JSX
+	}
+}
+
+func (tsConfig *TSConfigJSX) ApplyTo(jsxOptions *JSXOptions) {
+	switch tsConfig.JSX {
+	case TSJSXPreserve, TSJSXReactNative:
+		// Deliberately don't set "Preserve = true" here. Some tools from Vercel
+		// apparently automatically set "jsx": "preserve" in "tsconfig.json" and
+		// people are then confused when esbuild preserves their JSX. Ignoring this
+		// value means you now have to explicitly pass "--jsx=preserve" to esbuild
+		// to get this behavior.
+
+	case TSJSXReact:
+		jsxOptions.AutomaticRuntime = false
+		jsxOptions.Development = false
+
+	case TSJSXReactJSX:
+		jsxOptions.AutomaticRuntime = true
+		// Deliberately don't set "Development = false" here. People want to be
+		// able to have "react-jsx" in their "tsconfig.json" file and then swap
+		// that to "react-jsxdev" by passing "--jsx-dev" to esbuild.
+
+	case TSJSXReactJSXDev:
+		jsxOptions.AutomaticRuntime = true
+		jsxOptions.Development = true
+	}
+
+	if len(tsConfig.JSXFactory) > 0 {
+		jsxOptions.Factory = DefineExpr{Parts: tsConfig.JSXFactory}
+	}
+
+	if len(tsConfig.JSXFragmentFactory) > 0 {
+		jsxOptions.Fragment = DefineExpr{Parts: tsConfig.JSXFragmentFactory}
+	}
+
+	if tsConfig.JSXImportSource != nil {
+		jsxOptions.ImportSource = *tsConfig.JSXImportSource
+	}
+}
+
+// Note: This can currently only contain primitive values. It's compared
+// for equality using a structural equality comparison by the JS parser.
+type TSConfig struct {
+	ExperimentalDecorators  MaybeBool
+	ImportsNotUsedAsValues  TSImportsNotUsedAsValues
+	PreserveValueImports    MaybeBool
+	Target                  TSTarget
+	UseDefineForClassFields MaybeBool
+	VerbatimModuleSyntax    MaybeBool
+}
+
+// This is used for "extends" in "tsconfig.json"
+func (derived *TSConfig) ApplyExtendedConfig(base TSConfig) {
+	if base.ExperimentalDecorators != Unspecified {
+		derived.ExperimentalDecorators = base.ExperimentalDecorators
+	}
+	if base.ImportsNotUsedAsValues != TSImportsNotUsedAsValues_None {
+		derived.ImportsNotUsedAsValues = base.ImportsNotUsedAsValues
+	}
+	if base.PreserveValueImports != Unspecified {
+		derived.PreserveValueImports = base.PreserveValueImports
+	}
+	if base.Target != TSTargetUnspecified {
+		derived.Target = base.Target
+	}
+	if base.UseDefineForClassFields != Unspecified {
+		derived.UseDefineForClassFields = base.UseDefineForClassFields
+	}
+	if base.VerbatimModuleSyntax != Unspecified {
+		derived.VerbatimModuleSyntax = base.VerbatimModuleSyntax
+	}
+}
+
+func (cfg *TSConfig) UnusedImportFlags() (flags TSUnusedImportFlags) {
+	if cfg.VerbatimModuleSyntax == True {
+		return TSUnusedImport_KeepStmt | TSUnusedImport_KeepValues
+	}
+	if cfg.PreserveValueImports == True {
+		flags |= TSUnusedImport_KeepValues
+	}
+	if cfg.ImportsNotUsedAsValues == TSImportsNotUsedAsValues_Preserve || cfg.ImportsNotUsedAsValues == TSImportsNotUsedAsValues_Error {
+		flags |= TSUnusedImport_KeepStmt
+	}
+	return
+}
+
+type Platform uint8
+
+const (
+	PlatformBrowser Platform = iota
+	PlatformNode
+	PlatformNeutral
+)
+
+type SourceMap uint8
+
+const (
+	SourceMapNone SourceMap = iota
+	SourceMapInline
+	SourceMapLinkedWithComment
+	SourceMapExternalWithoutComment
+	SourceMapInlineAndExternal
+)
+
+type LegalComments uint8
+
+const (
+	LegalCommentsInline LegalComments = iota
+	LegalCommentsNone
+	LegalCommentsEndOfFile
+	LegalCommentsLinkedWithComment
+	LegalCommentsExternalWithoutComment
+)
+
+func (lc LegalComments) HasExternalFile() bool {
+	return lc == LegalCommentsLinkedWithComment || lc == LegalCommentsExternalWithoutComment
+}
+
+type Loader uint8
+
+const (
+	LoaderNone Loader = iota
+	LoaderBase64
+	LoaderBinary
+	LoaderCopy
+	LoaderCSS
+	LoaderDataURL
+	LoaderDefault
+	LoaderEmpty
+	LoaderFile
+	LoaderGlobalCSS
+	LoaderJS
+	LoaderJSON
+	LoaderWithTypeJSON // Has a "with { type: 'json' }" attribute
+	LoaderJSX
+	LoaderLocalCSS
+	LoaderText
+	LoaderTS
+	LoaderTSNoAmbiguousLessThan // Used with ".mts" and ".cts"
+	LoaderTSX
+)
+
+var LoaderToString = []string{
+	"none",
+	"base64",
+	"binary",
+	"copy",
+	"css",
+	"dataurl",
+	"default",
+	"empty",
+	"file",
+	"global-css",
+	"js",
+	"json",
+	"json",
+	"jsx",
+	"local-css",
+	"text",
+	"ts",
+	"ts",
+	"tsx",
+}
+
+func (loader Loader) IsTypeScript() bool {
+	switch loader {
+	case LoaderTS, LoaderTSNoAmbiguousLessThan, LoaderTSX:
+		return true
+	}
+	return false
+}
+
+func (loader Loader) IsCSS() bool {
+	switch loader {
+	case
+		LoaderCSS, LoaderGlobalCSS, LoaderLocalCSS:
+		return true
+	}
+	return false
+}
+
+func (loader Loader) CanHaveSourceMap() bool {
+	switch loader {
+	case
+		LoaderJS, LoaderJSX,
+		LoaderTS, LoaderTSNoAmbiguousLessThan, LoaderTSX,
+		LoaderCSS, LoaderGlobalCSS, LoaderLocalCSS,
+		LoaderJSON, LoaderWithTypeJSON, LoaderText:
+		return true
+	}
+	return false
+}
+
+type Format uint8
+
+const (
+	// This is used when not bundling. It means to preserve whatever form the
+	// import or export was originally in. ES6 syntax stays ES6 syntax and
+	// CommonJS syntax stays CommonJS syntax.
+	FormatPreserve Format = iota
+
+	// IIFE stands for immediately-invoked function expression. That looks like
+	// this:
+	//
+	//   (() => {
+	//     ... bundled code ...
+	//   })();
+	//
+	// If the optional GlobalName is configured, then we'll write out this:
+	//
+	//   let globalName = (() => {
+	//     ... bundled code ...
+	//     return exports;
+	//   })();
+	//
+	FormatIIFE
+
+	// The CommonJS format looks like this:
+	//
+	//   ... bundled code ...
+	//   module.exports = exports;
+	//
+	FormatCommonJS
+
+	// The ES module format looks like this:
+	//
+	//   ... bundled code ...
+	//   export {...};
+	//
+	FormatESModule
+)
+
+func (f Format) KeepESMImportExportSyntax() bool {
+	return f == FormatPreserve || f == FormatESModule
+}
+
+func (f Format) String() string {
+	switch f {
+	case FormatIIFE:
+		return "iife"
+	case FormatCommonJS:
+		return "cjs"
+	case FormatESModule:
+		return "esm"
+	}
+	return ""
+}
+
+type StdinInfo struct {
+	Contents      string
+	SourceFile    string
+	AbsResolveDir string
+	Loader        Loader
+}
+
+type WildcardPattern struct {
+	Prefix string
+	Suffix string
+}
+
+type ExternalMatchers struct {
+	Exact    map[string]bool
+	Patterns []WildcardPattern
+}
+
+func (matchers ExternalMatchers) HasMatchers() bool {
+	return len(matchers.Exact) > 0 || len(matchers.Patterns) > 0
+}
+
+type ExternalSettings struct {
+	PreResolve  ExternalMatchers
+	PostResolve ExternalMatchers
+}
+
+type APICall uint8
+
+const (
+	BuildCall APICall = iota
+	TransformCall
+)
+
+type Mode uint8
+
+const (
+	ModePassThrough Mode = iota
+	ModeConvertFormat
+	ModeBundle
+)
+
+type MaybeBool uint8
+
+const (
+	Unspecified MaybeBool = iota
+	True
+	False
+)
+
+type CancelFlag struct {
+	uint32
+}
+
+func (flag *CancelFlag) Cancel() {
+	atomic.StoreUint32(&flag.uint32, 1)
+}
+
+// This checks for nil in one place so we don't have to do that everywhere
+func (flag *CancelFlag) DidCancel() bool {
+	return flag != nil && atomic.LoadUint32(&flag.uint32) != 0
+}
+
+type Options struct {
+	ModuleTypeData js_ast.ModuleTypeData
+	Defines        *ProcessedDefines
+	TSAlwaysStrict *TSAlwaysStrict
+	MangleProps    *regexp.Regexp
+	ReserveProps   *regexp.Regexp
+	CancelFlag     *CancelFlag
+
+	// When mangling property names, call this function with a callback and do
+	// the property name mangling inside the callback. The callback takes an
+	// argument which is the mangle cache map to mutate. These callbacks are
+	// serialized so mutating the map does not require extra synchronization.
+	//
+	// This is a callback for determinism reasons. We may be building multiple
+	// entry points in parallel that are supposed to share a single cache. We
+	// don't want the order that each entry point mangles properties in to cause
+	// the output to change, so we serialize the property mangling over all entry
+	// points in entry point order. However, we still want to link everything in
+	// parallel so only property mangling is serialized, which is implemented by
+	// this function blocking until the previous entry point's property mangling
+	// has finished.
+	ExclusiveMangleCacheUpdate func(cb func(
+		mangleCache map[string]interface{},
+		cssUsedLocalNames map[string]bool,
+	))
+
+	// This is the original information that was used to generate the
+	// unsupported feature sets above. It's used for error messages.
+	OriginalTargetEnv string
+
+	DropLabels       []string
+	ExtensionOrder   []string
+	MainFields       []string
+	Conditions       []string
+	AbsNodePaths     []string // The "NODE_PATH" variable from Node.js
+	ExternalSettings ExternalSettings
+	ExternalPackages bool
+	PackageAliases   map[string]string
+
+	AbsOutputFile      string
+	AbsOutputDir       string
+	AbsOutputBase      string
+	OutputExtensionJS  string
+	OutputExtensionCSS string
+	GlobalName         []string
+	TSConfigPath       string
+	TSConfigRaw        string
+	ExtensionToLoader  map[string]Loader
+
+	PublicPath      string
+	InjectPaths     []string
+	InjectedDefines []InjectedDefine
+	InjectedFiles   []InjectedFile
+
+	JSBanner  string
+	JSFooter  string
+	CSSBanner string
+	CSSFooter string
+
+	EntryPathTemplate []PathTemplate
+	ChunkPathTemplate []PathTemplate
+	AssetPathTemplate []PathTemplate
+
+	Plugins    []Plugin
+	SourceRoot string
+	Stdin      *StdinInfo
+	JSX        JSXOptions
+	LineLimit  int
+
+	CSSPrefixData          map[css_ast.D]compat.CSSPrefix
+	UnsupportedJSFeatures  compat.JSFeature
+	UnsupportedCSSFeatures compat.CSSFeature
+
+	UnsupportedJSFeatureOverrides      compat.JSFeature
+	UnsupportedJSFeatureOverridesMask  compat.JSFeature
+	UnsupportedCSSFeatureOverrides     compat.CSSFeature
+	UnsupportedCSSFeatureOverridesMask compat.CSSFeature
+
+	TS                TSOptions
+	Mode              Mode
+	PreserveSymlinks  bool
+	MinifyWhitespace  bool
+	MinifyIdentifiers bool
+	MinifySyntax      bool
+	ProfilerNames     bool
+	CodeSplitting     bool
+	WatchMode         bool
+	AllowOverwrite    bool
+	LegalComments     LegalComments
+
+	// If true, make sure to generate a single file that can be written to stdout
+	WriteToStdout bool
+
+	OmitRuntimeForTests    bool
+	OmitJSXRuntimeForTests bool
+	ASCIIOnly              bool
+	KeepNames              bool
+	IgnoreDCEAnnotations   bool
+	TreeShaking            bool
+	DropDebugger           bool
+	MangleQuoted           bool
+	Platform               Platform
+	OutputFormat           Format
+	NeedsMetafile          bool
+	SourceMap              SourceMap
+	ExcludeSourcesContent  bool
+}
+
// TSImportsNotUsedAsValues mirrors the legacy "importsNotUsedAsValues"
// setting from "tsconfig.json". It is folded into TSUnusedImportFlags
// (see below) before use.
type TSImportsNotUsedAsValues uint8

const (
	TSImportsNotUsedAsValues_None TSImportsNotUsedAsValues = iota
	TSImportsNotUsedAsValues_Remove
	TSImportsNotUsedAsValues_Preserve
	TSImportsNotUsedAsValues_Error
)

// These flags represent the following separate "tsconfig.json" settings:
//
// - importsNotUsedAsValues
// - preserveValueImports
// - verbatimModuleSyntax
//
// TypeScript prefers for people to use "verbatimModuleSyntax" and has
// deprecated the other two settings, but we must still support them.
// All settings are combined into these two behavioral flags for us.
type TSUnusedImportFlags uint8

// With !TSUnusedImport_KeepStmt && !TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => ""
//	"import { unused } from 'foo'"      => ""
//	"import { type unused } from 'foo'" => ""
//
// With TSUnusedImport_KeepStmt && !TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import 'foo'"
//	"import { unused } from 'foo'"      => "import 'foo'"
//	"import { type unused } from 'foo'" => "import 'foo'"
//
// With !TSUnusedImport_KeepStmt && TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import * as unused from 'foo'"
//	"import { unused } from 'foo'"      => "import { unused } from 'foo'"
//	"import { type unused } from 'foo'" => ""
//
// With TSUnusedImport_KeepStmt && TSUnusedImport_KeepValues:
//
//	"import 'foo'"                      => "import 'foo'"
//	"import * as unused from 'foo'"     => "import * as unused from 'foo'"
//	"import { unused } from 'foo'"      => "import { unused } from 'foo'"
//	"import { type unused } from 'foo'" => "import {} from 'foo'"
const (
	TSUnusedImport_KeepStmt   TSUnusedImportFlags = 1 << iota // "importsNotUsedAsValues" != "remove"
	TSUnusedImport_KeepValues                                 // "preserveValueImports" == true
)

// TSTarget reduces the "target" setting from "tsconfig.json" to the one
// distinction that matters here: whether it is below ES2022, which flips
// the default value of "useDefineForClassFields".
type TSTarget uint8

const (
	TSTargetUnspecified     TSTarget = iota
	TSTargetBelowES2022              // "useDefineForClassFields" defaults to false
	TSTargetAtOrAboveES2022          // "useDefineForClassFields" defaults to true
)

// TSAlwaysStrict holds the effective "alwaysStrict" value from
// "tsconfig.json" together with where it was configured (the name/source/
// range are only used to point error messages at the right place).
type TSAlwaysStrict struct {
	// This information is only used for error messages
	Name   string
	Source logger.Source
	Range  logger.Range

	// This information can affect code transformation
	Value bool
}
+
// PathPlaceholder identifies one of the substitution tokens that may appear
// in an output path template such as "[dir]/[name]-[hash].[ext]".
type PathPlaceholder uint8

const (
	NoPlaceholder PathPlaceholder = iota

	// The relative path from the original parent directory to the configured
	// "outbase" directory, or to the lowest common ancestor directory
	DirPlaceholder

	// The original name of the file, or the manual chunk name, or the name of
	// the type of output file ("entry" or "chunk" or "asset")
	NamePlaceholder

	// A hash of the contents of this file, and the contents and output paths of
	// all dependencies (except for their hash placeholders)
	HashPlaceholder

	// The original extension of the file, or the name of the output file
	// (e.g. "css", "svg", "png")
	ExtPlaceholder
)

// PathTemplate is one segment of an output path template: a literal prefix
// followed by at most one placeholder token.
type PathTemplate struct {
	Data        string
	Placeholder PathPlaceholder
}

// PathPlaceholders carries the optional substitution values for each
// placeholder kind. A nil field means that placeholder has no value yet.
type PathPlaceholders struct {
	Dir  *string
	Name *string
	Hash *string
	Ext  *string
}

// Get returns the substitution value for the given placeholder kind, or nil
// if there is none (NoPlaceholder always yields nil).
func (placeholders PathPlaceholders) Get(placeholder PathPlaceholder) *string {
	var value *string
	switch placeholder {
	case DirPlaceholder:
		value = placeholders.Dir
	case NamePlaceholder:
		value = placeholders.Name
	case HashPlaceholder:
		value = placeholders.Hash
	case ExtPlaceholder:
		value = placeholders.Ext
	}
	return value
}

// TemplateToString renders a template back into its textual form,
// re-inserting the bracketed token (e.g. "[name]") for each placeholder.
func TemplateToString(template []PathTemplate) string {
	// Fast path: a single literal segment needs no string building at all
	if len(template) == 1 && template[0].Placeholder == NoPlaceholder {
		return template[0].Data
	}
	var b strings.Builder
	for _, segment := range template {
		b.WriteString(segment.Data)
		switch segment.Placeholder {
		case DirPlaceholder:
			b.WriteString("[dir]")
		case NamePlaceholder:
			b.WriteString("[name]")
		case HashPlaceholder:
			b.WriteString("[hash]")
		case ExtPlaceholder:
			b.WriteString("[ext]")
		}
	}
	return b.String()
}

// HasPlaceholder reports whether any segment of the template uses the given
// placeholder kind.
func HasPlaceholder(template []PathTemplate, placeholder PathPlaceholder) bool {
	for _, segment := range template {
		if segment.Placeholder == placeholder {
			return true
		}
	}
	return false
}

// SubstituteTemplate replaces every placeholder that has a value in
// "placeholders" with that value, merging adjacent literal segments as it
// goes. If no substitution or merge is possible, the input slice is
// returned unchanged.
func SubstituteTemplate(template []PathTemplate, placeholders PathPlaceholders) []PathTemplate {
	// Check up front whether any work is needed so the common "no change"
	// case can avoid allocating a new slice
	needsWork := false
	for i, segment := range template {
		mergeableLiteral := segment.Placeholder == NoPlaceholder && i+1 < len(template)
		if mergeableLiteral || placeholders.Get(segment.Placeholder) != nil {
			needsWork = true
			break
		}
	}
	if !needsWork {
		return template
	}

	// Substitute values and fold neighboring literal segments together
	merged := make([]PathTemplate, 0, len(template))
	for _, segment := range template {
		if value := placeholders.Get(segment.Placeholder); value != nil {
			segment.Data += *value
			segment.Placeholder = NoPlaceholder
		}
		if n := len(merged); n > 0 && merged[n-1].Placeholder == NoPlaceholder {
			// The previous segment is a pure literal, so this one merges into it
			merged[n-1].Data += segment.Data
			merged[n-1].Placeholder = segment.Placeholder
		} else {
			merged = append(merged, segment)
		}
	}
	return merged
}
+
+func ShouldCallRuntimeRequire(mode Mode, outputFormat Format) bool {
+	return mode == ModeBundle && outputFormat != FormatCommonJS
+}
+
// InjectedDefine describes a "--define" whose value is not a simple literal:
// the expression to substitute, the name it was defined under, and a
// synthetic source for it.
type InjectedDefine struct {
	Data   js_ast.E
	Name   string
	Source logger.Source
}

// InjectedFile records one injected file along with the exports it provides.
// DefineName is non-empty only for files generated from a "--define" (see
// the field comment below).
type InjectedFile struct {
	Exports      []InjectableExport
	DefineName   string // For injected files generated when you "--define" a non-literal
	Source       logger.Source
	IsCopyLoader bool // If you set the loader to "copy" (see https://github.com/evanw/esbuild/issues/3041)
}

// InjectableExport is a single named export of an injected file together
// with the location it came from.
type InjectableExport struct {
	Alias string
	Loc   logger.Loc
}
+
// filterCache memoizes compiled plugin filter regular expressions so the
// same filter string is only compiled once; filterMutex guards it.
var filterMutex sync.Mutex
var filterCache map[string]*regexp.Regexp

// compileFilter compiles a plugin filter, returning a cached result when the
// same filter has been seen before. It returns nil for an empty filter and
// for a filter that is not a valid Go regular expression.
func compileFilter(filter string) *regexp.Regexp {
	// Must provide a filter
	if filter == "" {
		return nil
	}

	// Fast path: reuse a previously compiled regular expression
	filterMutex.Lock()
	cached, ok := filterCache[filter]
	filterMutex.Unlock()
	if ok {
		return cached
	}

	// Slow path: compile outside the lock so compilation doesn't serialize
	compiled, err := regexp.Compile(filter)
	if err != nil {
		return nil
	}

	// Publish the result for future lookups
	filterMutex.Lock()
	defer filterMutex.Unlock()
	if filterCache == nil {
		filterCache = make(map[string]*regexp.Regexp)
	}
	filterCache[filter] = compiled
	return compiled
}

// CompileFilterForPlugin is like compileFilter except that a missing or
// invalid filter is reported as an error naming the offending plugin and
// callback kind.
func CompileFilterForPlugin(pluginName string, kind string, filter string) (*regexp.Regexp, error) {
	if filter == "" {
		return nil, fmt.Errorf("[%s] %q is missing a filter", pluginName, kind)
	}
	if result := compileFilter(filter); result != nil {
		return result, nil
	}
	return nil, fmt.Errorf("[%s] %q filter is not a valid Go regular expression: %q", pluginName, kind, filter)
}
+
+func PluginAppliesToPath(path logger.Path, filter *regexp.Regexp, namespace string) bool {
+	return (namespace == "" || path.Namespace == namespace) && filter.MatchString(path.Text)
+}
+
////////////////////////////////////////////////////////////////////////////////
// Plugin API

// Plugin is one registered plugin: its name plus the callback sets it added.
type Plugin struct {
	Name      string
	OnStart   []OnStart
	OnResolve []OnResolve
	OnLoad    []OnLoad
}

// OnStart is a registered "onStart" callback.
type OnStart struct {
	Callback func() OnStartResult
	Name     string
}

// OnStartResult carries the messages and/or error produced by an "onStart"
// callback.
type OnStartResult struct {
	ThrownError error
	Msgs        []logger.Msg
}

// OnResolve is a registered "onResolve" callback together with the filter
// and optional namespace that gate when it runs (see PluginAppliesToPath).
type OnResolve struct {
	Filter    *regexp.Regexp
	Callback  func(OnResolveArgs) OnResolveResult
	Name      string
	Namespace string
}

// OnResolveArgs is the input passed to an "onResolve" callback.
type OnResolveArgs struct {
	Path       string
	ResolveDir string
	PluginData interface{}
	Importer   logger.Path
	Kind       ast.ImportKind
	With       logger.ImportAttributes
}

// OnResolveResult is what an "onResolve" callback returns: the resolved
// path, messages/errors, and files/directories to watch for rebuilds.
type OnResolveResult struct {
	PluginName string

	Msgs        []logger.Msg
	ThrownError error

	AbsWatchFiles []string
	AbsWatchDirs  []string

	PluginData       interface{}
	Path             logger.Path
	External         bool
	IsSideEffectFree bool
}

// OnLoad is a registered "onLoad" callback together with the filter and
// optional namespace that gate when it runs.
type OnLoad struct {
	Filter    *regexp.Regexp
	Callback  func(OnLoadArgs) OnLoadResult
	Name      string
	Namespace string
}

// OnLoadArgs is the input passed to an "onLoad" callback.
type OnLoadArgs struct {
	PluginData interface{}
	Path       logger.Path
}

// OnLoadResult is what an "onLoad" callback returns: the loaded contents
// (nil means "not handled"), the loader to parse them with, messages/errors,
// and files/directories to watch for rebuilds.
type OnLoadResult struct {
	PluginName string

	Contents      *string
	AbsResolveDir string
	PluginData    interface{}

	Msgs        []logger.Msg
	ThrownError error

	AbsWatchFiles []string
	AbsWatchDirs  []string

	Loader Loader
}
+
+func PrettyPrintTargetEnvironment(originalTargetEnv string, unsupportedJSFeatureOverridesMask compat.JSFeature) (where string) {
+	where = "the configured target environment"
+	overrides := ""
+	if unsupportedJSFeatureOverridesMask != 0 {
+		count := 0
+		mask := unsupportedJSFeatureOverridesMask
+		for mask != 0 {
+			if (mask & 1) != 0 {
+				count++
+			}
+			mask >>= 1
+		}
+		s := "s"
+		if count == 1 {
+			s = ""
+		}
+		overrides = fmt.Sprintf(" + %d override%s", count, s)
+	}
+	if originalTargetEnv != "" {
+		where = fmt.Sprintf("%s (%s%s)", where, originalTargetEnv, overrides)
+	}
+	return
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/config/globals.go b/source/vendor/github.com/evanw/esbuild/internal/config/globals.go
new file mode 100644
index 0000000..4a77c02
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/config/globals.go
@@ -0,0 +1,1014 @@
+package config
+
+import (
+	"math"
+	"strings"
+	"sync"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+)
+
// processedGlobals caches the ProcessDefines() result for the common case of
// no user-specified defines; processedGlobalsMutex guards its lazy creation.
var processedGlobalsMutex sync.Mutex
var processedGlobals *ProcessedDefines
+
+// If something is in this list, then a direct identifier expression or property
+// access chain matching this will be assumed to have no side effects and will
+// be removed.
+//
+// This also means code is allowed to be reordered past things in this list. For
+// example, if "console.log" is in this list, permitting reordering allows for
+// "if (a) console.log(b); else console.log(c)" to be reordered and transformed
+// into "console.log(a ? b : c)". Notice that "a" and "console.log" are in a
+// different order, which can only happen if evaluating the "console.log"
+// property access can be assumed to not change the value of "a".
+//
+// Note that membership in this list says nothing about whether calling any of
+// these functions has any side effects. It only says something about
+// referencing these function without calling them.
+var knownGlobals = [][]string{
+	// These global identifiers should exist in all JavaScript environments. This
+	// deliberately omits "NaN", "Infinity", and "undefined" because these are
+	// treated as automatically-inlined constants instead of identifiers.
+	{"Array"},
+	{"Boolean"},
+	{"Function"},
+	{"Math"},
+	{"Number"},
+	{"Object"},
+	{"RegExp"},
+	{"String"},
+
+	// Object: Static methods
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object#Static_methods
+	{"Object", "assign"},
+	{"Object", "create"},
+	{"Object", "defineProperties"},
+	{"Object", "defineProperty"},
+	{"Object", "entries"},
+	{"Object", "freeze"},
+	{"Object", "fromEntries"},
+	{"Object", "getOwnPropertyDescriptor"},
+	{"Object", "getOwnPropertyDescriptors"},
+	{"Object", "getOwnPropertyNames"},
+	{"Object", "getOwnPropertySymbols"},
+	{"Object", "getPrototypeOf"},
+	{"Object", "is"},
+	{"Object", "isExtensible"},
+	{"Object", "isFrozen"},
+	{"Object", "isSealed"},
+	{"Object", "keys"},
+	{"Object", "preventExtensions"},
+	{"Object", "seal"},
+	{"Object", "setPrototypeOf"},
+	{"Object", "values"},
+
+	// Object: Instance methods
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object#Instance_methods
+	{"Object", "prototype", "__defineGetter__"},
+	{"Object", "prototype", "__defineSetter__"},
+	{"Object", "prototype", "__lookupGetter__"},
+	{"Object", "prototype", "__lookupSetter__"},
+	{"Object", "prototype", "hasOwnProperty"},
+	{"Object", "prototype", "isPrototypeOf"},
+	{"Object", "prototype", "propertyIsEnumerable"},
+	{"Object", "prototype", "toLocaleString"},
+	{"Object", "prototype", "toString"},
+	{"Object", "prototype", "unwatch"},
+	{"Object", "prototype", "valueOf"},
+	{"Object", "prototype", "watch"},
+
+	// Symbol: Static properties
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Symbol#static_properties
+	{"Symbol", "asyncDispose"},
+	{"Symbol", "asyncIterator"},
+	{"Symbol", "dispose"},
+	{"Symbol", "hasInstance"},
+	{"Symbol", "isConcatSpreadable"},
+	{"Symbol", "iterator"},
+	{"Symbol", "match"},
+	{"Symbol", "matchAll"},
+	{"Symbol", "replace"},
+	{"Symbol", "search"},
+	{"Symbol", "species"},
+	{"Symbol", "split"},
+	{"Symbol", "toPrimitive"},
+	{"Symbol", "toStringTag"},
+	{"Symbol", "unscopables"},
+
+	// Math: Static properties
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math#Static_properties
+	{"Math", "E"},
+	{"Math", "LN10"},
+	{"Math", "LN2"},
+	{"Math", "LOG10E"},
+	{"Math", "LOG2E"},
+	{"Math", "PI"},
+	{"Math", "SQRT1_2"},
+	{"Math", "SQRT2"},
+
+	// Math: Static methods
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Math#Static_methods
+	{"Math", "abs"},
+	{"Math", "acos"},
+	{"Math", "acosh"},
+	{"Math", "asin"},
+	{"Math", "asinh"},
+	{"Math", "atan"},
+	{"Math", "atan2"},
+	{"Math", "atanh"},
+	{"Math", "cbrt"},
+	{"Math", "ceil"},
+	{"Math", "clz32"},
+	{"Math", "cos"},
+	{"Math", "cosh"},
+	{"Math", "exp"},
+	{"Math", "expm1"},
+	{"Math", "floor"},
+	{"Math", "fround"},
+	{"Math", "hypot"},
+	{"Math", "imul"},
+	{"Math", "log"},
+	{"Math", "log10"},
+	{"Math", "log1p"},
+	{"Math", "log2"},
+	{"Math", "max"},
+	{"Math", "min"},
+	{"Math", "pow"},
+	{"Math", "random"},
+	{"Math", "round"},
+	{"Math", "sign"},
+	{"Math", "sin"},
+	{"Math", "sinh"},
+	{"Math", "sqrt"},
+	{"Math", "tan"},
+	{"Math", "tanh"},
+	{"Math", "trunc"},
+
+	// Reflect: Static methods
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Reflect#static_methods
+	{"Reflect", "apply"},
+	{"Reflect", "construct"},
+	{"Reflect", "defineProperty"},
+	{"Reflect", "deleteProperty"},
+	{"Reflect", "get"},
+	{"Reflect", "getOwnPropertyDescriptor"},
+	{"Reflect", "getPrototypeOf"},
+	{"Reflect", "has"},
+	{"Reflect", "isExtensible"},
+	{"Reflect", "ownKeys"},
+	{"Reflect", "preventExtensions"},
+	{"Reflect", "set"},
+	{"Reflect", "setPrototypeOf"},
+
+	// JSON: Static Methods
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/JSON#static_methods
+	{"JSON", "parse"},
+	{"JSON", "stringify"},
+
+	// Other globals present in both the browser and node (except "eval" because
+	// it has special behavior)
+	{"AbortController"},
+	{"AbortSignal"},
+	{"AggregateError"},
+	{"ArrayBuffer"},
+	{"BigInt"},
+	{"DataView"},
+	{"Date"},
+	{"Error"},
+	{"EvalError"},
+	{"Event"},
+	{"EventTarget"},
+	{"Float32Array"},
+	{"Float64Array"},
+	{"Int16Array"},
+	{"Int32Array"},
+	{"Int8Array"},
+	{"Intl"},
+	{"JSON"},
+	{"Map"},
+	{"MessageChannel"},
+	{"MessageEvent"},
+	{"MessagePort"},
+	{"Promise"},
+	{"Proxy"},
+	{"RangeError"},
+	{"ReferenceError"},
+	{"Reflect"},
+	{"Set"},
+	{"Symbol"},
+	{"SyntaxError"},
+	{"TextDecoder"},
+	{"TextEncoder"},
+	{"TypeError"},
+	{"URIError"},
+	{"URL"},
+	{"URLSearchParams"},
+	{"Uint16Array"},
+	{"Uint32Array"},
+	{"Uint8Array"},
+	{"Uint8ClampedArray"},
+	{"WeakMap"},
+	{"WeakSet"},
+	{"WebAssembly"},
+	{"clearInterval"},
+	{"clearTimeout"},
+	{"console"},
+	{"decodeURI"},
+	{"decodeURIComponent"},
+	{"encodeURI"},
+	{"encodeURIComponent"},
+	{"escape"},
+	{"globalThis"},
+	{"isFinite"},
+	{"isNaN"},
+	{"parseFloat"},
+	{"parseInt"},
+	{"queueMicrotask"},
+	{"setInterval"},
+	{"setTimeout"},
+	{"unescape"},
+
+	// Console method references are assumed to have no side effects
+	// https://developer.mozilla.org/en-US/docs/Web/API/console
+	{"console", "assert"},
+	{"console", "clear"},
+	{"console", "count"},
+	{"console", "countReset"},
+	{"console", "debug"},
+	{"console", "dir"},
+	{"console", "dirxml"},
+	{"console", "error"},
+	{"console", "group"},
+	{"console", "groupCollapsed"},
+	{"console", "groupEnd"},
+	{"console", "info"},
+	{"console", "log"},
+	{"console", "table"},
+	{"console", "time"},
+	{"console", "timeEnd"},
+	{"console", "timeLog"},
+	{"console", "trace"},
+	{"console", "warn"},
+
+	// CSSOM APIs
+	{"CSSAnimation"},
+	{"CSSFontFaceRule"},
+	{"CSSImportRule"},
+	{"CSSKeyframeRule"},
+	{"CSSKeyframesRule"},
+	{"CSSMediaRule"},
+	{"CSSNamespaceRule"},
+	{"CSSPageRule"},
+	{"CSSRule"},
+	{"CSSRuleList"},
+	{"CSSStyleDeclaration"},
+	{"CSSStyleRule"},
+	{"CSSStyleSheet"},
+	{"CSSSupportsRule"},
+	{"CSSTransition"},
+
+	// SVG DOM
+	{"SVGAElement"},
+	{"SVGAngle"},
+	{"SVGAnimateElement"},
+	{"SVGAnimateMotionElement"},
+	{"SVGAnimateTransformElement"},
+	{"SVGAnimatedAngle"},
+	{"SVGAnimatedBoolean"},
+	{"SVGAnimatedEnumeration"},
+	{"SVGAnimatedInteger"},
+	{"SVGAnimatedLength"},
+	{"SVGAnimatedLengthList"},
+	{"SVGAnimatedNumber"},
+	{"SVGAnimatedNumberList"},
+	{"SVGAnimatedPreserveAspectRatio"},
+	{"SVGAnimatedRect"},
+	{"SVGAnimatedString"},
+	{"SVGAnimatedTransformList"},
+	{"SVGAnimationElement"},
+	{"SVGCircleElement"},
+	{"SVGClipPathElement"},
+	{"SVGComponentTransferFunctionElement"},
+	{"SVGDefsElement"},
+	{"SVGDescElement"},
+	{"SVGElement"},
+	{"SVGEllipseElement"},
+	{"SVGFEBlendElement"},
+	{"SVGFEColorMatrixElement"},
+	{"SVGFEComponentTransferElement"},
+	{"SVGFECompositeElement"},
+	{"SVGFEConvolveMatrixElement"},
+	{"SVGFEDiffuseLightingElement"},
+	{"SVGFEDisplacementMapElement"},
+	{"SVGFEDistantLightElement"},
+	{"SVGFEDropShadowElement"},
+	{"SVGFEFloodElement"},
+	{"SVGFEFuncAElement"},
+	{"SVGFEFuncBElement"},
+	{"SVGFEFuncGElement"},
+	{"SVGFEFuncRElement"},
+	{"SVGFEGaussianBlurElement"},
+	{"SVGFEImageElement"},
+	{"SVGFEMergeElement"},
+	{"SVGFEMergeNodeElement"},
+	{"SVGFEMorphologyElement"},
+	{"SVGFEOffsetElement"},
+	{"SVGFEPointLightElement"},
+	{"SVGFESpecularLightingElement"},
+	{"SVGFESpotLightElement"},
+	{"SVGFETileElement"},
+	{"SVGFETurbulenceElement"},
+	{"SVGFilterElement"},
+	{"SVGForeignObjectElement"},
+	{"SVGGElement"},
+	{"SVGGeometryElement"},
+	{"SVGGradientElement"},
+	{"SVGGraphicsElement"},
+	{"SVGImageElement"},
+	{"SVGLength"},
+	{"SVGLengthList"},
+	{"SVGLineElement"},
+	{"SVGLinearGradientElement"},
+	{"SVGMPathElement"},
+	{"SVGMarkerElement"},
+	{"SVGMaskElement"},
+	{"SVGMatrix"},
+	{"SVGMetadataElement"},
+	{"SVGNumber"},
+	{"SVGNumberList"},
+	{"SVGPathElement"},
+	{"SVGPatternElement"},
+	{"SVGPoint"},
+	{"SVGPointList"},
+	{"SVGPolygonElement"},
+	{"SVGPolylineElement"},
+	{"SVGPreserveAspectRatio"},
+	{"SVGRadialGradientElement"},
+	{"SVGRect"},
+	{"SVGRectElement"},
+	{"SVGSVGElement"},
+	{"SVGScriptElement"},
+	{"SVGSetElement"},
+	{"SVGStopElement"},
+	{"SVGStringList"},
+	{"SVGStyleElement"},
+	{"SVGSwitchElement"},
+	{"SVGSymbolElement"},
+	{"SVGTSpanElement"},
+	{"SVGTextContentElement"},
+	{"SVGTextElement"},
+	{"SVGTextPathElement"},
+	{"SVGTextPositioningElement"},
+	{"SVGTitleElement"},
+	{"SVGTransform"},
+	{"SVGTransformList"},
+	{"SVGUnitTypes"},
+	{"SVGUseElement"},
+	{"SVGViewElement"},
+
+	// Other browser APIs
+	//
+	// This list contains all globals present in modern versions of Chrome, Safari,
+	// and Firefox except for the following properties, since they have a side effect
+	// of triggering layout (https://gist.github.com/paulirish/5d52fb081b3570c81e3a):
+	//
+	//   - scrollX
+	//   - scrollY
+	//   - innerWidth
+	//   - innerHeight
+	//   - pageXOffset
+	//   - pageYOffset
+	//
+	// The following globals have also been removed since they sometimes throw an
+	// exception when accessed, which is a side effect (for more information see
+	// https://stackoverflow.com/a/33047477):
+	//
+	//   - localStorage
+	//   - sessionStorage
+	//
+	{"AnalyserNode"},
+	{"Animation"},
+	{"AnimationEffect"},
+	{"AnimationEvent"},
+	{"AnimationPlaybackEvent"},
+	{"AnimationTimeline"},
+	{"Attr"},
+	{"Audio"},
+	{"AudioBuffer"},
+	{"AudioBufferSourceNode"},
+	{"AudioDestinationNode"},
+	{"AudioListener"},
+	{"AudioNode"},
+	{"AudioParam"},
+	{"AudioProcessingEvent"},
+	{"AudioScheduledSourceNode"},
+	{"BarProp"},
+	{"BeforeUnloadEvent"},
+	{"BiquadFilterNode"},
+	{"Blob"},
+	{"BlobEvent"},
+	{"ByteLengthQueuingStrategy"},
+	{"CDATASection"},
+	{"CSS"},
+	{"CanvasGradient"},
+	{"CanvasPattern"},
+	{"CanvasRenderingContext2D"},
+	{"ChannelMergerNode"},
+	{"ChannelSplitterNode"},
+	{"CharacterData"},
+	{"ClipboardEvent"},
+	{"CloseEvent"},
+	{"Comment"},
+	{"CompositionEvent"},
+	{"ConvolverNode"},
+	{"CountQueuingStrategy"},
+	{"Crypto"},
+	{"CustomElementRegistry"},
+	{"CustomEvent"},
+	{"DOMException"},
+	{"DOMImplementation"},
+	{"DOMMatrix"},
+	{"DOMMatrixReadOnly"},
+	{"DOMParser"},
+	{"DOMPoint"},
+	{"DOMPointReadOnly"},
+	{"DOMQuad"},
+	{"DOMRect"},
+	{"DOMRectList"},
+	{"DOMRectReadOnly"},
+	{"DOMStringList"},
+	{"DOMStringMap"},
+	{"DOMTokenList"},
+	{"DataTransfer"},
+	{"DataTransferItem"},
+	{"DataTransferItemList"},
+	{"DelayNode"},
+	{"Document"},
+	{"DocumentFragment"},
+	{"DocumentTimeline"},
+	{"DocumentType"},
+	{"DragEvent"},
+	{"DynamicsCompressorNode"},
+	{"Element"},
+	{"ErrorEvent"},
+	{"EventSource"},
+	{"File"},
+	{"FileList"},
+	{"FileReader"},
+	{"FocusEvent"},
+	{"FontFace"},
+	{"FormData"},
+	{"GainNode"},
+	{"Gamepad"},
+	{"GamepadButton"},
+	{"GamepadEvent"},
+	{"Geolocation"},
+	{"GeolocationPositionError"},
+	{"HTMLAllCollection"},
+	{"HTMLAnchorElement"},
+	{"HTMLAreaElement"},
+	{"HTMLAudioElement"},
+	{"HTMLBRElement"},
+	{"HTMLBaseElement"},
+	{"HTMLBodyElement"},
+	{"HTMLButtonElement"},
+	{"HTMLCanvasElement"},
+	{"HTMLCollection"},
+	{"HTMLDListElement"},
+	{"HTMLDataElement"},
+	{"HTMLDataListElement"},
+	{"HTMLDetailsElement"},
+	{"HTMLDirectoryElement"},
+	{"HTMLDivElement"},
+	{"HTMLDocument"},
+	{"HTMLElement"},
+	{"HTMLEmbedElement"},
+	{"HTMLFieldSetElement"},
+	{"HTMLFontElement"},
+	{"HTMLFormControlsCollection"},
+	{"HTMLFormElement"},
+	{"HTMLFrameElement"},
+	{"HTMLFrameSetElement"},
+	{"HTMLHRElement"},
+	{"HTMLHeadElement"},
+	{"HTMLHeadingElement"},
+	{"HTMLHtmlElement"},
+	{"HTMLIFrameElement"},
+	{"HTMLImageElement"},
+	{"HTMLInputElement"},
+	{"HTMLLIElement"},
+	{"HTMLLabelElement"},
+	{"HTMLLegendElement"},
+	{"HTMLLinkElement"},
+	{"HTMLMapElement"},
+	{"HTMLMarqueeElement"},
+	{"HTMLMediaElement"},
+	{"HTMLMenuElement"},
+	{"HTMLMetaElement"},
+	{"HTMLMeterElement"},
+	{"HTMLModElement"},
+	{"HTMLOListElement"},
+	{"HTMLObjectElement"},
+	{"HTMLOptGroupElement"},
+	{"HTMLOptionElement"},
+	{"HTMLOptionsCollection"},
+	{"HTMLOutputElement"},
+	{"HTMLParagraphElement"},
+	{"HTMLParamElement"},
+	{"HTMLPictureElement"},
+	{"HTMLPreElement"},
+	{"HTMLProgressElement"},
+	{"HTMLQuoteElement"},
+	{"HTMLScriptElement"},
+	{"HTMLSelectElement"},
+	{"HTMLSlotElement"},
+	{"HTMLSourceElement"},
+	{"HTMLSpanElement"},
+	{"HTMLStyleElement"},
+	{"HTMLTableCaptionElement"},
+	{"HTMLTableCellElement"},
+	{"HTMLTableColElement"},
+	{"HTMLTableElement"},
+	{"HTMLTableRowElement"},
+	{"HTMLTableSectionElement"},
+	{"HTMLTemplateElement"},
+	{"HTMLTextAreaElement"},
+	{"HTMLTimeElement"},
+	{"HTMLTitleElement"},
+	{"HTMLTrackElement"},
+	{"HTMLUListElement"},
+	{"HTMLUnknownElement"},
+	{"HTMLVideoElement"},
+	{"HashChangeEvent"},
+	{"Headers"},
+	{"History"},
+	{"IDBCursor"},
+	{"IDBCursorWithValue"},
+	{"IDBDatabase"},
+	{"IDBFactory"},
+	{"IDBIndex"},
+	{"IDBKeyRange"},
+	{"IDBObjectStore"},
+	{"IDBOpenDBRequest"},
+	{"IDBRequest"},
+	{"IDBTransaction"},
+	{"IDBVersionChangeEvent"},
+	{"Image"},
+	{"ImageData"},
+	{"InputEvent"},
+	{"IntersectionObserver"},
+	{"IntersectionObserverEntry"},
+	{"KeyboardEvent"},
+	{"KeyframeEffect"},
+	{"Location"},
+	{"MediaCapabilities"},
+	{"MediaElementAudioSourceNode"},
+	{"MediaEncryptedEvent"},
+	{"MediaError"},
+	{"MediaList"},
+	{"MediaQueryList"},
+	{"MediaQueryListEvent"},
+	{"MediaRecorder"},
+	{"MediaSource"},
+	{"MediaStream"},
+	{"MediaStreamAudioDestinationNode"},
+	{"MediaStreamAudioSourceNode"},
+	{"MediaStreamTrack"},
+	{"MediaStreamTrackEvent"},
+	{"MimeType"},
+	{"MimeTypeArray"},
+	{"MouseEvent"},
+	{"MutationEvent"},
+	{"MutationObserver"},
+	{"MutationRecord"},
+	{"NamedNodeMap"},
+	{"Navigator"},
+	{"Node"},
+	{"NodeFilter"},
+	{"NodeIterator"},
+	{"NodeList"},
+	{"Notification"},
+	{"OfflineAudioCompletionEvent"},
+	{"Option"},
+	{"OscillatorNode"},
+	{"PageTransitionEvent"},
+	{"Path2D"},
+	{"Performance"},
+	{"PerformanceEntry"},
+	{"PerformanceMark"},
+	{"PerformanceMeasure"},
+	{"PerformanceNavigation"},
+	{"PerformanceObserver"},
+	{"PerformanceObserverEntryList"},
+	{"PerformanceResourceTiming"},
+	{"PerformanceTiming"},
+	{"PeriodicWave"},
+	{"Plugin"},
+	{"PluginArray"},
+	{"PointerEvent"},
+	{"PopStateEvent"},
+	{"ProcessingInstruction"},
+	{"ProgressEvent"},
+	{"PromiseRejectionEvent"},
+	{"RTCCertificate"},
+	{"RTCDTMFSender"},
+	{"RTCDTMFToneChangeEvent"},
+	{"RTCDataChannel"},
+	{"RTCDataChannelEvent"},
+	{"RTCIceCandidate"},
+	{"RTCPeerConnection"},
+	{"RTCPeerConnectionIceEvent"},
+	{"RTCRtpReceiver"},
+	{"RTCRtpSender"},
+	{"RTCRtpTransceiver"},
+	{"RTCSessionDescription"},
+	{"RTCStatsReport"},
+	{"RTCTrackEvent"},
+	{"RadioNodeList"},
+	{"Range"},
+	{"ReadableStream"},
+	{"Request"},
+	{"ResizeObserver"},
+	{"ResizeObserverEntry"},
+	{"Response"},
+	{"Screen"},
+	{"ScriptProcessorNode"},
+	{"SecurityPolicyViolationEvent"},
+	{"Selection"},
+	{"ShadowRoot"},
+	{"SourceBuffer"},
+	{"SourceBufferList"},
+	{"SpeechSynthesisEvent"},
+	{"SpeechSynthesisUtterance"},
+	{"StaticRange"},
+	{"Storage"},
+	{"StorageEvent"},
+	{"StyleSheet"},
+	{"StyleSheetList"},
+	{"Text"},
+	{"TextMetrics"},
+	{"TextTrack"},
+	{"TextTrackCue"},
+	{"TextTrackCueList"},
+	{"TextTrackList"},
+	{"TimeRanges"},
+	{"TrackEvent"},
+	{"TransitionEvent"},
+	{"TreeWalker"},
+	{"UIEvent"},
+	{"VTTCue"},
+	{"ValidityState"},
+	{"VisualViewport"},
+	{"WaveShaperNode"},
+	{"WebGLActiveInfo"},
+	{"WebGLBuffer"},
+	{"WebGLContextEvent"},
+	{"WebGLFramebuffer"},
+	{"WebGLProgram"},
+	{"WebGLQuery"},
+	{"WebGLRenderbuffer"},
+	{"WebGLRenderingContext"},
+	{"WebGLSampler"},
+	{"WebGLShader"},
+	{"WebGLShaderPrecisionFormat"},
+	{"WebGLSync"},
+	{"WebGLTexture"},
+	{"WebGLUniformLocation"},
+	{"WebKitCSSMatrix"},
+	{"WebSocket"},
+	{"WheelEvent"},
+	{"Window"},
+	{"Worker"},
+	{"XMLDocument"},
+	{"XMLHttpRequest"},
+	{"XMLHttpRequestEventTarget"},
+	{"XMLHttpRequestUpload"},
+	{"XMLSerializer"},
+	{"XPathEvaluator"},
+	{"XPathExpression"},
+	{"XPathResult"},
+	{"XSLTProcessor"},
+	{"alert"},
+	{"atob"},
+	{"blur"},
+	{"btoa"},
+	{"cancelAnimationFrame"},
+	{"captureEvents"},
+	{"close"},
+	{"closed"},
+	{"confirm"},
+	{"customElements"},
+	{"devicePixelRatio"},
+	{"document"},
+	{"event"},
+	{"fetch"},
+	{"find"},
+	{"focus"},
+	{"frameElement"},
+	{"frames"},
+	{"getComputedStyle"},
+	{"getSelection"},
+	{"history"},
+	{"indexedDB"},
+	{"isSecureContext"},
+	{"length"},
+	{"location"},
+	{"locationbar"},
+	{"matchMedia"},
+	{"menubar"},
+	{"moveBy"},
+	{"moveTo"},
+	{"name"},
+	{"navigator"},
+	{"onabort"},
+	{"onafterprint"},
+	{"onanimationend"},
+	{"onanimationiteration"},
+	{"onanimationstart"},
+	{"onbeforeprint"},
+	{"onbeforeunload"},
+	{"onblur"},
+	{"oncanplay"},
+	{"oncanplaythrough"},
+	{"onchange"},
+	{"onclick"},
+	{"oncontextmenu"},
+	{"oncuechange"},
+	{"ondblclick"},
+	{"ondrag"},
+	{"ondragend"},
+	{"ondragenter"},
+	{"ondragleave"},
+	{"ondragover"},
+	{"ondragstart"},
+	{"ondrop"},
+	{"ondurationchange"},
+	{"onemptied"},
+	{"onended"},
+	{"onerror"},
+	{"onfocus"},
+	{"ongotpointercapture"},
+	{"onhashchange"},
+	{"oninput"},
+	{"oninvalid"},
+	{"onkeydown"},
+	{"onkeypress"},
+	{"onkeyup"},
+	{"onlanguagechange"},
+	{"onload"},
+	{"onloadeddata"},
+	{"onloadedmetadata"},
+	{"onloadstart"},
+	{"onlostpointercapture"},
+	{"onmessage"},
+	{"onmousedown"},
+	{"onmouseenter"},
+	{"onmouseleave"},
+	{"onmousemove"},
+	{"onmouseout"},
+	{"onmouseover"},
+	{"onmouseup"},
+	{"onoffline"},
+	{"ononline"},
+	{"onpagehide"},
+	{"onpageshow"},
+	{"onpause"},
+	{"onplay"},
+	{"onplaying"},
+	{"onpointercancel"},
+	{"onpointerdown"},
+	{"onpointerenter"},
+	{"onpointerleave"},
+	{"onpointermove"},
+	{"onpointerout"},
+	{"onpointerover"},
+	{"onpointerup"},
+	{"onpopstate"},
+	{"onprogress"},
+	{"onratechange"},
+	{"onrejectionhandled"},
+	{"onreset"},
+	{"onresize"},
+	{"onscroll"},
+	{"onseeked"},
+	{"onseeking"},
+	{"onselect"},
+	{"onstalled"},
+	{"onstorage"},
+	{"onsubmit"},
+	{"onsuspend"},
+	{"ontimeupdate"},
+	{"ontoggle"},
+	{"ontransitioncancel"},
+	{"ontransitionend"},
+	{"ontransitionrun"},
+	{"ontransitionstart"},
+	{"onunhandledrejection"},
+	{"onunload"},
+	{"onvolumechange"},
+	{"onwaiting"},
+	{"onwebkitanimationend"},
+	{"onwebkitanimationiteration"},
+	{"onwebkitanimationstart"},
+	{"onwebkittransitionend"},
+	{"onwheel"},
+	{"open"},
+	{"opener"},
+	{"origin"},
+	{"outerHeight"},
+	{"outerWidth"},
+	{"parent"},
+	{"performance"},
+	{"personalbar"},
+	{"postMessage"},
+	{"print"},
+	{"prompt"},
+	{"releaseEvents"},
+	{"requestAnimationFrame"},
+	{"resizeBy"},
+	{"resizeTo"},
+	{"screen"},
+	{"screenLeft"},
+	{"screenTop"},
+	{"screenX"},
+	{"screenY"},
+	{"scroll"},
+	{"scrollBy"},
+	{"scrollTo"},
+	{"scrollbars"},
+	{"self"},
+	{"speechSynthesis"},
+	{"status"},
+	{"statusbar"},
+	{"stop"},
+	{"toolbar"},
+	{"top"},
+	{"webkitURL"},
+	{"window"},
+}
+
// We currently only support compile-time replacement with certain expressions:
//
//   - Primitive literals
//   - Identifiers
//   - "Entity names" which are identifiers followed by property accesses
//
// We don't support arbitrary expressions because arbitrary expressions may
// require the full AST. For example, there could be "import()" or "require()"
// expressions that need an import record. We also need to re-generate some
// nodes such as identifiers within the injected context so that they can
// bind to symbols in that context. Other expressions such as "this" may
// also be contextual.
type DefineExpr struct {
	Constant            js_ast.E    // set when the replacement is a primitive literal
	Parts               []string    // set when the replacement is an identifier or entity name
	InjectedDefineIndex ast.Index32 // index used for non-literal defines — presumably into the injected defines; confirm against the linker
}

// DefineData is the value side of one define: an optional replacement
// expression plus behavior flags (see DefineFlags).
type DefineData struct {
	DefineExpr *DefineExpr
	Flags      DefineFlags
}
+
// DefineFlags is a bit set of behaviors attached to a define.
type DefineFlags uint8

const (
	// True if accessing this value is known to not have any side effects. For
	// example, a bare reference to "Object.create" can be removed because it
	// does not have any observable side effects.
	CanBeRemovedIfUnused DefineFlags = 1 << iota

	// True if a call to this value is known to not have any side effects. For
	// example, a bare call to "Object()" can be removed because it does not
	// have any observable side effects.
	CallCanBeUnwrappedIfUnused

	// If true, the user has indicated that every direct calls to a property on
	// this object and all of that call's arguments are to be removed from the
	// output, even when the arguments have side effects. This is used to
	// implement the "--drop:console" flag.
	MethodCallsMustBeReplacedWithUndefined

	// Symbol values are known to not have side effects when used as property
	// names in class declarations and object literals.
	IsSymbolInstance
)

// Has reports whether any of the bits in "flag" are set on "flags".
func (flags DefineFlags) Has(flag DefineFlags) bool {
	return flags&flag != 0
}
+
+func mergeDefineData(old DefineData, new DefineData) DefineData {
+	new.Flags |= old.Flags
+	return new
+}
+
// DotDefine is a define whose key is a dotted property chain such as
// "process.env.NODE_ENV", stored as its split parts.
type DotDefine struct {
	Data  DefineData
	Parts []string
}

// ProcessedDefines is the lookup-optimized form of the define table built by
// ProcessDefines: plain identifiers keyed by name, and dotted defines
// bucketed by their final path component.
type ProcessedDefines struct {
	IdentifierDefines map[string]DefineData
	DotDefines        map[string][]DotDefine
}
+
// This transformation is expensive, so we only want to do it once. Make sure
// to only call processDefines() once per compilation. Unfortunately Golang
// doesn't have an efficient way to copy a map and the overhead of copying
// all of the properties into a new map once for every new parser noticeably
// slows down our benchmarks.
func ProcessDefines(userDefines map[string]DefineData) ProcessedDefines {
	// Optimization: reuse known globals if there are no user-specified defines
	hasUserDefines := len(userDefines) != 0
	if !hasUserDefines {
		// Fast path: return the shared precomputed result if it exists. The
		// lock is deliberately dropped before the slow path below, so two
		// concurrent first calls may both compute the result; the "if nil"
		// check at the end makes that harmless.
		processedGlobalsMutex.Lock()
		if processedGlobals != nil {
			defer processedGlobalsMutex.Unlock()
			return *processedGlobals
		}
		processedGlobalsMutex.Unlock()
	}

	result := ProcessedDefines{
		IdentifierDefines: make(map[string]DefineData),
		DotDefines:        make(map[string][]DotDefine),
	}

	// Mark these property accesses as free of side effects. That means they can
	// be removed if their result is unused. We can't just remove all unused
	// property accesses since property accesses can have side effects. For
	// example, the property access "a.b.c" has the side effect of throwing an
	// exception if "a.b" is undefined.
	for _, parts := range knownGlobals {
		tail := parts[len(parts)-1]
		if len(parts) == 1 {
			result.IdentifierDefines[tail] = DefineData{Flags: CanBeRemovedIfUnused}
		} else {
			flags := CanBeRemovedIfUnused

			// All properties on the "Symbol" global are currently symbol instances
			// (i.e. "typeof Symbol.iterator === 'symbol'"). This is used to avoid
			// treating properties with these names as having side effects.
			if parts[0] == "Symbol" {
				flags |= IsSymbolInstance
			}

			// Dotted globals are bucketed by their last path component
			result.DotDefines[tail] = append(result.DotDefines[tail], DotDefine{Parts: parts, Data: DefineData{Flags: flags}})
		}
	}

	// Swap in certain literal values because those can be constant folded
	result.IdentifierDefines["undefined"] = DefineData{
		DefineExpr: &DefineExpr{Constant: js_ast.EUndefinedShared},
	}
	result.IdentifierDefines["NaN"] = DefineData{
		DefineExpr: &DefineExpr{Constant: &js_ast.ENumber{Value: math.NaN()}},
	}
	result.IdentifierDefines["Infinity"] = DefineData{
		DefineExpr: &DefineExpr{Constant: &js_ast.ENumber{Value: math.Inf(1)}},
	}

	// Then copy the user-specified defines in afterwards, which will overwrite
	// any known globals above.
	for key, data := range userDefines {
		parts := strings.Split(key, ".")

		// Identifier defines are special-cased
		if len(parts) == 1 {
			result.IdentifierDefines[key] = mergeDefineData(result.IdentifierDefines[key], data)
			continue
		}

		tail := parts[len(parts)-1]
		dotDefines := result.DotDefines[tail]
		found := false

		// Try to merge with existing dot defines first
		for i, define := range dotDefines {
			if helpers.StringArraysEqual(parts, define.Parts) {
				define := &dotDefines[i]
				define.Data = mergeDefineData(define.Data, data)
				found = true
				break
			}
		}

		if !found {
			dotDefines = append(dotDefines, DotDefine{Parts: parts, Data: data})
		}
		result.DotDefines[tail] = dotDefines
	}

	// Potentially cache the result for next time. Only the first writer wins;
	// any later result was computed from the same inputs and is identical.
	if !hasUserDefines {
		processedGlobalsMutex.Lock()
		defer processedGlobalsMutex.Unlock()
		if processedGlobals == nil {
			processedGlobals = &result
		}
	}
	return result
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_ast.go b/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_ast.go
new file mode 100644
index 0000000..b7a1731
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_ast.go
@@ -0,0 +1,1205 @@
+package css_ast
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// CSS syntax comes in two layers: a minimal syntax that generally accepts
+// anything that looks vaguely like CSS, and a large set of built-in rules
+// (the things browsers actually interpret). That way CSS parsers can read
+// unknown rules and skip over them without having to stop due to errors.
+//
+// This AST format is mostly just the minimal syntax. It parses unknown rules
+// into a tree with enough information that it can write them back out again.
+// There are some additional layers of syntax including selectors and @-rules
+// which allow for better pretty-printing and minification.
+//
+// Most of the AST just references ranges of the original file by keeping the
+// original "Token" values around from the lexer. This is a memory-efficient
+// representation that helps provide good parsing and printing performance.
+
+type AST struct {
+	Symbols              []ast.Symbol
+	CharFreq             *ast.CharFreq
+	ImportRecords        []ast.ImportRecord
+	Rules                []Rule
+	SourceMapComment     logger.Span
+	ApproximateLineCount int32
+	LocalSymbols         []ast.LocRef
+	LocalScope           map[string]ast.LocRef
+	GlobalScope          map[string]ast.LocRef
+	Composes             map[ast.Ref]*Composes
+
+	// These contain all layer names in the file. It can be used to replace the
+	// layer-related side effects of importing this file. They are split into two
+	// groups (those before and after "@import" rules) so that the linker can put
+	// them in the right places.
+	LayersPreImport  [][]string
+	LayersPostImport [][]string
+}
+
+type Composes struct {
+	// Note that each of these can be either local or global. Local examples:
+	//
+	//   .foo { composes: bar }
+	//   .bar { color: red }
+	//
+	// Global examples:
+	//
+	//   .foo { composes: bar from global }
+	//   .foo :global { composes: bar }
+	//   .foo { :global { composes: bar } }
+	//   :global .bar { color: red }
+	//
+	Names []ast.LocRef
+
+	// Each of these is local in another file. For example:
+	//
+	//   .foo { composes: bar from "bar.css" }
+	//   .foo { composes: bar from url(bar.css) }
+	//
+	ImportedNames []ImportedComposesName
+
+	// This tracks what CSS properties each class uses so that we can warn when
+	// "composes" is used incorrectly to compose two classes from separate files
+	// that declare the same CSS properties.
+	Properties map[string]logger.Loc
+}
+
+type ImportedComposesName struct {
+	Alias             string
+	AliasLoc          logger.Loc
+	ImportRecordIndex uint32
+}
+
+// We create a lot of tokens, so make sure this layout is memory-efficient.
+// The layout here isn't optimal because it biases for convenience (e.g.
+// "string" could be shorter) but at least the ordering of fields was
+// deliberately chosen to minimize size.
+type Token struct {
+	// Contains the child tokens for component values that are simple blocks.
+	// These are either "(", "{", "[", or function tokens. The closing token is
+	// implicit and is not stored.
+	Children *[]Token // 8 bytes
+
+	// This is the raw contents of the token most of the time. However, it
+	// contains the decoded string contents for "TString" tokens.
+	Text string // 16 bytes
+
+	// The source location at the start of the token
+	Loc logger.Loc // 4 bytes
+
+	// URL tokens have an associated import record at the top-level of the AST.
+	// This index points to that import record.
+	//
+	// Symbol tokens have an associated symbol. This index is the "InnerIndex"
+	// of the "Ref" for this symbol. The "SourceIndex" for the "Ref" is just
+	// the source index of the file for this AST.
+	PayloadIndex uint32 // 4 bytes
+
+	// The division between the number and the unit for "TDimension" tokens.
+	UnitOffset uint16 // 2 bytes
+
+	// This will never be "TWhitespace" because whitespace isn't stored as a
+	// token directly. Instead it is stored in "HasWhitespaceAfter" on the
+	// previous token. This is to make it easier to pattern-match against
+	// tokens when handling CSS rules, since whitespace almost always doesn't
+	// matter. That way you can pattern match against e.g. "rgb(r, g, b)" and
+	// not have to handle all possible combinations of embedded whitespace
+	// tokens.
+	//
+	// There is one exception to this: when in verbatim whitespace mode and
+	// the token list is non-empty and is only whitespace tokens. In that case
+	// a single whitespace token is emitted. This is because otherwise there
+	// would be no tokens to attach the whitespace before/after flags to.
+	Kind css_lexer.T // 1 byte
+
+	// These flags indicate the presence of a "TWhitespace" token before or after
+	// this token. There should be whitespace printed between two tokens if either
+	// token indicates that there should be whitespace. Note that whitespace may
+	// be altered by processing in certain situations (e.g. minification).
+	Whitespace WhitespaceFlags // 1 byte
+}
+
+type WhitespaceFlags uint8
+
+const (
+	WhitespaceBefore WhitespaceFlags = 1 << iota
+	WhitespaceAfter
+)
+
+// This is necessary when comparing tokens between two different files
+type CrossFileEqualityCheck struct {
+	ImportRecordsA []ast.ImportRecord
+	ImportRecordsB []ast.ImportRecord
+	Symbols        ast.SymbolMap
+	SourceIndexA   uint32
+	SourceIndexB   uint32
+}
+
+// RefsAreEquivalent returns true when the two symbol references should be
+// treated as the same symbol while comparing rules across files. It is safe
+// to call on a nil receiver, in which case only identical refs match.
+func (check *CrossFileEqualityCheck) RefsAreEquivalent(a ast.Ref, b ast.Ref) bool {
+	// Trivially equivalent without consulting the symbol table
+	if a == b {
+		return true
+	}
+	// Without a symbol table the refs cannot be resolved any further
+	if check == nil || check.Symbols.SymbolsForSource == nil {
+		return false
+	}
+	// Resolve both refs through any symbol-linking indirection first
+	a = ast.FollowSymbols(check.Symbols, a)
+	b = ast.FollowSymbols(check.Symbols, b)
+	if a == b {
+		return true
+	}
+	// Two distinct global CSS symbols with the same original name refer to
+	// the same thing at runtime, so treat them as equivalent too
+	symbolA := check.Symbols.Get(a)
+	symbolB := check.Symbols.Get(b)
+	return symbolA.Kind == ast.SymbolGlobalCSS && symbolB.Kind == ast.SymbolGlobalCSS && symbolA.OriginalName == symbolB.OriginalName
+}
+
+// Equal reports whether two tokens (and, recursively, their children) are
+// semantically equal. URL and symbol tokens are compared through their
+// associated import records / symbol refs rather than only their raw text,
+// since that is what will actually be printed. "check" may be nil when both
+// tokens come from the same file; otherwise it supplies the per-file import
+// records and symbol table needed for a cross-file comparison.
+func (a Token) Equal(b Token, check *CrossFileEqualityCheck) bool {
+	if a.Kind == b.Kind && a.Text == b.Text && a.Whitespace == b.Whitespace {
+		// URLs should be compared based on the text of the associated import record
+		// (which is what will actually be printed) instead of the original text
+		if a.Kind == css_lexer.TURL {
+			if check == nil {
+				// If both tokens are in the same file, just compare the index
+				if a.PayloadIndex != b.PayloadIndex {
+					return false
+				}
+			} else {
+				// If the tokens come from separate files, compare the import records
+				// themselves instead of comparing the indices. This can happen when
+				// the linker runs a "DuplicateRuleRemover" during bundling. This
+				// doesn't compare the source indices because at this point during
+				// linking, paths inside the bundle (e.g. due to the "copy" loader)
+				// should have already been converted into text (e.g. the "unique key"
+				// string).
+				if check.ImportRecordsA[a.PayloadIndex].Path.Text !=
+					check.ImportRecordsB[b.PayloadIndex].Path.Text {
+					return false
+				}
+			}
+		}
+
+		// Symbols should be compared based on the symbol reference instead of the
+		// original text
+		if a.Kind == css_lexer.TSymbol {
+			if check == nil {
+				// If both tokens are in the same file, just compare the index
+				if a.PayloadIndex != b.PayloadIndex {
+					return false
+				}
+			} else {
+				// If the tokens come from separate files, compare the symbols themselves
+				refA := ast.Ref{SourceIndex: check.SourceIndexA, InnerIndex: a.PayloadIndex}
+				refB := ast.Ref{SourceIndex: check.SourceIndexB, InnerIndex: b.PayloadIndex}
+				if !check.RefsAreEquivalent(refA, refB) {
+					return false
+				}
+			}
+		}
+
+		// Child token lists must either both be absent or compare equal
+		if a.Children == nil && b.Children == nil {
+			return true
+		}
+
+		if a.Children != nil && b.Children != nil && TokensEqual(*a.Children, *b.Children, check) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// TokensEqual reports whether the two token slices are element-wise equal,
+// using "check" (which may be nil for same-file comparisons) to compare
+// URL and symbol tokens across files.
+func TokensEqual(a []Token, b []Token, check *CrossFileEqualityCheck) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !a[i].Equal(b[i], check) {
+			return false
+		}
+	}
+	return true
+}
+
+// HashTokens folds the structural content of "tokens" into "hash" and
+// returns the combined value. The text of URL tokens is skipped because
+// what gets printed for them comes from the import record, not the token.
+func HashTokens(hash uint32, tokens []Token) uint32 {
+	hash = helpers.HashCombine(hash, uint32(len(tokens)))
+
+	for i := range tokens {
+		t := &tokens[i]
+		hash = helpers.HashCombine(hash, uint32(t.Kind))
+		if t.Kind != css_lexer.TURL {
+			hash = helpers.HashCombineString(hash, t.Text)
+		}
+		if children := t.Children; children != nil {
+			hash = HashTokens(hash, *children)
+		}
+	}
+
+	return hash
+}
+
+func (a Token) EqualIgnoringWhitespace(b Token) bool {
+	if a.Kind == b.Kind && a.Text == b.Text && a.PayloadIndex == b.PayloadIndex {
+		if a.Children == nil && b.Children == nil {
+			return true
+		}
+
+		if a.Children != nil && b.Children != nil && TokensEqualIgnoringWhitespace(*a.Children, *b.Children) {
+			return true
+		}
+	}
+
+	return false
+}
+
+// TokensEqualIgnoringWhitespace is like TokensEqual except that it ignores
+// whitespace flags and only supports same-file comparisons (payload indices
+// are compared directly).
+func TokensEqualIgnoringWhitespace(a []Token, b []Token) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := range a {
+		if !a[i].EqualIgnoringWhitespace(b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+// TokensAreCommaSeparated reports whether "tokens" has the shape
+// "value , value , ... , value": an odd number of tokens in which every
+// odd-indexed token is a comma. An empty or even-length list returns false.
+func TokensAreCommaSeparated(tokens []Token) bool {
+	n := len(tokens)
+	if n%2 == 0 {
+		return false
+	}
+	for i := 1; i < n; i += 2 {
+		if tokens[i].Kind != css_lexer.TComma {
+			return false
+		}
+	}
+	return true
+}
+
+type PercentageFlags uint8
+
+const (
+	AllowPercentageBelow0 PercentageFlags = 1 << iota
+	AllowPercentageAbove100
+	AllowAnyPercentage = AllowPercentageBelow0 | AllowPercentageAbove100
+)
+
+// NumberOrFractionForPercentage converts this token into a plain number:
+// a "TNumber" token parses as-is, while a "TPercentage" token is scaled by
+// "percentReferenceRange" (e.g. "50%" with a reference of 255 yields 127.5).
+// Unless the corresponding flag allows it, percentages are clamped into the
+// [0%, 100%] range. The boolean result is false for unparsable or
+// non-numeric tokens.
+func (t Token) NumberOrFractionForPercentage(percentReferenceRange float64, flags PercentageFlags) (float64, bool) {
+	switch t.Kind {
+	case css_lexer.TNumber:
+		if f, err := strconv.ParseFloat(t.Text, 64); err == nil {
+			return f, true
+		}
+
+	case css_lexer.TPercentage:
+		if f, err := strconv.ParseFloat(t.PercentageValue(), 64); err == nil {
+			// Clamp negative percentages up to 0% unless explicitly allowed
+			if (flags&AllowPercentageBelow0) == 0 && f < 0 {
+				return 0, true
+			}
+			// Clamp percentages above 100% down unless explicitly allowed
+			if (flags&AllowPercentageAbove100) == 0 && f > 100 {
+				return percentReferenceRange, true
+			}
+			return f / 100 * percentReferenceRange, true
+		}
+	}
+
+	return 0, false
+}
+
+// ClampedFractionForPercentage converts a percentage token such as "50%"
+// into the fraction 0.5, clamping the result into the range [0, 1]. The
+// boolean result is false if the token is not a parsable percentage.
+func (t Token) ClampedFractionForPercentage() (float64, bool) {
+	if t.Kind != css_lexer.TPercentage {
+		return 0, false
+	}
+	f, err := strconv.ParseFloat(t.PercentageValue(), 64)
+	if err != nil {
+		return 0, false
+	}
+	switch {
+	case f < 0:
+		return 0, true
+	case f > 100:
+		return 1, true
+	default:
+		return f / 100, true
+	}
+}
+
+// https://drafts.csswg.org/css-values-3/#lengths
+// For zero lengths the unit identifier is optional
+// (i.e. can be syntactically represented as the <number> 0).
+func (t *Token) TurnLengthIntoNumberIfZero() bool {
+	if t.Kind == css_lexer.TDimension && t.DimensionValue() == "0" {
+		t.Kind = css_lexer.TNumber
+		t.Text = "0"
+		return true
+	}
+	return false
+}
+
+func (t *Token) TurnLengthOrPercentageIntoNumberIfZero() bool {
+	if t.Kind == css_lexer.TPercentage && t.PercentageValue() == "0" {
+		t.Kind = css_lexer.TNumber
+		t.Text = "0"
+		return true
+	}
+	return t.TurnLengthIntoNumberIfZero()
+}
+
+func (t Token) PercentageValue() string {
+	return t.Text[:len(t.Text)-1]
+}
+
+func (t Token) DimensionValue() string {
+	return t.Text[:t.UnitOffset]
+}
+
+func (t Token) DimensionUnit() string {
+	return t.Text[t.UnitOffset:]
+}
+
+func (t Token) DimensionUnitIsSafeLength() bool {
+	switch strings.ToLower(t.DimensionUnit()) {
+	// These units can be reasonably expected to be supported everywhere.
+	// Information used: https://developer.mozilla.org/en-US/docs/Web/CSS/length
+	case "cm", "em", "in", "mm", "pc", "pt", "px":
+		return true
+	}
+	return false
+}
+
+func (t Token) IsZero() bool {
+	return t.Kind == css_lexer.TNumber && t.Text == "0"
+}
+
+func (t Token) IsOne() bool {
+	return t.Kind == css_lexer.TNumber && t.Text == "1"
+}
+
+// IsAngle reports whether this token is a dimension whose unit is one of
+// the four CSS angle units ("deg", "grad", "rad", or "turn").
+func (t Token) IsAngle() bool {
+	if t.Kind != css_lexer.TDimension {
+		return false
+	}
+	switch strings.ToLower(t.DimensionUnit()) {
+	case "deg", "grad", "rad", "turn":
+		return true
+	}
+	return false
+}
+
+func CloneTokensWithoutImportRecords(tokensIn []Token) (tokensOut []Token) {
+	for _, t := range tokensIn {
+		if t.Children != nil {
+			children := CloneTokensWithoutImportRecords(*t.Children)
+			t.Children = &children
+		}
+		tokensOut = append(tokensOut, t)
+	}
+	return
+}
+
+// CloneTokensWithImportRecords deep-copies "tokensIn" onto "tokensOut"
+// while also copying any referenced import records from "importRecordsIn"
+// to "importRecordsOut", remapping each URL token's payload index to point
+// at its newly-appended record. Pass a nil "tokensOut" to have the output
+// preallocated. Both the extended token slice and the extended import
+// record slice are returned.
+func CloneTokensWithImportRecords(
+	tokensIn []Token, importRecordsIn []ast.ImportRecord,
+	tokensOut []Token, importRecordsOut []ast.ImportRecord,
+) ([]Token, []ast.ImportRecord) {
+	// Preallocate the output array if we can
+	if tokensOut == nil {
+		tokensOut = make([]Token, 0, len(tokensIn))
+	}
+
+	for _, t := range tokensIn {
+		// Clear the source mapping if this token is being used in another file
+		t.Loc.Start = 0
+
+		// If this is a URL token, also clone the import record
+		if t.Kind == css_lexer.TURL {
+			importRecordIndex := uint32(len(importRecordsOut))
+			importRecordsOut = append(importRecordsOut, importRecordsIn[t.PayloadIndex])
+			t.PayloadIndex = importRecordIndex
+		}
+
+		// Also search for URL tokens in this token's children
+		if t.Children != nil {
+			var children []Token
+			children, importRecordsOut = CloneTokensWithImportRecords(*t.Children, importRecordsIn, children, importRecordsOut)
+			t.Children = &children
+		}
+
+		tokensOut = append(tokensOut, t)
+	}
+
+	return tokensOut, importRecordsOut
+}
+
+type Rule struct {
+	Data R
+	Loc  logger.Loc
+}
+
+type R interface {
+	Equal(rule R, check *CrossFileEqualityCheck) bool
+	Hash() (uint32, bool)
+}
+
+func RulesEqual(a []Rule, b []Rule, check *CrossFileEqualityCheck) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, ai := range a {
+		if !ai.Data.Equal(b[i].Data, check) {
+			return false
+		}
+	}
+	return true
+}
+
+// HashRules folds the hash of every rule in "rules" into "hash" and returns
+// the result. Rules that cannot be hashed contribute a placeholder of zero
+// so the combined value still reflects the rule count and positions.
+func HashRules(hash uint32, rules []Rule) uint32 {
+	hash = helpers.HashCombine(hash, uint32(len(rules)))
+	for _, child := range rules {
+		childHash, ok := child.Data.Hash()
+		if !ok {
+			childHash = 0
+		}
+		hash = helpers.HashCombine(hash, childHash)
+	}
+	return hash
+}
+
+type RAtCharset struct {
+	Encoding string
+}
+
+func (a *RAtCharset) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RAtCharset)
+	return ok && a.Encoding == b.Encoding
+}
+
+func (r *RAtCharset) Hash() (uint32, bool) {
+	hash := uint32(1)
+	hash = helpers.HashCombineString(hash, r.Encoding)
+	return hash, true
+}
+
+type ImportConditions struct {
+	// The syntax for "@import" has been extended with optional conditions that
+	// behave as if the imported file was wrapped in a "@layer", "@supports",
+	// and/or "@media" rule. The possible syntax combinations are as follows:
+	//
+	//   @import url(...);
+	//   @import url(...) layer;
+	//   @import url(...) layer(layer-name);
+	//   @import url(...) layer(layer-name) supports(supports-condition);
+	//   @import url(...) layer(layer-name) supports(supports-condition) list-of-media-queries;
+	//   @import url(...) layer(layer-name) list-of-media-queries;
+	//   @import url(...) supports(supports-condition);
+	//   @import url(...) supports(supports-condition) list-of-media-queries;
+	//   @import url(...) list-of-media-queries;
+	//
+	// From: https://developer.mozilla.org/en-US/docs/Web/CSS/@import#syntax
+	Media []Token
+
+	// These two fields will only ever have zero or one tokens. However, they are
+	// implemented as arrays for convenience because most of esbuild's helper
+	// functions that operate on tokens take arrays instead of individual tokens.
+	Layers   []Token
+	Supports []Token
+}
+
+func (c *ImportConditions) CloneWithImportRecords(importRecordsIn []ast.ImportRecord, importRecordsOut []ast.ImportRecord) (ImportConditions, []ast.ImportRecord) {
+	result := ImportConditions{}
+	result.Layers, importRecordsOut = CloneTokensWithImportRecords(c.Layers, importRecordsIn, nil, importRecordsOut)
+	result.Supports, importRecordsOut = CloneTokensWithImportRecords(c.Supports, importRecordsIn, nil, importRecordsOut)
+	result.Media, importRecordsOut = CloneTokensWithImportRecords(c.Media, importRecordsIn, nil, importRecordsOut)
+	return result, importRecordsOut
+}
+
+type RAtImport struct {
+	ImportConditions  *ImportConditions
+	ImportRecordIndex uint32
+}
+
+func (*RAtImport) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	return false
+}
+
+func (r *RAtImport) Hash() (uint32, bool) {
+	return 0, false
+}
+
+type RAtKeyframes struct {
+	AtToken       string
+	Name          ast.LocRef
+	Blocks        []KeyframeBlock
+	CloseBraceLoc logger.Loc
+}
+
+type KeyframeBlock struct {
+	Selectors     []string
+	Rules         []Rule
+	Loc           logger.Loc
+	CloseBraceLoc logger.Loc
+}
+
+func (a *RAtKeyframes) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	if b, ok := rule.(*RAtKeyframes); ok && strings.EqualFold(a.AtToken, b.AtToken) && check.RefsAreEquivalent(a.Name.Ref, b.Name.Ref) && len(a.Blocks) == len(b.Blocks) {
+		for i, ai := range a.Blocks {
+			bi := b.Blocks[i]
+			if len(ai.Selectors) != len(bi.Selectors) {
+				return false
+			}
+			for j, aj := range ai.Selectors {
+				if aj != bi.Selectors[j] {
+					return false
+				}
+			}
+			if !RulesEqual(ai.Rules, bi.Rules, check) {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+func (r *RAtKeyframes) Hash() (uint32, bool) {
+	hash := uint32(2)
+	hash = helpers.HashCombineString(hash, r.AtToken)
+	hash = helpers.HashCombine(hash, uint32(len(r.Blocks)))
+	for _, block := range r.Blocks {
+		hash = helpers.HashCombine(hash, uint32(len(block.Selectors)))
+		for _, sel := range block.Selectors {
+			hash = helpers.HashCombineString(hash, sel)
+		}
+		hash = HashRules(hash, block.Rules)
+	}
+	return hash, true
+}
+
+type RKnownAt struct {
+	AtToken       string
+	Prelude       []Token
+	Rules         []Rule
+	CloseBraceLoc logger.Loc
+}
+
+func (a *RKnownAt) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RKnownAt)
+	return ok && strings.EqualFold(a.AtToken, b.AtToken) && TokensEqual(a.Prelude, b.Prelude, check) && RulesEqual(a.Rules, b.Rules, check)
+}
+
+func (r *RKnownAt) Hash() (uint32, bool) {
+	hash := uint32(3)
+	hash = helpers.HashCombineString(hash, r.AtToken)
+	hash = HashTokens(hash, r.Prelude)
+	hash = HashRules(hash, r.Rules)
+	return hash, true
+}
+
+type RUnknownAt struct {
+	AtToken string
+	Prelude []Token
+	Block   []Token
+}
+
+func (a *RUnknownAt) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RUnknownAt)
+	return ok && strings.EqualFold(a.AtToken, b.AtToken) && TokensEqual(a.Prelude, b.Prelude, check) && TokensEqual(a.Block, b.Block, check)
+}
+
+func (r *RUnknownAt) Hash() (uint32, bool) {
+	hash := uint32(4)
+	hash = helpers.HashCombineString(hash, r.AtToken)
+	hash = HashTokens(hash, r.Prelude)
+	hash = HashTokens(hash, r.Block)
+	return hash, true
+}
+
+type RSelector struct {
+	Selectors     []ComplexSelector
+	Rules         []Rule
+	CloseBraceLoc logger.Loc
+}
+
+func (a *RSelector) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RSelector)
+	return ok && ComplexSelectorsEqual(a.Selectors, b.Selectors, check) && RulesEqual(a.Rules, b.Rules, check)
+}
+
+func (r *RSelector) Hash() (uint32, bool) {
+	hash := uint32(5)
+	hash = helpers.HashCombine(hash, uint32(len(r.Selectors)))
+	hash = HashComplexSelectors(hash, r.Selectors)
+	hash = HashRules(hash, r.Rules)
+	return hash, true
+}
+
+type RQualified struct {
+	Prelude       []Token
+	Rules         []Rule
+	CloseBraceLoc logger.Loc
+}
+
+func (a *RQualified) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RQualified)
+	return ok && TokensEqual(a.Prelude, b.Prelude, check) && RulesEqual(a.Rules, b.Rules, check)
+}
+
+func (r *RQualified) Hash() (uint32, bool) {
+	hash := uint32(6)
+	hash = HashTokens(hash, r.Prelude)
+	hash = HashRules(hash, r.Rules)
+	return hash, true
+}
+
+type RDeclaration struct {
+	KeyText   string
+	Value     []Token
+	KeyRange  logger.Range
+	Key       D // Compare using this instead of "Key" for speed
+	Important bool
+}
+
+func (a *RDeclaration) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RDeclaration)
+	return ok && a.KeyText == b.KeyText && TokensEqual(a.Value, b.Value, check) && a.Important == b.Important
+}
+
+func (r *RDeclaration) Hash() (uint32, bool) {
+	var hash uint32
+	if r.Key == DUnknown {
+		if r.Important {
+			hash = uint32(7)
+		} else {
+			hash = uint32(8)
+		}
+		hash = helpers.HashCombineString(hash, r.KeyText)
+	} else {
+		if r.Important {
+			hash = uint32(9)
+		} else {
+			hash = uint32(10)
+		}
+		hash = helpers.HashCombine(hash, uint32(r.Key))
+	}
+	hash = HashTokens(hash, r.Value)
+	return hash, true
+}
+
+type RBadDeclaration struct {
+	Tokens []Token
+}
+
+func (a *RBadDeclaration) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RBadDeclaration)
+	return ok && TokensEqual(a.Tokens, b.Tokens, check)
+}
+
+func (r *RBadDeclaration) Hash() (uint32, bool) {
+	hash := uint32(11)
+	hash = HashTokens(hash, r.Tokens)
+	return hash, true
+}
+
+type RComment struct {
+	Text string
+}
+
+func (a *RComment) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	b, ok := rule.(*RComment)
+	return ok && a.Text == b.Text
+}
+
+func (r *RComment) Hash() (uint32, bool) {
+	hash := uint32(12)
+	hash = helpers.HashCombineString(hash, r.Text)
+	return hash, true
+}
+
+type RAtLayer struct {
+	Names         [][]string
+	Rules         []Rule
+	CloseBraceLoc logger.Loc
+}
+
+// Equal reports whether two "@layer" rules declare identical layer name
+// lists and contain equal child rules.
+//
+// Bug fix: the original version fell through to "return false" even after
+// every name and every child rule matched — there was no reachable
+// "return true" — so two identical "@layer" rules never compared equal and
+// could never be deduplicated. The matched branch now returns the result
+// of comparing the child rules.
+func (a *RAtLayer) Equal(rule R, check *CrossFileEqualityCheck) bool {
+	if b, ok := rule.(*RAtLayer); ok && len(a.Names) == len(b.Names) && len(a.Rules) == len(b.Rules) {
+		// Every dotted layer name must match exactly, part for part
+		for i, ai := range a.Names {
+			bi := b.Names[i]
+			if len(ai) != len(bi) {
+				return false
+			}
+			for j, aj := range ai {
+				if aj != bi[j] {
+					return false
+				}
+			}
+		}
+		return RulesEqual(a.Rules, b.Rules, check)
+	}
+	return false
+}
+
+func (r *RAtLayer) Hash() (uint32, bool) {
+	hash := uint32(13)
+	hash = helpers.HashCombine(hash, uint32(len(r.Names)))
+	for _, parts := range r.Names {
+		hash = helpers.HashCombine(hash, uint32(len(parts)))
+		for _, part := range parts {
+			hash = helpers.HashCombineString(hash, part)
+		}
+	}
+	hash = HashRules(hash, r.Rules)
+	return hash, true
+}
+
+type ComplexSelector struct {
+	Selectors []CompoundSelector
+}
+
+func ComplexSelectorsEqual(a []ComplexSelector, b []ComplexSelector, check *CrossFileEqualityCheck) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, ai := range a {
+		if !ai.Equal(b[i], check) {
+			return false
+		}
+	}
+	return true
+}
+
+func HashComplexSelectors(hash uint32, selectors []ComplexSelector) uint32 {
+	for _, complex := range selectors {
+		hash = helpers.HashCombine(hash, uint32(len(complex.Selectors)))
+		for _, sel := range complex.Selectors {
+			if sel.TypeSelector != nil {
+				hash = helpers.HashCombineString(hash, sel.TypeSelector.Name.Text)
+			} else {
+				hash = helpers.HashCombine(hash, 0)
+			}
+			hash = helpers.HashCombine(hash, uint32(len(sel.SubclassSelectors)))
+			for _, ss := range sel.SubclassSelectors {
+				hash = helpers.HashCombine(hash, ss.Data.Hash())
+			}
+			hash = helpers.HashCombine(hash, uint32(sel.Combinator.Byte))
+		}
+	}
+	return hash
+}
+
+func (s ComplexSelector) CloneWithoutLeadingCombinator() ComplexSelector {
+	clone := ComplexSelector{Selectors: make([]CompoundSelector, len(s.Selectors))}
+	for i, sel := range s.Selectors {
+		if i == 0 {
+			sel.Combinator = Combinator{}
+		}
+		clone.Selectors[i] = sel.Clone()
+	}
+	return clone
+}
+
+func (sel ComplexSelector) IsRelative() bool {
+	if sel.Selectors[0].Combinator.Byte == 0 {
+		for _, inner := range sel.Selectors {
+			if inner.HasNestingSelector() {
+				return false
+			}
+			for _, ss := range inner.SubclassSelectors {
+				if pseudo, ok := ss.Data.(*SSPseudoClassWithSelectorList); ok {
+					for _, nested := range pseudo.Selectors {
+						if !nested.IsRelative() {
+							return false
+						}
+					}
+				}
+			}
+		}
+	}
+	return true
+}
+
+// tokensContainAmpersandRecursive reports whether the token tree contains a
+// "&" delimiter token anywhere, including inside nested blocks/functions.
+func tokensContainAmpersandRecursive(tokens []Token) bool {
+	for i := range tokens {
+		t := &tokens[i]
+		if t.Kind == css_lexer.TDelimAmpersand {
+			return true
+		}
+		if t.Children != nil && tokensContainAmpersandRecursive(*t.Children) {
+			return true
+		}
+	}
+	return false
+}
+
+func (sel ComplexSelector) UsesPseudoElement() bool {
+	for _, sel := range sel.Selectors {
+		for _, ss := range sel.SubclassSelectors {
+			if class, ok := ss.Data.(*SSPseudoClass); ok {
+				if class.IsElement {
+					return true
+				}
+
+				// https://www.w3.org/TR/selectors-4/#single-colon-pseudos
+				// The four Level 2 pseudo-elements (::before, ::after, ::first-line,
+				// and ::first-letter) may, for legacy reasons, be represented using
+				// the <pseudo-class-selector> grammar, with only a single ":"
+				// character at their start.
+				switch class.Name {
+				case "before", "after", "first-line", "first-letter":
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func (a ComplexSelector) Equal(b ComplexSelector, check *CrossFileEqualityCheck) bool {
+	if len(a.Selectors) != len(b.Selectors) {
+		return false
+	}
+
+	for i, ai := range a.Selectors {
+		bi := b.Selectors[i]
+		if ai.HasNestingSelector() != bi.HasNestingSelector() || ai.Combinator.Byte != bi.Combinator.Byte {
+			return false
+		}
+
+		if ats, bts := ai.TypeSelector, bi.TypeSelector; (ats == nil) != (bts == nil) {
+			return false
+		} else if ats != nil && bts != nil && !ats.Equal(*bts) {
+			return false
+		}
+
+		if len(ai.SubclassSelectors) != len(bi.SubclassSelectors) {
+			return false
+		}
+		for j, aj := range ai.SubclassSelectors {
+			if !aj.Data.Equal(bi.SubclassSelectors[j].Data, check) {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+type Combinator struct {
+	Loc  logger.Loc
+	Byte uint8 // Optional, may be 0 for no combinator
+}
+
+type CompoundSelector struct {
+	TypeSelector       *NamespacedName
+	SubclassSelectors  []SubclassSelector
+	NestingSelectorLoc ast.Index32 // "&"
+	Combinator         Combinator  // Optional, may be 0
+
+	// If this is true, this is a "&" that was generated by a bare ":local" or ":global"
+	WasEmptyFromLocalOrGlobal bool
+}
+
+func (sel *CompoundSelector) HasNestingSelector() bool {
+	return sel.NestingSelectorLoc.IsValid()
+}
+
+func (sel CompoundSelector) IsSingleAmpersand() bool {
+	return sel.HasNestingSelector() && sel.Combinator.Byte == 0 && sel.TypeSelector == nil && len(sel.SubclassSelectors) == 0
+}
+
+func (sel CompoundSelector) IsInvalidBecauseEmpty() bool {
+	return !sel.HasNestingSelector() && sel.TypeSelector == nil && len(sel.SubclassSelectors) == 0
+}
+
+func (sel CompoundSelector) Range() (r logger.Range) {
+	if sel.Combinator.Byte != 0 {
+		r = logger.Range{Loc: sel.Combinator.Loc, Len: 1}
+	}
+	if sel.TypeSelector != nil {
+		r.ExpandBy(sel.TypeSelector.Range())
+	}
+	if sel.NestingSelectorLoc.IsValid() {
+		r.ExpandBy(logger.Range{Loc: logger.Loc{Start: int32(sel.NestingSelectorLoc.GetIndex())}, Len: 1})
+	}
+	if len(sel.SubclassSelectors) > 0 {
+		for _, ss := range sel.SubclassSelectors {
+			r.ExpandBy(ss.Range)
+		}
+	}
+	return
+}
+
+func (sel CompoundSelector) Clone() CompoundSelector {
+	clone := sel
+
+	if sel.TypeSelector != nil {
+		t := sel.TypeSelector.Clone()
+		clone.TypeSelector = &t
+	}
+
+	if sel.SubclassSelectors != nil {
+		selectors := make([]SubclassSelector, len(sel.SubclassSelectors))
+		for i, ss := range sel.SubclassSelectors {
+			ss.Data = ss.Data.Clone()
+			selectors[i] = ss
+		}
+		clone.SubclassSelectors = selectors
+	}
+
+	return clone
+}
+
+type NameToken struct {
+	Text  string
+	Range logger.Range
+	Kind  css_lexer.T
+}
+
+func (a NameToken) Equal(b NameToken) bool {
+	return a.Text == b.Text && a.Kind == b.Kind
+}
+
+type NamespacedName struct {
+	// If present, this is an identifier or "*" and is followed by a "|" character
+	NamespacePrefix *NameToken
+
+	// This is an identifier or "*"
+	Name NameToken
+}
+
+func (n NamespacedName) Range() logger.Range {
+	if n.NamespacePrefix != nil {
+		loc := n.NamespacePrefix.Range.Loc
+		return logger.Range{Loc: loc, Len: n.Name.Range.End() - loc.Start}
+	}
+	return n.Name.Range
+}
+
+func (n NamespacedName) Clone() NamespacedName {
+	clone := n
+	if n.NamespacePrefix != nil {
+		prefix := *n.NamespacePrefix
+		clone.NamespacePrefix = &prefix
+	}
+	return clone
+}
+
+// Equal reports whether two namespaced names match: the name tokens must be
+// equal, both must agree on whether a namespace prefix is present, and when
+// present the prefixes must be equal too.
+//
+// Bug fix: the original compared "a.NamespacePrefix" against "b.Name" (the
+// other name's element token) instead of against "b.NamespacePrefix", so
+// prefixed names like "svg|rect" only compared equal when the prefix text
+// happened to coincide with the element name. Note that once the nil-ness
+// check has passed, a non-nil "a.NamespacePrefix" implies a non-nil
+// "b.NamespacePrefix", so the dereference below is safe.
+func (a NamespacedName) Equal(b NamespacedName) bool {
+	return a.Name.Equal(b.Name) && (a.NamespacePrefix == nil) == (b.NamespacePrefix == nil) &&
+		(a.NamespacePrefix == nil || a.NamespacePrefix.Equal(*b.NamespacePrefix))
+}
+
+type SubclassSelector struct {
+	Data  SS
+	Range logger.Range
+}
+
+type SS interface {
+	Equal(ss SS, check *CrossFileEqualityCheck) bool
+	Hash() uint32
+	Clone() SS
+}
+
+type SSHash struct {
+	Name ast.LocRef
+}
+
+func (a *SSHash) Equal(ss SS, check *CrossFileEqualityCheck) bool {
+	b, ok := ss.(*SSHash)
+	return ok && check.RefsAreEquivalent(a.Name.Ref, b.Name.Ref)
+}
+
+func (ss *SSHash) Hash() uint32 {
+	hash := uint32(1)
+	return hash
+}
+
+func (ss *SSHash) Clone() SS {
+	clone := *ss
+	return &clone
+}
+
+type SSClass struct {
+	Name ast.LocRef
+}
+
+func (a *SSClass) Equal(ss SS, check *CrossFileEqualityCheck) bool {
+	b, ok := ss.(*SSClass)
+	return ok && check.RefsAreEquivalent(a.Name.Ref, b.Name.Ref)
+}
+
+func (ss *SSClass) Hash() uint32 {
+	hash := uint32(2)
+	return hash
+}
+
+func (ss *SSClass) Clone() SS {
+	clone := *ss
+	return &clone
+}
+
+type SSAttribute struct {
+	MatcherOp       string // Either "" or one of: "=" "~=" "|=" "^=" "$=" "*="
+	MatcherValue    string
+	NamespacedName  NamespacedName
+	MatcherModifier byte // Either 0 or one of: 'i' 'I' 's' 'S'
+}
+
+func (a *SSAttribute) Equal(ss SS, check *CrossFileEqualityCheck) bool {
+	b, ok := ss.(*SSAttribute)
+	return ok && a.NamespacedName.Equal(b.NamespacedName) && a.MatcherOp == b.MatcherOp &&
+		a.MatcherValue == b.MatcherValue && a.MatcherModifier == b.MatcherModifier
+}
+
+func (ss *SSAttribute) Hash() uint32 {
+	hash := uint32(3)
+	hash = helpers.HashCombineString(hash, ss.NamespacedName.Name.Text)
+	hash = helpers.HashCombineString(hash, ss.MatcherOp)
+	hash = helpers.HashCombineString(hash, ss.MatcherValue)
+	return hash
+}
+
+func (ss *SSAttribute) Clone() SS {
+	clone := *ss
+	clone.NamespacedName = ss.NamespacedName.Clone()
+	return &clone
+}
+
+type SSPseudoClass struct {
+	Name      string
+	Args      []Token
+	IsElement bool // If true, this is prefixed by "::" instead of ":"
+}
+
+func (a *SSPseudoClass) Equal(ss SS, check *CrossFileEqualityCheck) bool {
+	b, ok := ss.(*SSPseudoClass)
+	return ok && a.Name == b.Name && TokensEqual(a.Args, b.Args, check) && a.IsElement == b.IsElement
+}
+
+func (ss *SSPseudoClass) Hash() uint32 {
+	hash := uint32(4)
+	hash = helpers.HashCombineString(hash, ss.Name)
+	hash = HashTokens(hash, ss.Args)
+	return hash
+}
+
+// Clone returns a deep copy of this pseudo-class selector.
+//
+// Bug fix: the original assigned the cloned argument tokens back onto the
+// receiver ("ss.Args = ...") instead of onto the copy ("clone.Args"), which
+// both mutated the receiver and left the returned clone aliasing the
+// original's token slice — mutations through the clone's children were
+// visible in the original selector. The cloned tokens now go to the clone.
+func (ss *SSPseudoClass) Clone() SS {
+	clone := *ss
+	if ss.Args != nil {
+		clone.Args = CloneTokensWithoutImportRecords(ss.Args)
+	}
+	return &clone
+}
+
+type PseudoClassKind uint8
+
+const (
+	PseudoClassGlobal PseudoClassKind = iota
+	PseudoClassHas
+	PseudoClassIs
+	PseudoClassLocal
+	PseudoClassNot
+	PseudoClassNthChild
+	PseudoClassNthLastChild
+	PseudoClassNthLastOfType
+	PseudoClassNthOfType
+	PseudoClassWhere
+)
+
+func (kind PseudoClassKind) HasNthIndex() bool {
+	return kind >= PseudoClassNthChild && kind <= PseudoClassNthOfType
+}
+
+func (kind PseudoClassKind) String() string {
+	switch kind {
+	case PseudoClassGlobal:
+		return "global"
+	case PseudoClassHas:
+		return "has"
+	case PseudoClassIs:
+		return "is"
+	case PseudoClassLocal:
+		return "local"
+	case PseudoClassNot:
+		return "not"
+	case PseudoClassNthChild:
+		return "nth-child"
+	case PseudoClassNthLastChild:
+		return "nth-last-child"
+	case PseudoClassNthLastOfType:
+		return "nth-last-of-type"
+	case PseudoClassNthOfType:
+		return "nth-of-type"
+	case PseudoClassWhere:
+		return "where"
+	default:
+		panic("Internal error")
+	}
+}
+
+// This is the "An+B" syntax
+type NthIndex struct {
+	A string
+	B string // May be "even" or "odd"
+}
+
+func (index *NthIndex) Minify() {
+	// "even" => "2n"
+	if index.B == "even" {
+		index.A = "2"
+		index.B = ""
+		return
+	}
+
+	// "2n+1" => "odd"
+	if index.A == "2" && index.B == "1" {
+		index.A = ""
+		index.B = "odd"
+		return
+	}
+
+	// "0n+1" => "1"
+	if index.A == "0" {
+		index.A = ""
+		if index.B == "" {
+			// "0n" => "0"
+			index.B = "0"
+		}
+		return
+	}
+
+	// "1n+0" => "1n"
+	if index.B == "0" && index.A != "" {
+		index.B = ""
+	}
+}
+
+// See https://drafts.csswg.org/selectors/#grouping
+type SSPseudoClassWithSelectorList struct {
+	Selectors []ComplexSelector
+	Index     NthIndex
+	Kind      PseudoClassKind
+}
+
+func (a *SSPseudoClassWithSelectorList) Equal(ss SS, check *CrossFileEqualityCheck) bool {
+	b, ok := ss.(*SSPseudoClassWithSelectorList)
+	return ok && a.Kind == b.Kind && a.Index == b.Index && ComplexSelectorsEqual(a.Selectors, b.Selectors, check)
+}
+
+func (ss *SSPseudoClassWithSelectorList) Hash() uint32 {
+	hash := uint32(5)
+	hash = helpers.HashCombine(hash, uint32(ss.Kind))
+	hash = helpers.HashCombineString(hash, ss.Index.A)
+	hash = helpers.HashCombineString(hash, ss.Index.B)
+	hash = HashComplexSelectors(hash, ss.Selectors)
+	return hash
+}
+
+func (ss *SSPseudoClassWithSelectorList) Clone() SS {
+	clone := *ss
+	clone.Selectors = make([]ComplexSelector, len(ss.Selectors))
+	for i, sel := range ss.Selectors {
+		clone.Selectors[i] = sel.CloneWithoutLeadingCombinator()
+	}
+	return &clone
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_decl_table.go b/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_decl_table.go
new file mode 100644
index 0000000..231eac4
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_ast/css_decl_table.go
@@ -0,0 +1,698 @@
+package css_ast
+
+import (
+	"strings"
+	"sync"
+
+	"github.com/evanw/esbuild/internal/helpers"
+)
+
+type D uint16 // D identifies a known CSS declaration (property) name; the zero value is DUnknown
+
+const (
+	DUnknown D = iota // Zero value: property not present in KnownDeclarations
+	DAlignContent
+	DAlignItems
+	DAlignSelf
+	DAlignmentBaseline
+	DAll
+	DAnimation
+	DAnimationDelay
+	DAnimationDirection
+	DAnimationDuration
+	DAnimationFillMode
+	DAnimationIterationCount
+	DAnimationName
+	DAnimationPlayState
+	DAnimationTimingFunction
+	DAppearance
+	DBackdropFilter
+	DBackfaceVisibility
+	DBackground
+	DBackgroundAttachment
+	DBackgroundClip
+	DBackgroundColor
+	DBackgroundImage
+	DBackgroundOrigin
+	DBackgroundPosition
+	DBackgroundPositionX
+	DBackgroundPositionY
+	DBackgroundRepeat
+	DBackgroundSize
+	DBaselineShift
+	DBlockSize
+	DBorder
+	DBorderBlockEnd
+	DBorderBlockEndColor
+	DBorderBlockEndStyle
+	DBorderBlockEndWidth
+	DBorderBlockStart
+	DBorderBlockStartColor
+	DBorderBlockStartStyle
+	DBorderBlockStartWidth
+	DBorderBottom
+	DBorderBottomColor
+	DBorderBottomLeftRadius
+	DBorderBottomRightRadius
+	DBorderBottomStyle
+	DBorderBottomWidth
+	DBorderCollapse
+	DBorderColor
+	DBorderImage
+	DBorderImageOutset
+	DBorderImageRepeat
+	DBorderImageSlice
+	DBorderImageSource
+	DBorderImageWidth
+	DBorderInlineEnd
+	DBorderInlineEndColor
+	DBorderInlineEndStyle
+	DBorderInlineEndWidth
+	DBorderInlineStart
+	DBorderInlineStartColor
+	DBorderInlineStartStyle
+	DBorderInlineStartWidth
+	DBorderLeft
+	DBorderLeftColor
+	DBorderLeftStyle
+	DBorderLeftWidth
+	DBorderRadius
+	DBorderRight
+	DBorderRightColor
+	DBorderRightStyle
+	DBorderRightWidth
+	DBorderSpacing
+	DBorderStyle
+	DBorderTop
+	DBorderTopColor
+	DBorderTopLeftRadius
+	DBorderTopRightRadius
+	DBorderTopStyle
+	DBorderTopWidth
+	DBorderWidth
+	DBottom
+	DBoxDecorationBreak
+	DBoxShadow
+	DBoxSizing
+	DBreakAfter
+	DBreakBefore
+	DBreakInside
+	DCaptionSide
+	DCaretColor
+	DClear
+	DClip
+	DClipPath
+	DClipRule
+	DColor
+	DColorInterpolation
+	DColorInterpolationFilters
+	DColumnCount
+	DColumnFill
+	DColumnGap
+	DColumnRule
+	DColumnRuleColor
+	DColumnRuleStyle
+	DColumnRuleWidth
+	DColumnSpan
+	DColumnWidth
+	DColumns
+	DComposes
+	DContainer
+	DContainerName
+	DContainerType
+	DContent
+	DCounterIncrement
+	DCounterReset
+	DCssFloat
+	DCssText
+	DCursor
+	DDirection
+	DDisplay
+	DDominantBaseline
+	DEmptyCells
+	DFill
+	DFillOpacity
+	DFillRule
+	DFilter
+	DFlex
+	DFlexBasis
+	DFlexDirection
+	DFlexFlow
+	DFlexGrow
+	DFlexShrink
+	DFlexWrap
+	DFloat
+	DFloodColor
+	DFloodOpacity
+	DFont
+	DFontFamily
+	DFontFeatureSettings
+	DFontKerning
+	DFontSize
+	DFontSizeAdjust
+	DFontStretch
+	DFontStyle
+	DFontSynthesis
+	DFontVariant
+	DFontVariantCaps
+	DFontVariantEastAsian
+	DFontVariantLigatures
+	DFontVariantNumeric
+	DFontVariantPosition
+	DFontWeight
+	DGap
+	DGlyphOrientationVertical
+	DGrid
+	DGridArea
+	DGridAutoColumns
+	DGridAutoFlow
+	DGridAutoRows
+	DGridColumn
+	DGridColumnEnd
+	DGridColumnGap
+	DGridColumnStart
+	DGridGap
+	DGridRow
+	DGridRowEnd
+	DGridRowGap
+	DGridRowStart
+	DGridTemplate
+	DGridTemplateAreas
+	DGridTemplateColumns
+	DGridTemplateRows
+	DHeight
+	DHyphens
+	DImageOrientation
+	DImageRendering
+	DInitialLetter
+	DInlineSize
+	DInset
+	DJustifyContent
+	DJustifyItems
+	DJustifySelf
+	DLeft
+	DLetterSpacing
+	DLightingColor
+	DLineBreak
+	DLineHeight
+	DListStyle
+	DListStyleImage
+	DListStylePosition
+	DListStyleType
+	DMargin
+	DMarginBlockEnd
+	DMarginBlockStart
+	DMarginBottom
+	DMarginInlineEnd
+	DMarginInlineStart
+	DMarginLeft
+	DMarginRight
+	DMarginTop
+	DMarker
+	DMarkerEnd
+	DMarkerMid
+	DMarkerStart
+	DMask
+	DMaskComposite
+	DMaskImage
+	DMaskOrigin
+	DMaskPosition
+	DMaskRepeat
+	DMaskSize
+	DMaskType
+	DMaxBlockSize
+	DMaxHeight
+	DMaxInlineSize
+	DMaxWidth
+	DMinBlockSize
+	DMinHeight
+	DMinInlineSize
+	DMinWidth
+	DObjectFit
+	DObjectPosition
+	DOpacity
+	DOrder
+	DOrphans
+	DOutline
+	DOutlineColor
+	DOutlineOffset
+	DOutlineStyle
+	DOutlineWidth
+	DOverflow
+	DOverflowAnchor
+	DOverflowWrap
+	DOverflowX
+	DOverflowY
+	DOverscrollBehavior
+	DOverscrollBehaviorBlock
+	DOverscrollBehaviorInline
+	DOverscrollBehaviorX
+	DOverscrollBehaviorY
+	DPadding
+	DPaddingBlockEnd
+	DPaddingBlockStart
+	DPaddingBottom
+	DPaddingInlineEnd
+	DPaddingInlineStart
+	DPaddingLeft
+	DPaddingRight
+	DPaddingTop
+	DPageBreakAfter
+	DPageBreakBefore
+	DPageBreakInside
+	DPaintOrder
+	DPerspective
+	DPerspectiveOrigin
+	DPlaceContent
+	DPlaceItems
+	DPlaceSelf
+	DPointerEvents
+	DPosition
+	DPrintColorAdjust
+	DQuotes
+	DResize
+	DRight
+	DRotate
+	DRowGap
+	DRubyAlign
+	DRubyPosition
+	DScale
+	DScrollBehavior
+	DShapeRendering
+	DStopColor
+	DStopOpacity
+	DStroke
+	DStrokeDasharray
+	DStrokeDashoffset
+	DStrokeLinecap
+	DStrokeLinejoin
+	DStrokeMiterlimit
+	DStrokeOpacity
+	DStrokeWidth
+	DTabSize
+	DTableLayout
+	DTextAlign
+	DTextAlignLast
+	DTextAnchor
+	DTextCombineUpright
+	DTextDecoration
+	DTextDecorationColor
+	DTextDecorationLine
+	DTextDecorationSkip
+	DTextDecorationStyle
+	DTextEmphasis
+	DTextEmphasisColor
+	DTextEmphasisPosition
+	DTextEmphasisStyle
+	DTextIndent
+	DTextJustify
+	DTextOrientation
+	DTextOverflow
+	DTextRendering
+	DTextShadow
+	DTextSizeAdjust
+	DTextTransform
+	DTextUnderlinePosition
+	DTop
+	DTouchAction
+	DTransform
+	DTransformBox
+	DTransformOrigin
+	DTransformStyle
+	DTransition
+	DTransitionDelay
+	DTransitionDuration
+	DTransitionProperty
+	DTransitionTimingFunction
+	DTranslate
+	DUnicodeBidi
+	DUserSelect
+	DVerticalAlign
+	DVisibility
+	DWhiteSpace
+	DWidows
+	DWidth
+	DWillChange
+	DWordBreak
+	DWordSpacing
+	DWordWrap
+	DWritingMode
+	DZIndex
+	DZoom
+)
+
+var KnownDeclarations = map[string]D{ // Lower-case CSS property name => D constant; missing keys yield DUnknown
+	"align-content":               DAlignContent,
+	"align-items":                 DAlignItems,
+	"align-self":                  DAlignSelf,
+	"alignment-baseline":          DAlignmentBaseline,
+	"all":                         DAll,
+	"animation":                   DAnimation,
+	"animation-delay":             DAnimationDelay,
+	"animation-direction":         DAnimationDirection,
+	"animation-duration":          DAnimationDuration,
+	"animation-fill-mode":         DAnimationFillMode,
+	"animation-iteration-count":   DAnimationIterationCount,
+	"animation-name":              DAnimationName,
+	"animation-play-state":        DAnimationPlayState,
+	"animation-timing-function":   DAnimationTimingFunction,
+	"appearance":                  DAppearance,
+	"backdrop-filter":             DBackdropFilter,
+	"backface-visibility":         DBackfaceVisibility,
+	"background":                  DBackground,
+	"background-attachment":       DBackgroundAttachment,
+	"background-clip":             DBackgroundClip,
+	"background-color":            DBackgroundColor,
+	"background-image":            DBackgroundImage,
+	"background-origin":           DBackgroundOrigin,
+	"background-position":         DBackgroundPosition,
+	"background-position-x":       DBackgroundPositionX,
+	"background-position-y":       DBackgroundPositionY,
+	"background-repeat":           DBackgroundRepeat,
+	"background-size":             DBackgroundSize,
+	"baseline-shift":              DBaselineShift,
+	"block-size":                  DBlockSize,
+	"border":                      DBorder,
+	"border-block-end":            DBorderBlockEnd,
+	"border-block-end-color":      DBorderBlockEndColor,
+	"border-block-end-style":      DBorderBlockEndStyle,
+	"border-block-end-width":      DBorderBlockEndWidth,
+	"border-block-start":          DBorderBlockStart,
+	"border-block-start-color":    DBorderBlockStartColor,
+	"border-block-start-style":    DBorderBlockStartStyle,
+	"border-block-start-width":    DBorderBlockStartWidth,
+	"border-bottom":               DBorderBottom,
+	"border-bottom-color":         DBorderBottomColor,
+	"border-bottom-left-radius":   DBorderBottomLeftRadius,
+	"border-bottom-right-radius":  DBorderBottomRightRadius,
+	"border-bottom-style":         DBorderBottomStyle,
+	"border-bottom-width":         DBorderBottomWidth,
+	"border-collapse":             DBorderCollapse,
+	"border-color":                DBorderColor,
+	"border-image":                DBorderImage,
+	"border-image-outset":         DBorderImageOutset,
+	"border-image-repeat":         DBorderImageRepeat,
+	"border-image-slice":          DBorderImageSlice,
+	"border-image-source":         DBorderImageSource,
+	"border-image-width":          DBorderImageWidth,
+	"border-inline-end":           DBorderInlineEnd,
+	"border-inline-end-color":     DBorderInlineEndColor,
+	"border-inline-end-style":     DBorderInlineEndStyle,
+	"border-inline-end-width":     DBorderInlineEndWidth,
+	"border-inline-start":         DBorderInlineStart,
+	"border-inline-start-color":   DBorderInlineStartColor,
+	"border-inline-start-style":   DBorderInlineStartStyle,
+	"border-inline-start-width":   DBorderInlineStartWidth,
+	"border-left":                 DBorderLeft,
+	"border-left-color":           DBorderLeftColor,
+	"border-left-style":           DBorderLeftStyle,
+	"border-left-width":           DBorderLeftWidth,
+	"border-radius":               DBorderRadius,
+	"border-right":                DBorderRight,
+	"border-right-color":          DBorderRightColor,
+	"border-right-style":          DBorderRightStyle,
+	"border-right-width":          DBorderRightWidth,
+	"border-spacing":              DBorderSpacing,
+	"border-style":                DBorderStyle,
+	"border-top":                  DBorderTop,
+	"border-top-color":            DBorderTopColor,
+	"border-top-left-radius":      DBorderTopLeftRadius,
+	"border-top-right-radius":     DBorderTopRightRadius,
+	"border-top-style":            DBorderTopStyle,
+	"border-top-width":            DBorderTopWidth,
+	"border-width":                DBorderWidth,
+	"bottom":                      DBottom,
+	"box-decoration-break":        DBoxDecorationBreak,
+	"box-shadow":                  DBoxShadow,
+	"box-sizing":                  DBoxSizing,
+	"break-after":                 DBreakAfter,
+	"break-before":                DBreakBefore,
+	"break-inside":                DBreakInside,
+	"caption-side":                DCaptionSide,
+	"caret-color":                 DCaretColor,
+	"clear":                       DClear,
+	"clip":                        DClip,
+	"clip-path":                   DClipPath,
+	"clip-rule":                   DClipRule,
+	"color":                       DColor,
+	"color-interpolation":         DColorInterpolation,
+	"color-interpolation-filters": DColorInterpolationFilters,
+	"column-count":                DColumnCount,
+	"column-fill":                 DColumnFill,
+	"column-gap":                  DColumnGap,
+	"column-rule":                 DColumnRule,
+	"column-rule-color":           DColumnRuleColor,
+	"column-rule-style":           DColumnRuleStyle,
+	"column-rule-width":           DColumnRuleWidth,
+	"column-span":                 DColumnSpan,
+	"column-width":                DColumnWidth,
+	"columns":                     DColumns,
+	"composes":                    DComposes,
+	"container":                   DContainer,
+	"container-name":              DContainerName,
+	"container-type":              DContainerType,
+	"content":                     DContent,
+	"counter-increment":           DCounterIncrement,
+	"counter-reset":               DCounterReset,
+	"css-float":                   DCssFloat,
+	"css-text":                    DCssText,
+	"cursor":                      DCursor,
+	"direction":                   DDirection,
+	"display":                     DDisplay,
+	"dominant-baseline":           DDominantBaseline,
+	"empty-cells":                 DEmptyCells,
+	"fill":                        DFill,
+	"fill-opacity":                DFillOpacity,
+	"fill-rule":                   DFillRule,
+	"filter":                      DFilter,
+	"flex":                        DFlex,
+	"flex-basis":                  DFlexBasis,
+	"flex-direction":              DFlexDirection,
+	"flex-flow":                   DFlexFlow,
+	"flex-grow":                   DFlexGrow,
+	"flex-shrink":                 DFlexShrink,
+	"flex-wrap":                   DFlexWrap,
+	"float":                       DFloat,
+	"flood-color":                 DFloodColor,
+	"flood-opacity":               DFloodOpacity,
+	"font":                        DFont,
+	"font-family":                 DFontFamily,
+	"font-feature-settings":       DFontFeatureSettings,
+	"font-kerning":                DFontKerning,
+	"font-size":                   DFontSize,
+	"font-size-adjust":            DFontSizeAdjust,
+	"font-stretch":                DFontStretch,
+	"font-style":                  DFontStyle,
+	"font-synthesis":              DFontSynthesis,
+	"font-variant":                DFontVariant,
+	"font-variant-caps":           DFontVariantCaps,
+	"font-variant-east-asian":     DFontVariantEastAsian,
+	"font-variant-ligatures":      DFontVariantLigatures,
+	"font-variant-numeric":        DFontVariantNumeric,
+	"font-variant-position":       DFontVariantPosition,
+	"font-weight":                 DFontWeight,
+	"gap":                         DGap,
+	"glyph-orientation-vertical":  DGlyphOrientationVertical,
+	"grid":                        DGrid,
+	"grid-area":                   DGridArea,
+	"grid-auto-columns":           DGridAutoColumns,
+	"grid-auto-flow":              DGridAutoFlow,
+	"grid-auto-rows":              DGridAutoRows,
+	"grid-column":                 DGridColumn,
+	"grid-column-end":             DGridColumnEnd,
+	"grid-column-gap":             DGridColumnGap,
+	"grid-column-start":           DGridColumnStart,
+	"grid-gap":                    DGridGap,
+	"grid-row":                    DGridRow,
+	"grid-row-end":                DGridRowEnd,
+	"grid-row-gap":                DGridRowGap,
+	"grid-row-start":              DGridRowStart,
+	"grid-template":               DGridTemplate,
+	"grid-template-areas":         DGridTemplateAreas,
+	"grid-template-columns":       DGridTemplateColumns,
+	"grid-template-rows":          DGridTemplateRows,
+	"height":                      DHeight,
+	"hyphens":                     DHyphens,
+	"image-orientation":           DImageOrientation,
+	"image-rendering":             DImageRendering,
+	"initial-letter":              DInitialLetter,
+	"inline-size":                 DInlineSize,
+	"inset":                       DInset,
+	"justify-content":             DJustifyContent,
+	"justify-items":               DJustifyItems,
+	"justify-self":                DJustifySelf,
+	"left":                        DLeft,
+	"letter-spacing":              DLetterSpacing,
+	"lighting-color":              DLightingColor,
+	"line-break":                  DLineBreak,
+	"line-height":                 DLineHeight,
+	"list-style":                  DListStyle,
+	"list-style-image":            DListStyleImage,
+	"list-style-position":         DListStylePosition,
+	"list-style-type":             DListStyleType,
+	"margin":                      DMargin,
+	"margin-block-end":            DMarginBlockEnd,
+	"margin-block-start":          DMarginBlockStart,
+	"margin-bottom":               DMarginBottom,
+	"margin-inline-end":           DMarginInlineEnd,
+	"margin-inline-start":         DMarginInlineStart,
+	"margin-left":                 DMarginLeft,
+	"margin-right":                DMarginRight,
+	"margin-top":                  DMarginTop,
+	"marker":                      DMarker,
+	"marker-end":                  DMarkerEnd,
+	"marker-mid":                  DMarkerMid,
+	"marker-start":                DMarkerStart,
+	"mask":                        DMask,
+	"mask-composite":              DMaskComposite,
+	"mask-image":                  DMaskImage,
+	"mask-origin":                 DMaskOrigin,
+	"mask-position":               DMaskPosition,
+	"mask-repeat":                 DMaskRepeat,
+	"mask-size":                   DMaskSize,
+	"mask-type":                   DMaskType,
+	"max-block-size":              DMaxBlockSize,
+	"max-height":                  DMaxHeight,
+	"max-inline-size":             DMaxInlineSize,
+	"max-width":                   DMaxWidth,
+	"min-block-size":              DMinBlockSize,
+	"min-height":                  DMinHeight,
+	"min-inline-size":             DMinInlineSize,
+	"min-width":                   DMinWidth,
+	"object-fit":                  DObjectFit,
+	"object-position":             DObjectPosition,
+	"opacity":                     DOpacity,
+	"order":                       DOrder,
+	"orphans":                     DOrphans,
+	"outline":                     DOutline,
+	"outline-color":               DOutlineColor,
+	"outline-offset":              DOutlineOffset,
+	"outline-style":               DOutlineStyle,
+	"outline-width":               DOutlineWidth,
+	"overflow":                    DOverflow,
+	"overflow-anchor":             DOverflowAnchor,
+	"overflow-wrap":               DOverflowWrap,
+	"overflow-x":                  DOverflowX,
+	"overflow-y":                  DOverflowY,
+	"overscroll-behavior":         DOverscrollBehavior,
+	"overscroll-behavior-block":   DOverscrollBehaviorBlock,
+	"overscroll-behavior-inline":  DOverscrollBehaviorInline,
+	"overscroll-behavior-x":       DOverscrollBehaviorX,
+	"overscroll-behavior-y":       DOverscrollBehaviorY,
+	"padding":                     DPadding,
+	"padding-block-end":           DPaddingBlockEnd,
+	"padding-block-start":         DPaddingBlockStart,
+	"padding-bottom":              DPaddingBottom,
+	"padding-inline-end":          DPaddingInlineEnd,
+	"padding-inline-start":        DPaddingInlineStart,
+	"padding-left":                DPaddingLeft,
+	"padding-right":               DPaddingRight,
+	"padding-top":                 DPaddingTop,
+	"page-break-after":            DPageBreakAfter,
+	"page-break-before":           DPageBreakBefore,
+	"page-break-inside":           DPageBreakInside,
+	"paint-order":                 DPaintOrder,
+	"perspective":                 DPerspective,
+	"perspective-origin":          DPerspectiveOrigin,
+	"place-content":               DPlaceContent,
+	"place-items":                 DPlaceItems,
+	"place-self":                  DPlaceSelf,
+	"pointer-events":              DPointerEvents,
+	"position":                    DPosition,
+	"print-color-adjust":          DPrintColorAdjust,
+	"quotes":                      DQuotes,
+	"resize":                      DResize,
+	"right":                       DRight,
+	"rotate":                      DRotate,
+	"row-gap":                     DRowGap,
+	"ruby-align":                  DRubyAlign,
+	"ruby-position":               DRubyPosition,
+	"scale":                       DScale,
+	"scroll-behavior":             DScrollBehavior,
+	"shape-rendering":             DShapeRendering,
+	"stop-color":                  DStopColor,
+	"stop-opacity":                DStopOpacity,
+	"stroke":                      DStroke,
+	"stroke-dasharray":            DStrokeDasharray,
+	"stroke-dashoffset":           DStrokeDashoffset,
+	"stroke-linecap":              DStrokeLinecap,
+	"stroke-linejoin":             DStrokeLinejoin,
+	"stroke-miterlimit":           DStrokeMiterlimit,
+	"stroke-opacity":              DStrokeOpacity,
+	"stroke-width":                DStrokeWidth,
+	"tab-size":                    DTabSize,
+	"table-layout":                DTableLayout,
+	"text-align":                  DTextAlign,
+	"text-align-last":             DTextAlignLast,
+	"text-anchor":                 DTextAnchor,
+	"text-combine-upright":        DTextCombineUpright,
+	"text-decoration":             DTextDecoration,
+	"text-decoration-color":       DTextDecorationColor,
+	"text-decoration-line":        DTextDecorationLine,
+	"text-decoration-skip":        DTextDecorationSkip,
+	"text-decoration-style":       DTextDecorationStyle,
+	"text-emphasis":               DTextEmphasis,
+	"text-emphasis-color":         DTextEmphasisColor,
+	"text-emphasis-position":      DTextEmphasisPosition,
+	"text-emphasis-style":         DTextEmphasisStyle,
+	"text-indent":                 DTextIndent,
+	"text-justify":                DTextJustify,
+	"text-orientation":            DTextOrientation,
+	"text-overflow":               DTextOverflow,
+	"text-rendering":              DTextRendering,
+	"text-shadow":                 DTextShadow,
+	"text-size-adjust":            DTextSizeAdjust,
+	"text-transform":              DTextTransform,
+	"text-underline-position":     DTextUnderlinePosition,
+	"top":                         DTop,
+	"touch-action":                DTouchAction,
+	"transform":                   DTransform,
+	"transform-box":               DTransformBox,
+	"transform-origin":            DTransformOrigin,
+	"transform-style":             DTransformStyle,
+	"transition":                  DTransition,
+	"transition-delay":            DTransitionDelay,
+	"transition-duration":         DTransitionDuration,
+	"transition-property":         DTransitionProperty,
+	"transition-timing-function":  DTransitionTimingFunction,
+	"translate":                   DTranslate,
+	"unicode-bidi":                DUnicodeBidi,
+	"user-select":                 DUserSelect,
+	"vertical-align":              DVerticalAlign,
+	"visibility":                  DVisibility,
+	"white-space":                 DWhiteSpace,
+	"widows":                      DWidows,
+	"width":                       DWidth,
+	"will-change":                 DWillChange,
+	"word-break":                  DWordBreak,
+	"word-spacing":                DWordSpacing,
+	"word-wrap":                   DWordWrap,
+	"writing-mode":                DWritingMode,
+	"z-index":                     DZIndex,
+	"zoom":                        DZoom,
+}
+
+var typoDetector *helpers.TypoDetector // Built lazily on first use; guarded by typoDetectorMutex
+var typoDetectorMutex sync.Mutex
+
+func MaybeCorrectDeclarationTypo(text string) (string, bool) { // Suggests a known property name for a likely typo; ok=false when no suggestion
+	// Ignore CSS variables, which should not be corrected to CSS properties
+	if strings.HasPrefix(text, "--") {
+		return "", false
+	}
+
+	typoDetectorMutex.Lock()
+	defer typoDetectorMutex.Unlock()
+
+	// Lazily-initialize the typo detector for speed when it's not needed
+	if typoDetector == nil {
+		valid := make([]string, 0, len(KnownDeclarations))
+		for key := range KnownDeclarations {
+			valid = append(valid, key)
+		}
+		detector := helpers.MakeTypoDetector(valid)
+		typoDetector = &detector
+	}
+
+	return typoDetector.MaybeCorrectTypo(text)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_lexer/css_lexer.go b/source/vendor/github.com/evanw/esbuild/internal/css_lexer/css_lexer.go
new file mode 100644
index 0000000..9e8c088
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_lexer/css_lexer.go
@@ -0,0 +1,1081 @@
+package css_lexer
+
+import (
+	"strings"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// The lexer converts a source file to a stream of tokens. Unlike esbuild's
+// JavaScript lexer, this CSS lexer runs to completion before the CSS parser
+// begins, resulting in a single array of all tokens in the file.
+
+type T uint8 // T enumerates the CSS token kinds produced by this lexer
+
+const eof = -1 // Sentinel code point used by step() to mark end of input
+
+const (
+	TEndOfFile T = iota
+
+	TAtKeyword
+	TUnterminatedString
+	TBadURL
+	TCDC // "-->"
+	TCDO // "<!--"
+	TCloseBrace
+	TCloseBracket
+	TCloseParen
+	TColon
+	TComma
+	TDelim
+	TDelimAmpersand
+	TDelimAsterisk
+	TDelimBar
+	TDelimCaret
+	TDelimDollar
+	TDelimDot
+	TDelimEquals
+	TDelimExclamation
+	TDelimGreaterThan
+	TDelimMinus
+	TDelimPlus
+	TDelimSlash
+	TDelimTilde
+	TDimension
+	TFunction
+	THash
+	TIdent
+	TNumber
+	TOpenBrace
+	TOpenBracket
+	TOpenParen
+	TPercentage
+	TSemicolon
+	TString
+	TURL
+	TWhitespace
+
+	// This is never something that the lexer generates directly. Instead this is
+	// an esbuild-specific token for global/local names that "TIdent" tokens may
+	// be changed into.
+	TSymbol
+)
+
+var tokenToString = []string{ // Indexed by T; entry order must stay in sync with the constant block above
+	"end of file",
+	"@-keyword",
+	"bad string token",
+	"bad URL token",
+	"\"-->\"",
+	"\"<!--\"",
+	"\"}\"",
+	"\"]\"",
+	"\")\"",
+	"\":\"",
+	"\",\"",
+	"delimiter",
+	"\"&\"",
+	"\"*\"",
+	"\"|\"",
+	"\"^\"",
+	"\"$\"",
+	"\".\"",
+	"\"=\"",
+	"\"!\"",
+	"\">\"",
+	"\"-\"",
+	"\"+\"",
+	"\"/\"",
+	"\"~\"",
+	"dimension",
+	"function token",
+	"hash token",
+	"identifier",
+	"number",
+	"\"{\"",
+	"\"[\"",
+	"\"(\"",
+	"percentage",
+	"\";\"",
+	"string token",
+	"URL token",
+	"whitespace",
+
+	"identifier",
+}
+
+func (t T) String() string { // Human-readable token name for diagnostics
+	return tokenToString[t]
+}
+
+func (t T) IsNumeric() bool { // True for the three numeric token kinds: number, percentage, dimension
+	return t == TNumber || t == TPercentage || t == TDimension
+}
+
+type TokenFlags uint8 // Bit flags attached to individual tokens
+
+const (
+	IsID TokenFlags = 1 << iota // Set on THash tokens that could also parse as an identifier (i.e. a valid ID selector)
+	DidWarnAboutSingleLineComment
+)
+
+// This token struct is designed to be memory-efficient. It just references a
+// range in the input file instead of directly containing the substring of text
+// since a range takes up less memory than a string.
+type Token struct {
+	Range      logger.Range // 8 bytes
+	UnitOffset uint16       // 2 bytes; offset of the unit within a TDimension token
+	Kind       T            // 1 byte
+	Flags      TokenFlags   // 1 byte
+}
+
+func (token Token) DecodedText(contents string) string { // Returns the token's text with delimiters stripped and escapes decoded
+	raw := contents[token.Range.Loc.Start:token.Range.End()]
+
+	switch token.Kind {
+	case TIdent, TDimension:
+		return decodeEscapesInToken(raw)
+
+	case TAtKeyword, THash:
+		return decodeEscapesInToken(raw[1:]) // Skip the leading "@" or "#"
+
+	case TFunction:
+		return decodeEscapesInToken(raw[:len(raw)-1]) // Drop the trailing "("
+
+	case TString:
+		return decodeEscapesInToken(raw[1 : len(raw)-1]) // Drop the surrounding quotes
+
+	case TURL:
+		start := 4 // Skip over "url("
+		end := len(raw)
+
+		// Note: URL tokens with syntax errors may not have a trailing ")"
+		if raw[end-1] == ')' {
+			end--
+		}
+
+		// Trim leading and trailing whitespace
+		for start < end && isWhitespace(rune(raw[start])) {
+			start++
+		}
+		for start < end && isWhitespace(rune(raw[end-1])) {
+			end--
+		}
+
+		return decodeEscapesInToken(raw[start:end])
+	}
+
+	return raw
+}
+
+type lexer struct {
+	Options // Embedded lexer configuration (e.g. RecordAllComments)
+	log                     logger.Log
+	source                  logger.Source
+	allComments             []logger.Range // Every comment range, recorded when Options.RecordAllComments is set
+	legalCommentsBefore     []Comment      // Legal comments seen since the last emitted token
+	sourceMappingURL        logger.Span
+	tracker                 logger.LineColumnTracker
+	approximateNewlineCount int        // See the note in step(); used only for preallocation
+	current                 int        // Byte offset just past codePoint
+	oldSingleLineCommentEnd logger.Loc // End of the last "//" comment we warned about, to avoid duplicate warnings
+	codePoint               rune       // Current decoded code point, or eof (-1) at end of input
+	Token                   Token      // The token currently being built / most recently produced
+}
+
+// Comment is a legal comment captured during tokenization
+type Comment struct {
+	Text            string
+	Loc             logger.Loc
+	TokenIndexAfter uint32 // Index into TokenizeResult.Tokens of the first token after this comment
+}
+
+// TokenizeResult is everything produced by one call to Tokenize
+type TokenizeResult struct {
+	Tokens               []Token
+	AllComments          []logger.Range
+	LegalComments        []Comment
+	SourceMapComment     logger.Span
+	ApproximateLineCount int32 // Approximate (see step()); intended for preallocation, not exact line math
+}
+
+// Options configures the lexer
+type Options struct {
+	RecordAllComments bool // When true, record the range of every comment in TokenizeResult.AllComments
+}
+
+func Tokenize(log logger.Log, source logger.Source, options Options) TokenizeResult { // Lexes the whole file up front and returns every token
+	lexer := lexer{
+		Options: options,
+		log:     log,
+		source:  source,
+		tracker: logger.MakeLineColumnTracker(&source),
+	}
+	lexer.step() // Prime codePoint with the first rune
+
+	// The U+FEFF character is usually a zero-width non-breaking space. However,
+	// when it's used at the start of a text stream it is called a BOM (byte order
+	// mark) instead and indicates that the text stream is UTF-8 encoded. This is
+	// problematic for us because CSS does not treat U+FEFF as whitespace. Only
+	// " \t\r\n\f" characters are treated as whitespace. Skip over the BOM if it
+	// is present so it doesn't cause us trouble when we try to parse it.
+	if lexer.codePoint == '\uFEFF' {
+		lexer.step()
+	}
+
+	lexer.next() // Produce the first token
+	var tokens []Token
+	var legalComments []Comment
+	for lexer.Token.Kind != TEndOfFile {
+		if lexer.legalCommentsBefore != nil { // Attach pending legal comments to the index of the token about to be appended
+			for _, comment := range lexer.legalCommentsBefore {
+				comment.TokenIndexAfter = uint32(len(tokens))
+				legalComments = append(legalComments, comment)
+			}
+			lexer.legalCommentsBefore = nil
+		}
+		tokens = append(tokens, lexer.Token)
+		lexer.next()
+	}
+	if lexer.legalCommentsBefore != nil { // Flush legal comments that trail the final token (they point one past the end)
+		for _, comment := range lexer.legalCommentsBefore {
+			comment.TokenIndexAfter = uint32(len(tokens))
+			legalComments = append(legalComments, comment)
+		}
+		lexer.legalCommentsBefore = nil
+	}
+	return TokenizeResult{
+		Tokens:               tokens,
+		AllComments:          lexer.allComments,
+		LegalComments:        legalComments,
+		ApproximateLineCount: int32(lexer.approximateNewlineCount) + 1,
+		SourceMapComment:     lexer.sourceMappingURL,
+	}
+}
+
+func (lexer *lexer) step() { // Advances by one UTF-8 code point and extends the current token's range
+	codePoint, width := utf8.DecodeRuneInString(lexer.source.Contents[lexer.current:])
+
+	// Use -1 to indicate the end of the file
+	if width == 0 {
+		codePoint = eof
+	}
+
+	// Track the approximate number of newlines in the file so we can preallocate
+	// the line offset table in the printer for source maps. The line offset table
+	// is the #1 highest allocation in the heap profile, so this is worth doing.
+	// This count is approximate because it handles "\n" and "\r\n" (the common
+	// cases) but not "\r" or "\u2028" or "\u2029". Getting this wrong is harmless
+	// because it's only a preallocation. The array will just grow if it's too small.
+	if codePoint == '\n' {
+		lexer.approximateNewlineCount++
+	}
+
+	lexer.codePoint = codePoint
+	lexer.Token.Range.Len = int32(lexer.current) - lexer.Token.Range.Loc.Start // Token now ends just before the new code point
+	lexer.current += width
+}
+
+func (lexer *lexer) next() {
+	// Reference: https://www.w3.org/TR/css-syntax-3/
+
+	for {
+		lexer.Token = Token{Range: logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}}}
+
+		switch lexer.codePoint {
+		case eof:
+			lexer.Token.Kind = TEndOfFile
+
+		case '/':
+			lexer.step()
+			switch lexer.codePoint {
+			case '*':
+				lexer.step()
+				lexer.consumeToEndOfMultiLineComment(lexer.Token.Range)
+				continue
+			case '/':
+				// Warn when people use "//" comments, which are invalid in CSS
+				loc := lexer.Token.Range.Loc
+				if loc.Start >= lexer.oldSingleLineCommentEnd.Start {
+					contents := lexer.source.Contents
+					end := lexer.current
+					for end < len(contents) && !isNewline(rune(contents[end])) {
+						end++
+					}
+					lexer.log.AddID(logger.MsgID_CSS_JSCommentInCSS, logger.Warning, &lexer.tracker, logger.Range{Loc: loc, Len: 2},
+						"Comments in CSS use \"/* ... */\" instead of \"//\"")
+					lexer.oldSingleLineCommentEnd.Start = int32(end)
+					lexer.Token.Flags |= DidWarnAboutSingleLineComment
+				}
+			}
+			lexer.Token.Kind = TDelimSlash
+
+		case ' ', '\t', '\n', '\r', '\f':
+			lexer.step()
+			for {
+				if isWhitespace(lexer.codePoint) {
+					lexer.step()
+				} else if lexer.codePoint == '/' && lexer.current < len(lexer.source.Contents) && lexer.source.Contents[lexer.current] == '*' {
+					startRange := logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}, Len: 2}
+					lexer.step()
+					lexer.step()
+					lexer.consumeToEndOfMultiLineComment(startRange)
+				} else {
+					break
+				}
+			}
+			lexer.Token.Kind = TWhitespace
+
+		case '"', '\'':
+			lexer.Token.Kind = lexer.consumeString()
+
+		case '#':
+			lexer.step()
+			if IsNameContinue(lexer.codePoint) || lexer.isValidEscape() {
+				lexer.Token.Kind = THash
+				if lexer.wouldStartIdentifier() {
+					lexer.Token.Flags |= IsID
+				}
+				lexer.consumeName()
+			} else {
+				lexer.Token.Kind = TDelim
+			}
+
+		case '(':
+			lexer.step()
+			lexer.Token.Kind = TOpenParen
+
+		case ')':
+			lexer.step()
+			lexer.Token.Kind = TCloseParen
+
+		case '[':
+			lexer.step()
+			lexer.Token.Kind = TOpenBracket
+
+		case ']':
+			lexer.step()
+			lexer.Token.Kind = TCloseBracket
+
+		case '{':
+			lexer.step()
+			lexer.Token.Kind = TOpenBrace
+
+		case '}':
+			lexer.step()
+			lexer.Token.Kind = TCloseBrace
+
+		case ',':
+			lexer.step()
+			lexer.Token.Kind = TComma
+
+		case ':':
+			lexer.step()
+			lexer.Token.Kind = TColon
+
+		case ';':
+			lexer.step()
+			lexer.Token.Kind = TSemicolon
+
+		case '+':
+			if lexer.wouldStartNumber() {
+				lexer.Token.Kind = lexer.consumeNumeric()
+			} else {
+				lexer.step()
+				lexer.Token.Kind = TDelimPlus
+			}
+
+		case '.':
+			if lexer.wouldStartNumber() {
+				lexer.Token.Kind = lexer.consumeNumeric()
+			} else {
+				lexer.step()
+				lexer.Token.Kind = TDelimDot
+			}
+
+		case '-':
+			if lexer.wouldStartNumber() {
+				lexer.Token.Kind = lexer.consumeNumeric()
+			} else if lexer.current+2 <= len(lexer.source.Contents) && lexer.source.Contents[lexer.current:lexer.current+2] == "->" {
+				lexer.step()
+				lexer.step()
+				lexer.step()
+				lexer.Token.Kind = TCDC
+			} else if lexer.wouldStartIdentifier() {
+				lexer.Token.Kind = lexer.consumeIdentLike()
+			} else {
+				lexer.step()
+				lexer.Token.Kind = TDelimMinus
+			}
+
+		case '<':
+			if lexer.current+3 <= len(lexer.source.Contents) && lexer.source.Contents[lexer.current:lexer.current+3] == "!--" {
+				lexer.step()
+				lexer.step()
+				lexer.step()
+				lexer.step()
+				lexer.Token.Kind = TCDO
+			} else {
+				lexer.step()
+				lexer.Token.Kind = TDelim
+			}
+
+		case '@':
+			lexer.step()
+			if lexer.wouldStartIdentifier() {
+				lexer.consumeName()
+				lexer.Token.Kind = TAtKeyword
+			} else {
+				lexer.Token.Kind = TDelim
+			}
+
+		case '\\':
+			if lexer.isValidEscape() {
+				lexer.Token.Kind = lexer.consumeIdentLike()
+			} else {
+				lexer.step()
+				lexer.log.AddError(&lexer.tracker, lexer.Token.Range, "Invalid escape")
+				lexer.Token.Kind = TDelim
+			}
+
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			lexer.Token.Kind = lexer.consumeNumeric()
+
+		case '>':
+			lexer.step()
+			lexer.Token.Kind = TDelimGreaterThan
+
+		case '~':
+			lexer.step()
+			lexer.Token.Kind = TDelimTilde
+
+		case '&':
+			lexer.step()
+			lexer.Token.Kind = TDelimAmpersand
+
+		case '*':
+			lexer.step()
+			lexer.Token.Kind = TDelimAsterisk
+
+		case '|':
+			lexer.step()
+			lexer.Token.Kind = TDelimBar
+
+		case '!':
+			lexer.step()
+			lexer.Token.Kind = TDelimExclamation
+
+		case '=':
+			lexer.step()
+			lexer.Token.Kind = TDelimEquals
+
+		case '^':
+			lexer.step()
+			lexer.Token.Kind = TDelimCaret
+
+		case '$':
+			lexer.step()
+			lexer.Token.Kind = TDelimDollar
+
+		default:
+			if IsNameStart(lexer.codePoint) {
+				lexer.Token.Kind = lexer.consumeIdentLike()
+			} else {
+				lexer.step()
+				lexer.Token.Kind = TDelim
+			}
+		}
+
+		return
+	}
+}
+
// consumeToEndOfMultiLineComment advances the lexer past the body of a
// "/* ... */" comment whose opening "/*" has already been consumed.
// Along the way it records a "sourceMappingURL=" pragma if present,
// appends to the list of all comments (when RecordAllComments is set),
// and collects legal comments ("/*!" or ones containing @preserve or
// @license). Reaching EOF first reports an error pointing back at the
// start of the comment.
func (lexer *lexer) consumeToEndOfMultiLineComment(startRange logger.Range) {
	startOfSourceMappingURL := 0
	isLegalComment := false

	// Classify the comment by its first character (just after "/*")
	switch lexer.codePoint {
	case '#', '@':
		// Keep track of the contents of the "sourceMappingURL=" comment
		if strings.HasPrefix(lexer.source.Contents[lexer.current:], " sourceMappingURL=") {
			startOfSourceMappingURL = lexer.current + len(" sourceMappingURL=")
		}

	case '!':
		// Remember if this is a legal comment
		isLegalComment = true
	}

	for {
		switch lexer.codePoint {
		case '*':
			// Potential end of the comment; the URL (if any) ends just before this "*"
			endOfSourceMappingURL := lexer.current - 1
			lexer.step()
			if lexer.codePoint == '/' {
				commentEnd := lexer.current
				lexer.step()

				// Record the source mapping URL (trimmed at the first whitespace)
				if startOfSourceMappingURL != 0 {
					r := logger.Range{Loc: logger.Loc{Start: int32(startOfSourceMappingURL)}}
					text := lexer.source.Contents[startOfSourceMappingURL:endOfSourceMappingURL]
					for int(r.Len) < len(text) && !isWhitespace(rune(text[r.Len])) {
						r.Len++
					}
					lexer.sourceMappingURL = logger.Span{Text: text[:r.Len], Range: r}
				}

				// Record all comments
				commentRange := logger.Range{Loc: startRange.Loc, Len: int32(commentEnd) - startRange.Loc.Start}
				if lexer.RecordAllComments {
					lexer.allComments = append(lexer.allComments, commentRange)
				}

				// Record legal comments
				if text := lexer.source.Contents[startRange.Loc.Start:commentEnd]; isLegalComment || containsAtPreserveOrAtLicense(text) {
					text = lexer.source.CommentTextWithoutIndent(commentRange)
					lexer.legalCommentsBefore = append(lexer.legalCommentsBefore, Comment{Loc: startRange.Loc, Text: text})
				}
				return
			}

		case eof: // This indicates the end of the file
			lexer.log.AddErrorWithNotes(&lexer.tracker, logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}},
				"Expected \"*/\" to terminate multi-line comment",
				[]logger.MsgData{lexer.tracker.MsgData(startRange, "The multi-line comment starts here:")})
			return

		default:
			lexer.step()
		}
	}
}
+
// containsAtPreserveOrAtLicense reports whether the comment text contains
// an "@preserve" or "@license" annotation, which marks it as a legal
// comment that should be kept in the output.
func containsAtPreserveOrAtLicense(text string) bool {
	for rest := text; ; {
		at := strings.IndexByte(rest, '@')
		if at < 0 {
			return false
		}
		rest = rest[at+1:]
		if strings.HasPrefix(rest, "preserve") || strings.HasPrefix(rest, "license") {
			return true
		}
	}
}
+
+func (lexer *lexer) isValidEscape() bool {
+	if lexer.codePoint != '\\' {
+		return false
+	}
+	c, _ := utf8.DecodeRuneInString(lexer.source.Contents[lexer.current:])
+	return !isNewline(c)
+}
+
+func (lexer *lexer) wouldStartIdentifier() bool {
+	if IsNameStart(lexer.codePoint) {
+		return true
+	}
+
+	if lexer.codePoint == '-' {
+		c, width := utf8.DecodeRuneInString(lexer.source.Contents[lexer.current:])
+		if c == utf8.RuneError && width <= 1 {
+			return false // Decoding error
+		}
+		if IsNameStart(c) || c == '-' {
+			return true
+		}
+		if c == '\\' {
+			c2, _ := utf8.DecodeRuneInString(lexer.source.Contents[lexer.current+width:])
+			return !isNewline(c2)
+		}
+		return false
+	}
+
+	return lexer.isValidEscape()
+}
+
+func WouldStartIdentifierWithoutEscapes(text string) bool {
+	c, width := utf8.DecodeRuneInString(text)
+	if c == utf8.RuneError && width <= 1 {
+		return false // Decoding error
+	}
+	if IsNameStart(c) {
+		return true
+	}
+
+	if c == '-' {
+		c2, width2 := utf8.DecodeRuneInString(text[width:])
+		if c2 == utf8.RuneError && width2 <= 1 {
+			return false // Decoding error
+		}
+		if IsNameStart(c2) || c2 == '-' {
+			return true
+		}
+	}
+	return false
+}
+
// RangeOfIdentifier returns the source range covering the identifier that
// starts at "loc", including any backslash escape sequences it contains.
// A single trailing whitespace character that terminated a hex escape is
// excluded from the returned range.
func RangeOfIdentifier(source logger.Source, loc logger.Loc) logger.Range {
	text := source.Contents[loc.Start:]
	if len(text) == 0 {
		return logger.Range{Loc: loc, Len: 0}
	}

	i := 0
	n := len(text)

	for {
		c, width := utf8.DecodeRuneInString(text[i:])
		if IsNameContinue(c) {
			i += width
			continue
		}

		// Handle an escape
		if c == '\\' && i+1 < n && !isNewline(rune(text[i+1])) {
			i += width // Skip the backslash
			c, width = utf8.DecodeRuneInString(text[i:])
			if _, ok := isHex(c); ok {
				// A hex escape consumes up to six hex digits...
				i += width
				c, width = utf8.DecodeRuneInString(text[i:])
				for j := 0; j < 5; j++ {
					if _, ok := isHex(c); !ok {
						break
					}
					i += width
					c, width = utf8.DecodeRuneInString(text[i:])
				}
				// ...plus one optional trailing whitespace character
				if isWhitespace(c) {
					i += width
				}
			}
			continue
		}

		break
	}

	// Don't end with a whitespace
	if i > 0 && isWhitespace(rune(text[i-1])) {
		i--
	}

	return logger.Range{Loc: loc, Len: int32(i)}
}
+
+func (lexer *lexer) wouldStartNumber() bool {
+	if lexer.codePoint >= '0' && lexer.codePoint <= '9' {
+		return true
+	} else if lexer.codePoint == '.' {
+		contents := lexer.source.Contents
+		if lexer.current < len(contents) {
+			c := contents[lexer.current]
+			return c >= '0' && c <= '9'
+		}
+	} else if lexer.codePoint == '+' || lexer.codePoint == '-' {
+		contents := lexer.source.Contents
+		n := len(contents)
+		if lexer.current < n {
+			c := contents[lexer.current]
+			if c >= '0' && c <= '9' {
+				return true
+			}
+			if c == '.' && lexer.current+1 < n {
+				c = contents[lexer.current+1]
+				return c >= '0' && c <= '9'
+			}
+		}
+	}
+	return false
+}
+
// consumeName consumes a CSS name (identifier) starting at the current
// code point and returns its decoded text. Escape sequences are resolved;
// the common escape-free case returns a substring of the input without
// allocating.
//
// Note: This function is hot in profiles
func (lexer *lexer) consumeName() string {
	// Common case: no escapes, identifier is a substring of the input. Doing this
	// in a tight loop that avoids UTF-8 decoding and that increments a single
	// number instead of doing "step()" is noticeably faster. For example, doing
	// this sped up end-to-end parsing and printing of a large CSS file from 97ms
	// to 84ms (around 15% faster).
	contents := lexer.source.Contents
	if IsNameContinue(lexer.codePoint) {
		n := len(contents)
		i := lexer.current
		// Byte-wise scan is safe: every byte of a multi-byte UTF-8 rune is
		// >= 0x80 and therefore passes IsNameContinue
		for i < n && IsNameContinue(rune(contents[i])) {
			i++
		}
		lexer.current = i
		lexer.step()
	}
	raw := contents[lexer.Token.Range.Loc.Start:lexer.Token.Range.End()]
	if !lexer.isValidEscape() {
		return raw
	}

	// Uncommon case: escapes, identifier is allocated
	sb := strings.Builder{}
	sb.WriteString(raw)
	sb.WriteRune(lexer.consumeEscape())
	for {
		if IsNameContinue(lexer.codePoint) {
			sb.WriteRune(lexer.codePoint)
			lexer.step()
		} else if lexer.isValidEscape() {
			sb.WriteRune(lexer.consumeEscape())
		} else {
			break
		}
	}
	return sb.String()
}
+
// consumeEscape consumes a backslash escape sequence and returns the rune
// it denotes. A hex escape reads up to six hex digits plus one optional
// trailing whitespace character; NUL, surrogate halves, and values above
// the Unicode maximum decode to U+FFFD. Any other escaped character
// stands for itself.
func (lexer *lexer) consumeEscape() rune {
	lexer.step() // Skip the backslash
	c := lexer.codePoint

	if hex, ok := isHex(c); ok {
		lexer.step()
		// Up to five more hex digits (six in total)
		for i := 0; i < 5; i++ {
			if next, ok := isHex(lexer.codePoint); ok {
				lexer.step()
				hex = hex*16 + next
			} else {
				break
			}
		}
		// One whitespace character may terminate the escape; consume it
		if isWhitespace(lexer.codePoint) {
			lexer.step()
		}
		// Invalid code points are replaced by U+FFFD
		if hex == 0 || (hex >= 0xD800 && hex <= 0xDFFF) || hex > 0x10FFFF {
			return utf8.RuneError
		}
		return rune(hex)
	}

	if c == eof {
		return utf8.RuneError
	}

	lexer.step()
	return c
}
+
// consumeIdentLike consumes an identifier-like token and classifies it as
// TIdent, TFunction, or (for "url(" with an unquoted argument) a URL
// token. The lexer state is saved and restored so that "url(" followed by
// a quoted string is still lexed as an ordinary function token.
func (lexer *lexer) consumeIdentLike() T {
	name := lexer.consumeName()

	if lexer.codePoint == '(' {
		matchingLoc := logger.Loc{Start: lexer.Token.Range.End()}
		lexer.step()
		// Case-insensitive check for "url" without allocating
		if len(name) == 3 {
			u, r, l := name[0], name[1], name[2]
			if (u == 'u' || u == 'U') && (r == 'r' || r == 'R') && (l == 'l' || l == 'L') {
				// Save state
				approximateNewlineCount := lexer.approximateNewlineCount
				codePoint := lexer.codePoint
				tokenRangeLen := lexer.Token.Range.Len
				current := lexer.current

				// Check to see if this is a URL token instead of a function
				for isWhitespace(lexer.codePoint) {
					lexer.step()
				}
				if lexer.codePoint != '"' && lexer.codePoint != '\'' {
					return lexer.consumeURL(matchingLoc)
				}

				// Restore state (i.e. backtrack)
				lexer.approximateNewlineCount = approximateNewlineCount
				lexer.codePoint = codePoint
				lexer.Token.Range.Len = tokenRangeLen
				lexer.current = current
			}
		}
		return TFunction
	}

	return TIdent
}
+
// consumeURL consumes the unquoted contents of a "url(...)" token after
// "url(" has already been consumed, returning TURL on success. On a
// syntax error (unbalanced parenthesis, stray quote or "(", invalid
// escape, or non-printable character) a warning is logged and the
// remnants are consumed as TBadURL. "matchingLoc" points at the opening
// "(" so error notes can reference it.
func (lexer *lexer) consumeURL(matchingLoc logger.Loc) T {
validURL:
	for {
		switch lexer.codePoint {
		case ')':
			lexer.step()
			return TURL

		case eof:
			loc := logger.Loc{Start: lexer.Token.Range.End()}
			lexer.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker, logger.Range{Loc: loc}, "Expected \")\" to end URL token",
				[]logger.MsgData{lexer.tracker.MsgData(logger.Range{Loc: matchingLoc, Len: 1}, "The unbalanced \"(\" is here:")})
			return TURL

		case ' ', '\t', '\n', '\r', '\f':
			// Whitespace is only allowed immediately before the closing ")"
			lexer.step()
			for isWhitespace(lexer.codePoint) {
				lexer.step()
			}
			if lexer.codePoint != ')' {
				loc := logger.Loc{Start: lexer.Token.Range.End()}
				lexer.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker, logger.Range{Loc: loc}, "Expected \")\" to end URL token",
					[]logger.MsgData{lexer.tracker.MsgData(logger.Range{Loc: matchingLoc, Len: 1}, "The unbalanced \"(\" is here:")})
				// Don't scan for bad-URL remnants past the end of the file
				if lexer.codePoint == eof {
					return TURL
				}
				break validURL
			}
			lexer.step()
			return TURL

		case '"', '\'', '(':
			r := logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}, Len: 1}
			lexer.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker, r, "Expected \")\" to end URL token",
				[]logger.MsgData{lexer.tracker.MsgData(logger.Range{Loc: matchingLoc, Len: 1}, "The unbalanced \"(\" is here:")})
			break validURL

		case '\\':
			if !lexer.isValidEscape() {
				r := logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}, Len: 1}
				lexer.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker, r, "Invalid escape")
				break validURL
			}
			lexer.consumeEscape()

		default:
			if isNonPrintable(lexer.codePoint) {
				r := logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}, Len: 1}
				lexer.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker, r, "Unexpected non-printable character in URL token")
				break validURL
			}
			lexer.step()
		}
	}

	// Consume the remnants of a bad url
	for {
		switch lexer.codePoint {
		case ')', eof:
			lexer.step()
			return TBadURL

		case '\\':
			// Skip escaped characters, including an escaped ")"
			if lexer.isValidEscape() {
				lexer.consumeEscape()
			}
		}
		lexer.step()
	}
}
+
// consumeString consumes a quoted string token; the current code point is
// the opening quote. Returns TString on success or TUnterminatedString
// when the string is ended by an unescaped newline or EOF (logging a
// warning). Backslash escapes, including escaped newlines and Windows
// CRLF pairs, are skipped over without being decoded.
func (lexer *lexer) consumeString() T {
	quote := lexer.codePoint
	lexer.step()

	for {
		switch lexer.codePoint {
		case '\\':
			lexer.step()

			// Handle Windows CRLF
			if lexer.codePoint == '\r' {
				lexer.step()
				if lexer.codePoint == '\n' {
					lexer.step()
				}
				continue
			}

			// Otherwise, fall through to ignore the character after the backslash

		case eof, '\n', '\r', '\f':
			lexer.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &lexer.tracker,
				logger.Range{Loc: logger.Loc{Start: lexer.Token.Range.End()}},
				"Unterminated string token")
			return TUnterminatedString

		case quote:
			lexer.step()
			return TString
		}
		lexer.step()
	}
}
+
// consumeNumeric consumes a numeric token (optional sign, digits,
// optional fraction, optional exponent) and classifies it as TNumber,
// TPercentage, or TDimension (a number followed by a unit identifier).
// For dimensions, Token.UnitOffset records where the unit begins within
// the token text.
func (lexer *lexer) consumeNumeric() T {
	// Skip over leading sign
	if lexer.codePoint == '+' || lexer.codePoint == '-' {
		lexer.step()
	}

	// Skip over leading digits
	for lexer.codePoint >= '0' && lexer.codePoint <= '9' {
		lexer.step()
	}

	// Skip over digits after dot
	if lexer.codePoint == '.' {
		lexer.step()
		for lexer.codePoint >= '0' && lexer.codePoint <= '9' {
			lexer.step()
		}
	}

	// Skip over exponent
	if lexer.codePoint == 'e' || lexer.codePoint == 'E' {
		contents := lexer.source.Contents

		// Look ahead before advancing to make sure this is an exponent, not a unit
		// (e.g. "1em" must lex as number plus unit, not a malformed exponent)
		if lexer.current < len(contents) {
			c := contents[lexer.current]
			if (c == '+' || c == '-') && lexer.current+1 < len(contents) {
				c = contents[lexer.current+1]
			}

			// Only consume this if it's an exponent
			if c >= '0' && c <= '9' {
				lexer.step()
				if lexer.codePoint == '+' || lexer.codePoint == '-' {
					lexer.step()
				}
				for lexer.codePoint >= '0' && lexer.codePoint <= '9' {
					lexer.step()
				}
			}
		}
	}

	// Determine the numeric type
	if lexer.wouldStartIdentifier() {
		lexer.Token.UnitOffset = uint16(lexer.Token.Range.Len)
		lexer.consumeName()
		return TDimension
	}
	if lexer.codePoint == '%' {
		lexer.step()
		return TPercentage
	}
	return TNumber
}
+
// IsNameStart reports whether "c" can begin a CSS name: an ASCII letter,
// an underscore, any non-ASCII code point, or U+0000 (which the CSS
// specification treats as U+FFFD, a name code point).
func IsNameStart(c rune) bool {
	switch {
	case c >= 'a' && c <= 'z', c >= 'A' && c <= 'Z':
		return true
	case c == '_', c == '\x00':
		return true
	default:
		return c >= 0x80
	}
}
+
+func IsNameContinue(c rune) bool {
+	return IsNameStart(c) || (c >= '0' && c <= '9') || c == '-'
+}
+
// isNewline reports whether "c" is a CSS newline character: line feed,
// carriage return, or form feed.
func isNewline(c rune) bool {
	return c == '\n' || c == '\r' || c == '\f'
}
+
// isWhitespace reports whether "c" is CSS whitespace: a space, a tab, or
// a newline character.
func isWhitespace(c rune) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r' || c == '\f'
}
+
// isHex returns the numeric value of the hexadecimal digit "c" and true,
// or (0, false) when "c" is not a hex digit.
func isHex(c rune) (int, bool) {
	switch {
	case c >= '0' && c <= '9':
		return int(c) - '0', true
	case c >= 'a' && c <= 'f':
		return int(c) - 'a' + 10, true
	case c >= 'A' && c <= 'F':
		return int(c) - 'A' + 10, true
	default:
		return 0, false
	}
}
+
// isNonPrintable reports whether "c" is a non-printable character as
// defined by the CSS Syntax specification: U+0000..U+0008, U+000B,
// U+000E..U+001F, or U+007F.
func isNonPrintable(c rune) bool {
	switch {
	case c <= 0x08, c == 0x0B, c == 0x7F:
		return true
	default:
		return c >= 0x0E && c <= 0x1F
	}
}
+
// decodeEscapesInToken returns "inner" with all CSS escape sequences
// decoded and NUL bytes replaced by U+FFFD. When the text contains
// neither escapes nor NUL bytes, the input string is returned unchanged
// without allocating.
func decodeEscapesInToken(inner string) string {
	// Fast path: find the first byte that needs decoding, if any
	i := 0

	for i < len(inner) {
		if c := inner[i]; c == '\\' || c == '\x00' {
			break
		}
		i++
	}

	if i == len(inner) {
		return inner
	}

	sb := strings.Builder{}
	sb.WriteString(inner[:i])
	inner = inner[i:]

	for len(inner) > 0 {
		c, width := utf8.DecodeRuneInString(inner)
		inner = inner[width:]

		if c != '\\' {
			// U+0000 decodes to U+FFFD per the CSS specification
			if c == '\x00' {
				c = utf8.RuneError
			}
			sb.WriteRune(c)
			continue
		}

		// A trailing backslash decodes to U+FFFD
		if len(inner) == 0 {
			sb.WriteRune(utf8.RuneError)
			continue
		}

		c, width = utf8.DecodeRuneInString(inner)
		inner = inner[width:]
		hex, ok := isHex(c)

		if !ok {
			// An escaped newline is removed entirely
			if c == '\n' || c == '\f' {
				continue
			}

			// Handle Windows CRLF
			if c == '\r' {
				c, width = utf8.DecodeRuneInString(inner)
				if c == '\n' {
					inner = inner[width:]
				}
				continue
			}

			// If we get here, this is not a valid escape. However, this is still
			// allowed. In this case the backslash is just ignored.
			sb.WriteRune(c)
			continue
		}

		// Parse up to five additional hex characters (so six in total)
		for i := 0; i < 5 && len(inner) > 0; i++ {
			c, width = utf8.DecodeRuneInString(inner)
			if next, ok := isHex(c); ok {
				inner = inner[width:]
				hex = hex*16 + next
			} else {
				break
			}
		}

		// One whitespace character after a hex escape is consumed as part of it
		if len(inner) > 0 {
			c, width = utf8.DecodeRuneInString(inner)
			if isWhitespace(c) {
				inner = inner[width:]
			}
		}

		// NUL, surrogate halves, and out-of-range values become U+FFFD
		if hex == 0 || (hex >= 0xD800 && hex <= 0xDFFF) || hex > 0x10FFFF {
			sb.WriteRune(utf8.RuneError)
			continue
		}

		sb.WriteRune(rune(hex))
	}

	return sb.String()
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_color_spaces.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_color_spaces.go
new file mode 100644
index 0000000..721ecd3
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_color_spaces.go
@@ -0,0 +1,620 @@
+package css_parser
+
+import (
+	"math"
+
+	"github.com/evanw/esbuild/internal/helpers"
+)
+
// Wrap float64 math to avoid compiler optimizations that break determinism
type F64 = helpers.F64

// Reference: https://drafts.csswg.org/css-color/#color-conversion-code

// colorSpace enumerates the color spaces handled by this conversion code.
type colorSpace uint8

const (
	colorSpace_a98_rgb colorSpace = iota
	colorSpace_display_p3
	colorSpace_hsl
	colorSpace_hwb
	colorSpace_lab
	colorSpace_lch
	colorSpace_oklab
	colorSpace_oklch
	colorSpace_prophoto_rgb
	colorSpace_rec2020
	colorSpace_srgb
	colorSpace_srgb_linear
	colorSpace_xyz
	colorSpace_xyz_d50
	colorSpace_xyz_d65
)

// isPolar reports whether this color space represents hue as an angle
// (cylindrical coordinates) rather than as rectangular components.
func (colorSpace colorSpace) isPolar() bool {
	switch colorSpace {
	case colorSpace_hsl, colorSpace_hwb, colorSpace_lch, colorSpace_oklch:
		return true
	}
	return false
}

// hueMethod enumerates the hue interpolation strategies from CSS Color 4:
// "shorter", "longer", "increasing", and "decreasing".
type hueMethod uint8

const (
	shorterHue hueMethod = iota
	longerHue
	increasingHue
	decreasingHue
)
+
// lin_srgb converts gamma-encoded sRGB channel values to linear light
// (the inverse sRGB transfer function); negative inputs are handled
// symmetrically via WithSignFrom.
func lin_srgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		if abs := val.Abs(); abs.Value() < 0.04045 {
			return val.DivConst(12.92)
		} else {
			return abs.AddConst(0.055).DivConst(1.055).PowConst(2.4).WithSignFrom(val)
		}
	}
	return f(r), f(g), f(b)
}

// gam_srgb converts linear-light sRGB channel values to gamma-encoded
// form (the forward sRGB transfer function).
func gam_srgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		if abs := val.Abs(); abs.Value() > 0.0031308 {
			return abs.PowConst(1 / 2.4).MulConst(1.055).SubConst(0.055).WithSignFrom(val)
		} else {
			return val.MulConst(12.92)
		}
	}
	return f(r), f(g), f(b)
}

// lin_srgb_to_xyz converts linear-light sRGB to CIE XYZ using an exact
// rational form of the sRGB matrix (per the CSS Color 4 reference code).
func lin_srgb_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		506752.0 / 1228815, 87881.0 / 245763, 12673.0 / 70218,
		87098.0 / 409605, 175762.0 / 245763, 12673.0 / 175545,
		7918.0 / 409605, 87881.0 / 737289, 1001167.0 / 1053270,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_srgb converts CIE XYZ to linear-light sRGB (the inverse of
// lin_srgb_to_xyz).
func xyz_to_lin_srgb(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		12831.0 / 3959, -329.0 / 214, -1974.0 / 3959,
		-851781.0 / 878810, 1648619.0 / 878810, 36519.0 / 878810,
		705.0 / 12673, -2585.0 / 12673, 705.0 / 667,
	}
	return multiplyMatrices(M, x, y, z)
}
+
// lin_p3 converts gamma-encoded display-p3 values to linear light;
// display-p3 reuses the sRGB transfer function.
func lin_p3(r F64, g F64, b F64) (F64, F64, F64) {
	return lin_srgb(r, g, b)
}

// gam_p3 converts linear-light display-p3 values to gamma-encoded form.
func gam_p3(r F64, g F64, b F64) (F64, F64, F64) {
	return gam_srgb(r, g, b)
}

// lin_p3_to_xyz converts linear-light display-p3 to CIE XYZ.
func lin_p3_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		608311.0 / 1250200, 189793.0 / 714400, 198249.0 / 1000160,
		35783.0 / 156275, 247089.0 / 357200, 198249.0 / 2500400,
		0.0 / 1, 32229.0 / 714400, 5220557.0 / 5000800,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_p3 converts CIE XYZ to linear-light display-p3 (the inverse
// of lin_p3_to_xyz).
func xyz_to_lin_p3(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		446124.0 / 178915, -333277.0 / 357830, -72051.0 / 178915,
		-14852.0 / 17905, 63121.0 / 35810, 423.0 / 17905,
		11844.0 / 330415, -50337.0 / 660830, 316169.0 / 330415,
	}
	return multiplyMatrices(M, x, y, z)
}

// lin_prophoto converts gamma-encoded prophoto-rgb values to linear light
// using the ROMM RGB transfer function.
func lin_prophoto(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const Et2 = 16.0 / 512
		if abs := val.Abs(); abs.Value() <= Et2 {
			return val.DivConst(16)
		} else {
			return abs.PowConst(1.8).WithSignFrom(val)
		}
	}
	return f(r), f(g), f(b)
}

// gam_prophoto converts linear-light prophoto-rgb values to gamma-encoded
// form (the inverse of lin_prophoto).
func gam_prophoto(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const Et = 1.0 / 512
		if abs := val.Abs(); abs.Value() >= Et {
			return abs.PowConst(1 / 1.8).WithSignFrom(val)
		} else {
			return val.MulConst(16)
		}
	}
	return f(r), f(g), f(b)
}

// lin_prophoto_to_xyz converts linear-light prophoto-rgb to CIE XYZ
// (D50 white point, matching the prophoto-rgb definition).
func lin_prophoto_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		0.7977604896723027, 0.13518583717574031, 0.0313493495815248,
		0.2880711282292934, 0.7118432178101014, 0.00008565396060525902,
		0.0, 0.0, 0.8251046025104601,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_prophoto converts CIE XYZ (D50) to linear-light prophoto-rgb
// (the inverse of lin_prophoto_to_xyz).
func xyz_to_lin_prophoto(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1.3457989731028281, -0.25558010007997534, -0.05110628506753401,
		-0.5446224939028347, 1.5082327413132781, 0.02053603239147973,
		0.0, 0.0, 1.2119675456389454,
	}
	return multiplyMatrices(M, x, y, z)
}
+
// lin_a98rgb converts gamma-encoded a98-rgb values to linear light using
// the Adobe RGB (1998) pure power-law transfer function.
func lin_a98rgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		return val.Abs().PowConst(563.0 / 256).WithSignFrom(val)
	}
	return f(r), f(g), f(b)
}

// gam_a98rgb converts linear-light a98-rgb values to gamma-encoded form
// (the inverse of lin_a98rgb).
func gam_a98rgb(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		return val.Abs().PowConst(256.0 / 563).WithSignFrom(val)
	}
	return f(r), f(g), f(b)
}

// lin_a98rgb_to_xyz converts linear-light a98-rgb to CIE XYZ.
func lin_a98rgb_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	M := [9]float64{
		573536.0 / 994567, 263643.0 / 1420810, 187206.0 / 994567,
		591459.0 / 1989134, 6239551.0 / 9945670, 374412.0 / 4972835,
		53769.0 / 1989134, 351524.0 / 4972835, 4929758.0 / 4972835,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_a98rgb converts CIE XYZ to linear-light a98-rgb (the inverse
// of lin_a98rgb_to_xyz).
func xyz_to_lin_a98rgb(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1829569.0 / 896150, -506331.0 / 896150, -308931.0 / 896150,
		-851781.0 / 878810, 1648619.0 / 878810, 36519.0 / 878810,
		16779.0 / 1248040, -147721.0 / 1248040, 1266979.0 / 1248040,
	}
	return multiplyMatrices(M, x, y, z)
}

// lin_2020 converts gamma-encoded rec2020 values to linear light using
// the Rec. ITU-R BT.2020 transfer function.
func lin_2020(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const α = 1.09929682680944
		const β = 0.018053968510807
		if abs := val.Abs(); abs.Value() < β*4.5 {
			return val.DivConst(4.5)
		} else {
			return abs.AddConst(α - 1).DivConst(α).PowConst(1 / 0.45).WithSignFrom(val)
		}
	}
	return f(r), f(g), f(b)
}

// gam_2020 converts linear-light rec2020 values to gamma-encoded form
// (the inverse of lin_2020).
func gam_2020(r F64, g F64, b F64) (F64, F64, F64) {
	f := func(val F64) F64 {
		const α = 1.09929682680944
		const β = 0.018053968510807
		if abs := val.Abs(); abs.Value() > β {
			return abs.PowConst(0.45).MulConst(α).SubConst(α - 1).WithSignFrom(val)
		} else {
			return val.MulConst(4.5)
		}
	}
	return f(r), f(g), f(b)
}

// lin_2020_to_xyz converts linear-light rec2020 to CIE XYZ.
func lin_2020_to_xyz(r F64, g F64, b F64) (F64, F64, F64) {
	var M = [9]float64{
		63426534.0 / 99577255, 20160776.0 / 139408157, 47086771.0 / 278816314,
		26158966.0 / 99577255, 472592308.0 / 697040785, 8267143.0 / 139408157,
		0.0 / 1, 19567812.0 / 697040785, 295819943.0 / 278816314,
	}
	return multiplyMatrices(M, r, g, b)
}

// xyz_to_lin_2020 converts CIE XYZ to linear-light rec2020 (the inverse
// of lin_2020_to_xyz).
func xyz_to_lin_2020(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		30757411.0 / 17917100, -6372589.0 / 17917100, -4539589.0 / 17917100,
		-19765991.0 / 29648200, 47925759.0 / 29648200, 467509.0 / 29648200,
		792561.0 / 44930125, -1921689.0 / 44930125, 42328811.0 / 44930125,
	}
	return multiplyMatrices(M, x, y, z)
}

// d65_to_d50 adapts an XYZ color from the D65 white point to D50
// (Bradford chromatic adaptation).
func d65_to_d50(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		1.0479297925449969, 0.022946870601609652, -0.05019226628920524,
		0.02962780877005599, 0.9904344267538799, -0.017073799063418826,
		-0.009243040646204504, 0.015055191490298152, 0.7518742814281371,
	}
	return multiplyMatrices(M, x, y, z)
}

// d50_to_d65 adapts an XYZ color from the D50 white point to D65 (the
// inverse of d65_to_d50).
func d50_to_d65(x F64, y F64, z F64) (F64, F64, F64) {
	M := [9]float64{
		0.955473421488075, -0.02309845494876471, 0.06325924320057072,
		-0.0283697093338637, 1.0099953980813041, 0.021041441191917323,
		0.012314014864481998, -0.020507649298898964, 1.330365926242124,
	}
	return multiplyMatrices(M, x, y, z)
}

// X and Z components of the D50 reference white (Y is normalized to 1)
const d50_x = 0.3457 / 0.3585
const d50_z = (1.0 - 0.3457 - 0.3585) / 0.3585
+
// xyz_to_lab converts CIE XYZ (D50-adapted) to CIE Lab using the CIE
// standard constants ε and κ.
func xyz_to_lab(x F64, y F64, z F64) (F64, F64, F64) {
	const ε = 216.0 / 24389
	const κ = 24389.0 / 27

	// Scale relative to the D50 reference white
	x = x.DivConst(d50_x)
	z = z.DivConst(d50_z)

	// Apply the cube-root transfer function with a linear segment near zero
	var f0, f1, f2 F64
	if x.Value() > ε {
		f0 = x.Cbrt()
	} else {
		f0 = x.MulConst(κ).AddConst(16).DivConst(116)
	}
	if y.Value() > ε {
		f1 = y.Cbrt()
	} else {
		f1 = y.MulConst(κ).AddConst(16).DivConst(116)
	}
	if z.Value() > ε {
		f2 = z.Cbrt()
	} else {
		f2 = z.MulConst(κ).AddConst(16).DivConst(116)
	}

	return f1.MulConst(116).SubConst(16),
		f0.Sub(f1).MulConst(500),
		f1.Sub(f2).MulConst(200)
}

// lab_to_xyz converts CIE Lab to CIE XYZ (D50-adapted); the inverse of
// xyz_to_lab.
func lab_to_xyz(l F64, a F64, b F64) (x F64, y F64, z F64) {
	const κ = 24389.0 / 27
	const ε = 216.0 / 24389

	f1 := l.AddConst(16).DivConst(116)
	f0 := a.DivConst(500).Add(f1)
	f2 := f1.Sub(b.DivConst(200))

	f0_3 := f0.Cubed()
	f2_3 := f2.Cubed()

	if f0_3.Value() > ε {
		x = f0_3
	} else {
		x = f0.MulConst(116).SubConst(16).DivConst(κ)
	}
	if l.Value() > κ*ε {
		y = l.AddConst(16).DivConst(116)
		y = y.Cubed()
	} else {
		y = l.DivConst(κ)
	}
	if f2_3.Value() > ε {
		z = f2_3
	} else {
		z = f2.MulConst(116).SubConst(16).DivConst(κ)
	}

	// Scale back up by the D50 reference white
	return x.MulConst(d50_x), y, z.MulConst(d50_z)
}

// lab_to_lch converts rectangular Lab coordinates to cylindrical LCH;
// hue is in degrees, normalized to [0, 360).
func lab_to_lch(l F64, a F64, b F64) (F64, F64, F64) {
	hue := b.Atan2(a).MulConst(180 / math.Pi)
	if hue.Value() < 0 {
		hue = hue.AddConst(360)
	}
	return l,
		a.Squared().Add(b.Squared()).Sqrt(),
		hue
}

// lch_to_lab converts cylindrical LCH coordinates back to rectangular
// Lab; the hue angle is in degrees.
func lch_to_lab(l F64, c F64, h F64) (F64, F64, F64) {
	return l,
		h.MulConst(math.Pi / 180).Cos().Mul(c),
		h.MulConst(math.Pi / 180).Sin().Mul(c)
}
+
// xyz_to_oklab converts CIE XYZ (D65) to OKLab via the LMS intermediate
// space.
func xyz_to_oklab(x F64, y F64, z F64) (F64, F64, F64) {
	XYZtoLMS := [9]float64{
		0.8190224432164319, 0.3619062562801221, -0.12887378261216414,
		0.0329836671980271, 0.9292868468965546, 0.03614466816999844,
		0.048177199566046255, 0.26423952494422764, 0.6335478258136937,
	}
	LMStoOKLab := [9]float64{
		0.2104542553, 0.7936177850, -0.0040720468,
		1.9779984951, -2.4285922050, 0.4505937099,
		0.0259040371, 0.7827717662, -0.8086757660,
	}
	l, m, s := multiplyMatrices(XYZtoLMS, x, y, z)
	return multiplyMatrices(LMStoOKLab, l.Cbrt(), m.Cbrt(), s.Cbrt())
}

// oklab_to_xyz converts OKLab to CIE XYZ (D65); the inverse of
// xyz_to_oklab.
func oklab_to_xyz(l F64, a F64, b F64) (F64, F64, F64) {
	LMStoXYZ := [9]float64{
		1.2268798733741557, -0.5578149965554813, 0.28139105017721583,
		-0.04057576262431372, 1.1122868293970594, -0.07171106666151701,
		-0.07637294974672142, -0.4214933239627914, 1.5869240244272418,
	}
	OKLabtoLMS := [9]float64{
		0.99999999845051981432, 0.39633779217376785678, 0.21580375806075880339,
		1.0000000088817607767, -0.1055613423236563494, -0.063854174771705903402,
		1.0000000546724109177, -0.089484182094965759684, -1.2914855378640917399,
	}
	l, m, s := multiplyMatrices(OKLabtoLMS, l, a, b)
	return multiplyMatrices(LMStoXYZ, l.Cubed(), m.Cubed(), s.Cubed())
}

// oklab_to_oklch converts rectangular OKLab to cylindrical OKLCH (same
// math as Lab to LCH).
func oklab_to_oklch(l F64, a F64, b F64) (F64, F64, F64) {
	return lab_to_lch(l, a, b)
}

// oklch_to_oklab converts cylindrical OKLCH back to rectangular OKLab.
func oklch_to_oklab(l F64, c F64, h F64) (F64, F64, F64) {
	return lch_to_lab(l, c, h)
}

// multiplyMatrices multiplies the 3x3 row-major matrix A by the column
// vector (b0, b1, b2).
func multiplyMatrices(A [9]float64, b0 F64, b1 F64, b2 F64) (F64, F64, F64) {
	return b0.MulConst(A[0]).Add(b1.MulConst(A[1])).Add(b2.MulConst(A[2])),
		b0.MulConst(A[3]).Add(b1.MulConst(A[4])).Add(b2.MulConst(A[5])),
		b0.MulConst(A[6]).Add(b1.MulConst(A[7])).Add(b2.MulConst(A[8]))
}

// delta_eok computes the deltaEOK color difference: the Euclidean
// distance between two colors in OKLab space.
func delta_eok(L1 F64, a1 F64, b1 F64, L2 F64, a2 F64, b2 F64) F64 {
	ΔL_sq := L1.Sub(L2).Squared()
	Δa_sq := a1.Sub(a2).Squared()
	Δb_sq := b1.Sub(b2).Squared()
	return ΔL_sq.Add(Δa_sq).Add(Δb_sq).Sqrt()
}
+
+func gamut_mapping_xyz_to_srgb(x F64, y F64, z F64) (F64, F64, F64) {
+	origin_l, origin_c, origin_h := oklab_to_oklch(xyz_to_oklab(x, y, z))
+
+	if origin_l.Value() >= 1 || origin_l.Value() <= 0 {
+		return origin_l, origin_l, origin_l
+	}
+
+	oklch_to_srgb := func(l F64, c F64, h F64) (F64, F64, F64) {
+		l, a, b := oklch_to_oklab(l, c, h)
+		x, y, z := oklab_to_xyz(l, a, b)
+		r, g, b := xyz_to_lin_srgb(x, y, z)
+		return gam_srgb(r, g, b)
+	}
+
+	srgb_to_oklab := func(r F64, g F64, b F64) (F64, F64, F64) {
+		r, g, b = lin_srgb(r, g, b)
+		x, y, z := lin_srgb_to_xyz(r, g, b)
+		return xyz_to_oklab(x, y, z)
+	}
+
+	inGamut := func(r F64, g F64, b F64) bool {
+		return r.Value() >= 0 && r.Value() <= 1 &&
+			g.Value() >= 0 && g.Value() <= 1 &&
+			b.Value() >= 0 && b.Value() <= 1
+	}
+
+	r, g, b := oklch_to_srgb(origin_l, origin_c, origin_h)
+	if inGamut(r, g, b) {
+		return r, g, b
+	}
+
+	const JND = 0.02
+	const epsilon = 0.0001
+	min := helpers.NewF64(0.0)
+	max := origin_c
+
+	clip := func(x F64) F64 {
+		if x.Value() < 0 {
+			return helpers.NewF64(0)
+		}
+		if x.Value() > 1 {
+			return helpers.NewF64(1)
+		}
+		return x
+	}
+
+	for max.Sub(min).Value() > epsilon {
+		chroma := min.Add(max).DivConst(2)
+		origin_c = chroma
+
+		r, g, b = oklch_to_srgb(origin_l, origin_c, origin_h)
+		if inGamut(r, g, b) {
+			min = chroma
+			continue
+		}
+
+		clipped_r, clipped_g, clipped_b := clip(r), clip(g), clip(b)
+		L1, a1, b1 := srgb_to_oklab(clipped_r, clipped_b, clipped_g)
+		L2, a2, b2 := srgb_to_oklab(r, g, b)
+		E := delta_eok(L1, a1, b1, L2, a2, b2)
+		if E.Value() < JND {
+			return clipped_r, clipped_g, clipped_b
+		}
+
+		max = chroma
+	}
+
+	return r, g, b
+}
+
// hsl_to_rgb converts HSL (hue in degrees, saturation and lightness in
// percent) to sRGB components in [0, 1], following the CSS Color 4
// reference algorithm.
func hsl_to_rgb(hue F64, sat F64, light F64) (F64, F64, F64) {
	// Normalize hue into [0, 360) by taking it modulo 360
	hue = hue.DivConst(360)
	hue = hue.Sub(hue.Floor())
	hue = hue.MulConst(360)

	// Percentages to fractions
	sat = sat.DivConst(100)
	light = light.DivConst(100)

	f := func(n float64) F64 {
		// k = (n + hue/30) mod 12
		k := hue.DivConst(30).AddConst(n)
		k = k.DivConst(12)
		k = k.Sub(k.Floor())
		k = k.MulConst(12)
		a := helpers.Min2(light, light.Neg().AddConst(1)).Mul(sat)
		return light.Sub(helpers.Max2(helpers.NewF64(-1), helpers.Min3(k.SubConst(3), k.Neg().AddConst(9), helpers.NewF64(1))).Mul(a))
	}

	return f(0), f(8), f(4)
}
+
// rgb_to_hsl converts sRGB components in [0, 1] to HSL; hue is in degrees
// (NaN for achromatic colors), saturation and lightness in percent.
func rgb_to_hsl(red F64, green F64, blue F64) (F64, F64, F64) {
	max := helpers.Max3(red, green, blue)
	min := helpers.Min3(red, green, blue)
	hue, sat, light := helpers.NewF64(math.NaN()), helpers.NewF64(0.0), min.Add(max).DivConst(2)
	d := max.Sub(min)

	if d.Value() != 0 {
		if div := helpers.Min2(light, light.Neg().AddConst(1)); div.Value() != 0 {
			sat = max.Sub(light).Div(div)
		}

		// Base the hue on whichever channel is the maximum
		switch max {
		case red:
			hue = green.Sub(blue).Div(d)
			if green.Value() < blue.Value() {
				hue = hue.AddConst(6)
			}
		case green:
			hue = blue.Sub(red).Div(d).AddConst(2)
		case blue:
			hue = red.Sub(green).Div(d).AddConst(4)
		}

		hue = hue.MulConst(60)
	}

	return hue, sat.MulConst(100), light.MulConst(100)
}
+
// hwb_to_rgb converts HWB (hue in degrees, whiteness and blackness in
// percent) to sRGB components in [0, 1].
func hwb_to_rgb(hue F64, white F64, black F64) (F64, F64, F64) {
	white = white.DivConst(100)
	black = black.DivConst(100)
	// When whiteness plus blackness reaches 100%, the result is gray
	if white.Add(black).Value() >= 1 {
		gray := white.Div(white.Add(black))
		return gray, gray, gray
	}
	// Start from the fully-saturated hue and mix in white and black
	delta := white.Add(black).Neg().AddConst(1)
	r, g, b := hsl_to_rgb(hue, helpers.NewF64(100), helpers.NewF64(50))
	r = delta.Mul(r).Add(white)
	g = delta.Mul(g).Add(white)
	b = delta.Mul(b).Add(white)
	return r, g, b
}

// rgb_to_hwb converts sRGB components in [0, 1] to HWB; hue is in
// degrees, whiteness and blackness in percent.
func rgb_to_hwb(red F64, green F64, blue F64) (F64, F64, F64) {
	h, _, _ := rgb_to_hsl(red, green, blue)
	white := helpers.Min3(red, green, blue)
	black := helpers.Max3(red, green, blue).Neg().AddConst(1)
	return h, white.MulConst(100), black.MulConst(100)
}
+
// xyz_to_colorSpace converts a color from CIE XYZ (D65) into the given
// target color space by dispatching to the appropriate conversion chain.
// Panics on an unknown color space (a programmer error).
func xyz_to_colorSpace(x F64, y F64, z F64, colorSpace colorSpace) (F64, F64, F64) {
	switch colorSpace {
	case colorSpace_a98_rgb:
		return gam_a98rgb(xyz_to_lin_a98rgb(x, y, z))

	case colorSpace_display_p3:
		return gam_p3(xyz_to_lin_p3(x, y, z))

	case colorSpace_hsl:
		return rgb_to_hsl(gam_srgb(xyz_to_lin_srgb(x, y, z)))

	case colorSpace_hwb:
		return rgb_to_hwb(gam_srgb(xyz_to_lin_srgb(x, y, z)))

	case colorSpace_lab:
		return xyz_to_lab(d65_to_d50(x, y, z))

	case colorSpace_lch:
		return lab_to_lch(xyz_to_lab(d65_to_d50(x, y, z)))

	case colorSpace_oklab:
		return xyz_to_oklab(x, y, z)

	case colorSpace_oklch:
		return oklab_to_oklch(xyz_to_oklab(x, y, z))

	case colorSpace_prophoto_rgb:
		return gam_prophoto(xyz_to_lin_prophoto(d65_to_d50(x, y, z)))

	case colorSpace_rec2020:
		return gam_2020(xyz_to_lin_2020(x, y, z))

	case colorSpace_srgb:
		return gam_srgb(xyz_to_lin_srgb(x, y, z))

	case colorSpace_srgb_linear:
		return xyz_to_lin_srgb(x, y, z)

	case colorSpace_xyz, colorSpace_xyz_d65:
		return x, y, z

	case colorSpace_xyz_d50:
		return d65_to_d50(x, y, z)

	default:
		panic("Internal error")
	}
}
+
// colorSpace_to_xyz converts a color from the given source color space
// into CIE XYZ (D65); the inverse of xyz_to_colorSpace. Panics on an
// unknown color space (a programmer error).
func colorSpace_to_xyz(v0 F64, v1 F64, v2 F64, colorSpace colorSpace) (F64, F64, F64) {
	switch colorSpace {
	case colorSpace_a98_rgb:
		return lin_a98rgb_to_xyz(lin_a98rgb(v0, v1, v2))

	case colorSpace_display_p3:
		return lin_p3_to_xyz(lin_p3(v0, v1, v2))

	case colorSpace_hsl:
		return lin_srgb_to_xyz(lin_srgb(hsl_to_rgb(v0, v1, v2)))

	case colorSpace_hwb:
		return lin_srgb_to_xyz(lin_srgb(hwb_to_rgb(v0, v1, v2)))

	case colorSpace_lab:
		return d50_to_d65(lab_to_xyz(v0, v1, v2))

	case colorSpace_lch:
		return d50_to_d65(lab_to_xyz(lch_to_lab(v0, v1, v2)))

	case colorSpace_oklab:
		return oklab_to_xyz(v0, v1, v2)

	case colorSpace_oklch:
		return oklab_to_xyz(oklch_to_oklab(v0, v1, v2))

	case colorSpace_prophoto_rgb:
		return d50_to_d65(lin_prophoto_to_xyz(lin_prophoto(v0, v1, v2)))

	case colorSpace_rec2020:
		return lin_2020_to_xyz(lin_2020(v0, v1, v2))

	case colorSpace_srgb:
		return lin_srgb_to_xyz(lin_srgb(v0, v1, v2))

	case colorSpace_srgb_linear:
		return lin_srgb_to_xyz(v0, v1, v2)

	case colorSpace_xyz, colorSpace_xyz_d65:
		return v0, v1, v2

	case colorSpace_xyz_d50:
		return d50_to_d65(v0, v1, v2)

	default:
		panic("Internal error")
	}
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls.go
new file mode 100644
index 0000000..eaca876
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls.go
@@ -0,0 +1,538 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// commaToken returns a "," token at the given location. Unless whitespace is
+// being minified, the token requests that a space be printed after it.
+func (p *parser) commaToken(loc logger.Loc) css_ast.Token {
+	token := css_ast.Token{
+		Kind: css_lexer.TComma,
+		Text: ",",
+		Loc:  loc,
+	}
+	if p.options.minifyWhitespace {
+		return token
+	}
+	token.Whitespace = css_ast.WhitespaceAfter
+	return token
+}
+
+// expandTokenQuad expands a 1- to 4-token CSS box value ("margin: a b c d")
+// into the full four-token top/right/bottom/left form using the standard CSS
+// shorthand mirroring rules. It fails (ok == false) for token counts outside
+// 1-4 and for any token that is neither numeric nor the optionally-allowed
+// identifier (e.g. "auto"), such as "var()".
+func expandTokenQuad(tokens []css_ast.Token, allowedIdent string) (result [4]css_ast.Token, ok bool) {
+	n := len(tokens)
+	if n < 1 || n > 4 {
+		return
+	}
+
+	// Bail out if we encounter any unexpected tokens such as "var()"
+	for _, t := range tokens {
+		if !t.Kind.IsNumeric() && (t.Kind != css_lexer.TIdent || allowedIdent == "" || t.Text != allowedIdent) {
+			return
+		}
+	}
+
+	// Missing values mirror earlier ones: right ← top, bottom ← top, left ← right
+	copy(result[:], tokens)
+	if n < 2 {
+		result[1] = result[0]
+	}
+	if n < 3 {
+		result[2] = result[0]
+	}
+	if n < 4 {
+		result[3] = result[1]
+	}
+	return result, true
+}
+
+// compactTokenQuad is the inverse of expandTokenQuad: given the four sides of
+// a box value it drops trailing values that the CSS shorthand rules would
+// reproduce anyway ("a b a b" → "a b", "a a a a" → "a", etc.), then assigns
+// whitespace flags so the tokens print with single separating spaces (and no
+// leading space when whitespace is being minified).
+func compactTokenQuad(a, b, c, d css_ast.Token, minifyWhitespace bool) []css_ast.Token {
+	quad := []css_ast.Token{a, b, c, d}
+
+	// Work out how many leading tokens are actually needed
+	n := 4
+	if quad[3].EqualIgnoringWhitespace(quad[1]) {
+		n = 3
+		if quad[2].EqualIgnoringWhitespace(quad[0]) {
+			n = 2
+			if quad[1].EqualIgnoringWhitespace(quad[0]) {
+				n = 1
+			}
+		}
+	}
+	quad = quad[:n]
+
+	// Re-derive the whitespace flags for the shortened list
+	for i := range quad {
+		var flags css_ast.WhitespaceFlags
+		if i > 0 || !minifyWhitespace {
+			flags |= css_ast.WhitespaceBefore
+		}
+		if i != len(quad)-1 {
+			flags |= css_ast.WhitespaceAfter
+		}
+		quad[i].Whitespace = flags
+	}
+	return quad
+}
+
+// processDeclarations rewrites the declarations inside one rule block. It
+// lowers and minifies colors and gradients, collapses shorthand-able
+// properties (margin/padding/inset/border-radius) via the box and corner
+// trackers, processes "composes" directives, converts animation/container/
+// list-style names into renamable symbols, inserts vendor-prefixed fallback
+// declarations, and — when lowering a color would clip an out-of-gamut value —
+// re-processes the declaration so both a clipped fallback and the original
+// unclipped declaration are emitted. The returned slice may contain empty
+// Rule{} placeholders for removed declarations unless minifySyntax is on,
+// in which case they are compacted away at the end.
+func (p *parser) processDeclarations(rules []css_ast.Rule, composesContext *composesContext) (rewrittenRules []css_ast.Rule) {
+	margin := boxTracker{key: css_ast.DMargin, keyText: "margin", allowAuto: true}
+	padding := boxTracker{key: css_ast.DPadding, keyText: "padding", allowAuto: false}
+	inset := boxTracker{key: css_ast.DInset, keyText: "inset", allowAuto: true}
+	borderRadius := borderRadiusTracker{}
+	rewrittenRules = make([]css_ast.Rule, 0, len(rules))
+	didWarnAboutComposes := false
+	wouldClipColorFlag := false
+	var declarationKeys map[string]struct{}
+
+	// Don't automatically generate the "inset" property if it's not supported
+	if p.options.unsupportedCSSFeatures.Has(compat.InsetProperty) {
+		inset.key = css_ast.DUnknown
+		inset.keyText = ""
+	}
+
+	// If this is a local class selector, track which CSS properties it declares.
+	// This is used to warn when CSS "composes" is used incorrectly.
+	if composesContext != nil {
+		for _, ref := range composesContext.parentRefs {
+			composes, ok := p.composes[ref]
+			if !ok {
+				composes = &css_ast.Composes{}
+				p.composes[ref] = composes
+			}
+			properties := composes.Properties
+			if properties == nil {
+				properties = make(map[string]logger.Loc)
+				composes.Properties = properties
+			}
+			for _, rule := range rules {
+				if decl, ok := rule.Data.(*css_ast.RDeclaration); ok && decl.Key != css_ast.DComposes {
+					properties[decl.KeyText] = decl.KeyRange.Loc
+				}
+			}
+		}
+	}
+
+	for i := 0; i < len(rules); i++ {
+		rule := rules[i]
+		rewrittenRules = append(rewrittenRules, rule)
+		decl, ok := rule.Data.(*css_ast.RDeclaration)
+		if !ok {
+			continue
+		}
+
+		// If the previous loop iteration would have clipped a color, we will
+		// duplicate it and insert the clipped copy before the unclipped copy
+		var wouldClipColor *bool
+		if wouldClipColorFlag {
+			wouldClipColorFlag = false
+			clone := *decl
+			clone.Value = css_ast.CloneTokensWithoutImportRecords(clone.Value)
+			decl = &clone
+			rule.Data = decl
+			n := len(rewrittenRules) - 2
+			// Splice the clone in just before the copy appended at the top of
+			// this iteration, so the clipped fallback precedes the original
+			rewrittenRules = append(rewrittenRules[:n], rule, rewrittenRules[n])
+		} else {
+			// A nil "wouldClipColor" on the second pass makes the lowering
+			// helpers clip instead of reporting
+			wouldClipColor = &wouldClipColorFlag
+		}
+
+		switch decl.Key {
+		case css_ast.DComposes:
+			// Only process "composes" directives if we're in "local-css" or
+			// "global-css" mode. In these cases, "composes" directives will always
+			// be removed (because they are being processed) even if they contain
+			// errors. Otherwise we leave "composes" directives there untouched and
+			// don't check them for errors.
+			if p.options.symbolMode != symbolModeDisabled {
+				if composesContext == nil {
+					if !didWarnAboutComposes {
+						didWarnAboutComposes = true
+						p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, decl.KeyRange, "\"composes\" is not valid here")
+					}
+				} else if composesContext.problemRange.Len > 0 {
+					if !didWarnAboutComposes {
+						didWarnAboutComposes = true
+						p.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, decl.KeyRange, "\"composes\" only works inside single class selectors",
+							[]logger.MsgData{p.tracker.MsgData(composesContext.problemRange, "The parent selector is not a single class selector because of the syntax here:")})
+					}
+				} else {
+					p.handleComposesPragma(*composesContext, decl.Value)
+				}
+				// Drop the "composes" declaration that was appended above
+				rewrittenRules = rewrittenRules[:len(rewrittenRules)-1]
+			}
+
+		case css_ast.DBackground:
+			for i, t := range decl.Value {
+				t = p.lowerAndMinifyColor(t, wouldClipColor)
+				t = p.lowerAndMinifyGradient(t, wouldClipColor)
+				decl.Value[i] = t
+			}
+
+		case css_ast.DBackgroundImage,
+			css_ast.DBorderImage,
+			css_ast.DMaskImage:
+
+			for i, t := range decl.Value {
+				t = p.lowerAndMinifyGradient(t, wouldClipColor)
+				decl.Value[i] = t
+			}
+
+		case css_ast.DBackgroundColor,
+			css_ast.DBorderBlockEndColor,
+			css_ast.DBorderBlockStartColor,
+			css_ast.DBorderBottomColor,
+			css_ast.DBorderColor,
+			css_ast.DBorderInlineEndColor,
+			css_ast.DBorderInlineStartColor,
+			css_ast.DBorderLeftColor,
+			css_ast.DBorderRightColor,
+			css_ast.DBorderTopColor,
+			css_ast.DCaretColor,
+			css_ast.DColor,
+			css_ast.DColumnRuleColor,
+			css_ast.DFill,
+			css_ast.DFloodColor,
+			css_ast.DLightingColor,
+			css_ast.DOutlineColor,
+			css_ast.DStopColor,
+			css_ast.DStroke,
+			css_ast.DTextDecorationColor,
+			css_ast.DTextEmphasisColor:
+
+			if len(decl.Value) == 1 {
+				decl.Value[0] = p.lowerAndMinifyColor(decl.Value[0], wouldClipColor)
+			}
+
+		case css_ast.DTransform:
+			if p.options.minifySyntax {
+				decl.Value = p.mangleTransforms(decl.Value)
+			}
+
+		case css_ast.DBoxShadow:
+			decl.Value = p.lowerAndMangleBoxShadows(decl.Value, wouldClipColor)
+
+		// Container name
+		case css_ast.DContainer:
+			p.processContainerShorthand(decl.Value)
+		case css_ast.DContainerName:
+			p.processContainerName(decl.Value)
+
+			// Animation name
+		case css_ast.DAnimation:
+			p.processAnimationShorthand(decl.Value)
+		case css_ast.DAnimationName:
+			p.processAnimationName(decl.Value)
+
+		// List style
+		case css_ast.DListStyle:
+			p.processListStyleShorthand(decl.Value)
+		case css_ast.DListStyleType:
+			if len(decl.Value) == 1 {
+				p.processListStyleType(&decl.Value[0])
+			}
+
+			// Font
+		case css_ast.DFont:
+			if p.options.minifySyntax {
+				decl.Value = p.mangleFont(decl.Value)
+			}
+		case css_ast.DFontFamily:
+			if p.options.minifySyntax {
+				if value, ok := p.mangleFontFamily(decl.Value); ok {
+					decl.Value = value
+				}
+			}
+		case css_ast.DFontWeight:
+			if len(decl.Value) == 1 && p.options.minifySyntax {
+				decl.Value[0] = p.mangleFontWeight(decl.Value[0])
+			}
+
+			// Margin
+		case css_ast.DMargin:
+			if p.options.minifySyntax {
+				margin.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
+			}
+		case css_ast.DMarginTop:
+			if p.options.minifySyntax {
+				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
+			}
+		case css_ast.DMarginRight:
+			if p.options.minifySyntax {
+				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
+			}
+		case css_ast.DMarginBottom:
+			if p.options.minifySyntax {
+				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
+			}
+		case css_ast.DMarginLeft:
+			if p.options.minifySyntax {
+				margin.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
+			}
+
+		// Padding
+		case css_ast.DPadding:
+			if p.options.minifySyntax {
+				padding.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
+			}
+		case css_ast.DPaddingTop:
+			if p.options.minifySyntax {
+				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
+			}
+		case css_ast.DPaddingRight:
+			if p.options.minifySyntax {
+				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
+			}
+		case css_ast.DPaddingBottom:
+			if p.options.minifySyntax {
+				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
+			}
+		case css_ast.DPaddingLeft:
+			if p.options.minifySyntax {
+				padding.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
+			}
+
+		// Inset
+		case css_ast.DInset:
+			if p.options.unsupportedCSSFeatures.Has(compat.InsetProperty) {
+				if decls, ok := p.lowerInset(rule.Loc, decl); ok {
+					rewrittenRules = rewrittenRules[:len(rewrittenRules)-1]
+					for i := range decls {
+						rewrittenRules = append(rewrittenRules, decls[i])
+						if p.options.minifySyntax {
+							inset.mangleSide(rewrittenRules, decls[i].Data.(*css_ast.RDeclaration), p.options.minifyWhitespace, i)
+						}
+					}
+					break
+				}
+			}
+			if p.options.minifySyntax {
+				inset.mangleSides(rewrittenRules, decl, p.options.minifyWhitespace)
+			}
+		case css_ast.DTop:
+			if p.options.minifySyntax {
+				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxTop)
+			}
+		case css_ast.DRight:
+			if p.options.minifySyntax {
+				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxRight)
+			}
+		case css_ast.DBottom:
+			if p.options.minifySyntax {
+				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxBottom)
+			}
+		case css_ast.DLeft:
+			if p.options.minifySyntax {
+				inset.mangleSide(rewrittenRules, decl, p.options.minifyWhitespace, boxLeft)
+			}
+
+		// Border radius
+		case css_ast.DBorderRadius:
+			if p.options.minifySyntax {
+				borderRadius.mangleCorners(rewrittenRules, decl, p.options.minifyWhitespace)
+			}
+		case css_ast.DBorderTopLeftRadius:
+			if p.options.minifySyntax {
+				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusTopLeft)
+			}
+		case css_ast.DBorderTopRightRadius:
+			if p.options.minifySyntax {
+				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusTopRight)
+			}
+		case css_ast.DBorderBottomRightRadius:
+			if p.options.minifySyntax {
+				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusBottomRight)
+			}
+		case css_ast.DBorderBottomLeftRadius:
+			if p.options.minifySyntax {
+				borderRadius.mangleCorner(rewrittenRules, decl, p.options.minifyWhitespace, borderRadiusBottomLeft)
+			}
+		}
+
+		if prefixes, ok := p.options.cssPrefixData[decl.Key]; ok {
+			if declarationKeys == nil {
+				// Only generate this map if it's needed
+				declarationKeys = make(map[string]struct{})
+				for _, rule := range rules {
+					if decl, ok := rule.Data.(*css_ast.RDeclaration); ok {
+						declarationKeys[decl.KeyText] = struct{}{}
+					}
+				}
+			}
+			if (prefixes & compat.WebkitPrefix) != 0 {
+				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-webkit-", rule.Loc, decl, declarationKeys)
+			}
+			if (prefixes & compat.KhtmlPrefix) != 0 {
+				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-khtml-", rule.Loc, decl, declarationKeys)
+			}
+			if (prefixes & compat.MozPrefix) != 0 {
+				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-moz-", rule.Loc, decl, declarationKeys)
+			}
+			if (prefixes & compat.MsPrefix) != 0 {
+				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-ms-", rule.Loc, decl, declarationKeys)
+			}
+			if (prefixes & compat.OPrefix) != 0 {
+				rewrittenRules = p.insertPrefixedDeclaration(rewrittenRules, "-o-", rule.Loc, decl, declarationKeys)
+			}
+		}
+
+		// If this loop iteration would have clipped a color, the out-of-gamut
+		// colors will not be clipped and this flag will be set. We then set up the
+		// next iteration of the loop to duplicate this rule and process it again
+		// with color clipping enabled.
+		if wouldClipColorFlag {
+			if p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions) {
+				// Only do this if there was no previous instance of that property so
+				// we avoid overwriting any manually-specified fallback values
+				for j := len(rewrittenRules) - 2; j >= 0; j-- {
+					if prev, ok := rewrittenRules[j].Data.(*css_ast.RDeclaration); ok && prev.Key == decl.Key {
+						wouldClipColorFlag = false
+						break
+					}
+				}
+				if wouldClipColorFlag {
+					// If the code above would have clipped a color outside of the sRGB gamut,
+					// process this rule again so we can generate the clipped version next time
+					i -= 1
+					continue
+				}
+			}
+			wouldClipColorFlag = false
+		}
+	}
+
+	// Compact removed rules
+	if p.options.minifySyntax {
+		end := 0
+		for _, rule := range rewrittenRules {
+			if rule.Data != nil {
+				rewrittenRules[end] = rule
+				end++
+			}
+		}
+		rewrittenRules = rewrittenRules[:end]
+	}
+
+	return
+}
+
+// insertPrefixedDeclaration inserts a vendor-prefixed copy of "decl" (the
+// declaration most recently appended to "rules") immediately before it,
+// unless the author already wrote that prefixed property themselves. Some
+// properties only need the prefix for specific values ("background-clip:
+// text", "position: sticky"), and some need their values rewritten as well
+// as (or instead of) the property name.
+func (p *parser) insertPrefixedDeclaration(rules []css_ast.Rule, prefix string, loc logger.Loc, decl *css_ast.RDeclaration, declarationKeys map[string]struct{}) []css_ast.Rule {
+	keyText := prefix + decl.KeyText
+
+	// Don't insert a prefixed declaration if there already is one
+	if _, ok := declarationKeys[keyText]; ok {
+		// We found a previous declaration with a matching prefixed property.
+		// The value is ignored, which matches the behavior of "autoprefixer".
+		return rules
+	}
+
+	// Additional special cases for when the prefix applies
+	switch decl.Key {
+	case css_ast.DBackgroundClip:
+		// The prefix is only needed for "background-clip: text"
+		if len(decl.Value) != 1 || decl.Value[0].Kind != css_lexer.TIdent || !strings.EqualFold(decl.Value[0].Text, "text") {
+			return rules
+		}
+
+	case css_ast.DPosition:
+		// The prefix is only needed for "position: sticky"
+		if len(decl.Value) != 1 || decl.Value[0].Kind != css_lexer.TIdent || !strings.EqualFold(decl.Value[0].Text, "sticky") {
+			return rules
+		}
+	}
+
+	value := css_ast.CloneTokensWithoutImportRecords(decl.Value)
+
+	// Additional special cases for how to transform the contents
+	switch decl.Key {
+	case css_ast.DPosition:
+		// The prefix applies to the value, not the property
+		// NOTE(review): hard-codes "-webkit-sticky" regardless of "prefix" —
+		// assumes "position" is only ever configured with the -webkit- prefix;
+		// verify against cssPrefixData if more prefixes are ever added
+		keyText = decl.KeyText
+		value[0].Text = "-webkit-sticky"
+
+	case css_ast.DUserSelect:
+		// The prefix applies to the value as well as the property
+		if prefix == "-moz-" && len(value) == 1 && value[0].Kind == css_lexer.TIdent && strings.EqualFold(value[0].Text, "none") {
+			value[0].Text = "-moz-none"
+		}
+
+	case css_ast.DMaskComposite:
+		// WebKit uses different names for these values
+		if prefix == "-webkit-" {
+			for i, token := range value {
+				if token.Kind == css_lexer.TIdent {
+					switch token.Text {
+					case "add":
+						value[i].Text = "source-over"
+					case "subtract":
+						value[i].Text = "source-out"
+					case "intersect":
+						value[i].Text = "source-in"
+					case "exclude":
+						value[i].Text = "xor"
+					}
+				}
+			}
+		}
+	}
+
+	// Overwrite the latest declaration with the prefixed declaration
+	rules[len(rules)-1] = css_ast.Rule{Loc: loc, Data: &css_ast.RDeclaration{
+		KeyText:   keyText,
+		KeyRange:  decl.KeyRange,
+		Value:     value,
+		Important: decl.Important,
+	}}
+
+	// Re-add the latest declaration after the inserted declaration
+	rules = append(rules, css_ast.Rule{Loc: loc, Data: decl})
+	return rules
+}
+
+// lowerInset expands the "inset" shorthand into separate "top", "right",
+// "bottom", and "left" declarations for targets that don't support "inset".
+// It fails (returns false) when the value doesn't form a plain 1-4 token
+// quad, e.g. when it contains "var()".
+func (p *parser) lowerInset(loc logger.Loc, decl *css_ast.RDeclaration) ([]css_ast.Rule, bool) {
+	tokens, ok := expandTokenQuad(decl.Value, "")
+	if !ok {
+		return nil, false
+	}
+
+	// Each token becomes a single-token value: clear each token's
+	// WhitespaceAfter flag (and all whitespace flags when minifying)
+	mask := ^css_ast.WhitespaceAfter
+	if p.options.minifyWhitespace {
+		mask = 0
+	}
+	for i := range tokens {
+		tokens[i].Whitespace &= mask
+	}
+
+	keys := [4]css_ast.D{css_ast.DTop, css_ast.DRight, css_ast.DBottom, css_ast.DLeft}
+	names := [4]string{"top", "right", "bottom", "left"}
+	out := make([]css_ast.Rule, 4)
+	for i := range out {
+		out[i] = css_ast.Rule{Loc: loc, Data: &css_ast.RDeclaration{
+			KeyText:   names[i],
+			KeyRange:  decl.KeyRange,
+			Key:       keys[i],
+			Value:     tokens[i : i+1],
+			Important: decl.Important,
+		}}
+	}
+	return out, true
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_animation.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_animation.go
new file mode 100644
index 0000000..e4368d5
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_animation.go
@@ -0,0 +1,119 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+// processAnimationShorthand scans an "animation" shorthand value and converts
+// the token that must be the animation name into a renamable symbol (via
+// handleSingleAnimationName). Because the shorthand's parts may appear in any
+// order, each identifier is claimed by the first still-unclaimed slot that
+// accepts it, checked in the order: timing function, iteration count,
+// direction, fill mode, play state, and finally the name. This deliberately
+// means a keyword-looking name (e.g. "none", which the fill-mode slot
+// accepts) is not treated as a name — authors must quote it. A comma starts
+// the next animation in a comma-separated list and resets all slots.
+func (p *parser) processAnimationShorthand(tokens []css_ast.Token) {
+	type foundFlags struct {
+		timingFunction bool
+		iterationCount bool
+		direction      bool
+		fillMode       bool
+		playState      bool
+		name           bool
+	}
+
+	found := foundFlags{}
+
+	for i, t := range tokens {
+		switch t.Kind {
+		case css_lexer.TComma:
+			// Reset the flags when we encounter a comma
+			found = foundFlags{}
+
+		case css_lexer.TNumber:
+			if !found.iterationCount {
+				found.iterationCount = true
+				continue
+			}
+
+		case css_lexer.TIdent:
+			if !found.timingFunction {
+				switch strings.ToLower(t.Text) {
+				case "linear", "ease", "ease-in", "ease-out", "ease-in-out", "step-start", "step-end":
+					found.timingFunction = true
+					continue
+				}
+			}
+
+			if !found.iterationCount && strings.ToLower(t.Text) == "infinite" {
+				found.iterationCount = true
+				continue
+			}
+
+			if !found.direction {
+				switch strings.ToLower(t.Text) {
+				case "normal", "reverse", "alternate", "alternate-reverse":
+					found.direction = true
+					continue
+				}
+			}
+
+			if !found.fillMode {
+				switch strings.ToLower(t.Text) {
+				case "none", "forwards", "backwards", "both":
+					found.fillMode = true
+					continue
+				}
+			}
+
+			if !found.playState {
+				switch strings.ToLower(t.Text) {
+				case "running", "paused":
+					found.playState = true
+					continue
+				}
+			}
+
+			if !found.name {
+				p.handleSingleAnimationName(&tokens[i])
+				found.name = true
+				continue
+			}
+
+		case css_lexer.TString:
+			if !found.name {
+				p.handleSingleAnimationName(&tokens[i])
+				found.name = true
+				continue
+			}
+		}
+	}
+}
+
+// processAnimationName converts every identifier or string token of an
+// "animation-name" value into a renamable symbol.
+func (p *parser) processAnimationName(tokens []css_ast.Token) {
+	for i := range tokens {
+		switch tokens[i].Kind {
+		case css_lexer.TIdent, css_lexer.TString:
+			p.handleSingleAnimationName(&tokens[i])
+		}
+	}
+}
+
+// handleSingleAnimationName turns one animation name token into a symbol so
+// it can be renamed along with the matching "@keyframes" rule.
+//
+// CSS-wide keywords and "none" keep their special meaning in declarations
+// ("animation-name: none" clears the name rather than referencing an
+// animation called "none"), so identifier tokens matching them are left
+// untouched. Strings matching them are also left untouched when they would
+// become global symbols, because global symbols are printed without being
+// renamed and would then read as keywords. Local symbols are always renamed,
+// so strings are always safe to convert in local-symbol mode.
+func (p *parser) handleSingleAnimationName(token *css_ast.Token) {
+	printsLikeKeyword := token.Kind == css_lexer.TIdent ||
+		(token.Kind == css_lexer.TString && !p.makeLocalSymbols)
+	if printsLikeKeyword && isInvalidAnimationName(token.Text) {
+		return
+	}
+
+	token.Kind = css_lexer.TSymbol
+	token.PayloadIndex = p.symbolForName(token.Loc, token.Text).Ref.InnerIndex
+}
+
+// isInvalidAnimationName reports whether "text" (case-insensitively) is
+// "none" or a CSS-wide/reserved keyword, none of which may name an animation.
+func isInvalidAnimationName(text string) bool {
+	if lower := strings.ToLower(text); lower == "none" || cssWideAndReservedKeywords[lower] {
+		return true
+	}
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_border_radius.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_border_radius.go
new file mode 100644
index 0000000..cddc66e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_border_radius.go
@@ -0,0 +1,217 @@
+package css_parser
+
+import (
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// Corner indices used by borderRadiusTracker.corners, in the order the
+// "border-radius" shorthand lists them.
+const (
+	borderRadiusTopLeft = iota
+	borderRadiusTopRight
+	borderRadiusBottomRight
+	borderRadiusBottomLeft
+)
+
+// borderRadiusCorner records the most recent radii seen for one corner.
+// firstToken is the first (horizontal) radius; secondToken is the second
+// (vertical) radius, equal to firstToken when only one radius was written.
+type borderRadiusCorner struct {
+	firstToken    css_ast.Token
+	secondToken   css_ast.Token
+	unitSafety    unitSafetyTracker
+	ruleIndex     uint32 // The index of the originating rule in the rules array
+	wasSingleRule bool   // True if the originating rule was just for this side
+}
+
+// borderRadiusTracker accumulates the four corners of "border-radius" so
+// they can be collapsed into a single shorthand declaration.
+type borderRadiusTracker struct {
+	corners   [4]borderRadiusCorner
+	important bool // True if all active rules were flagged as "!important"
+}
+
+// updateCorner records "new" as the latest declaration for the given corner.
+// If an earlier declaration for the same corner is now fully overridden, it
+// is erased from "rules" by storing an empty Rule (compacted away later).
+// Erasure only happens when the old corner exists, when both values use
+// merge-safe units, and when a corner-specific rule isn't trying to knock
+// out a previous shorthand rule (which also set the other corners).
+func (borderRadius *borderRadiusTracker) updateCorner(rules []css_ast.Rule, corner int, new borderRadiusCorner) {
+	if old := borderRadius.corners[corner]; old.firstToken.Kind != css_lexer.TEndOfFile &&
+		(!new.wasSingleRule || old.wasSingleRule) &&
+		old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
+		rules[old.ruleIndex] = css_ast.Rule{}
+	}
+	borderRadius.corners[corner] = new
+}
+
+// mangleCorners processes a "border-radius" shorthand declaration that was
+// just appended to "rules". The value has the form "h1 h2 h3 h4 / v1 v2 v3 v4"
+// where each quad follows the usual CSS 1-4 value expansion and the "/ v..."
+// half is optional. Each corner is recorded in the tracker (erasing older,
+// now-redundant corner declarations), then compactRules attempts to emit one
+// minimal combined declaration. Unexpected input resets the tracker so no
+// unsound merging happens later.
+func (borderRadius *borderRadiusTracker) mangleCorners(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool) {
+	// Reset if we see a change in the "!important" flag
+	if borderRadius.important != decl.Important {
+		borderRadius.corners = [4]borderRadiusCorner{}
+		borderRadius.important = decl.Important
+	}
+
+	tokens := decl.Value
+	beforeSplit := len(tokens)
+	afterSplit := len(tokens)
+
+	// Search for the single slash if present
+	for i, t := range tokens {
+		if t.Kind == css_lexer.TDelimSlash {
+			if beforeSplit == len(tokens) {
+				beforeSplit = i
+				afterSplit = i + 1
+			} else {
+				// Multiple slashes are an error
+				borderRadius.corners = [4]borderRadiusCorner{}
+				return
+			}
+		}
+	}
+
+	// Use a single tracker for the whole rule
+	unitSafety := unitSafetyTracker{}
+	for _, t := range tokens[:beforeSplit] {
+		unitSafety.includeUnitOf(t)
+	}
+	for _, t := range tokens[afterSplit:] {
+		unitSafety.includeUnitOf(t)
+	}
+
+	firstRadii, firstRadiiOk := expandTokenQuad(tokens[:beforeSplit], "")
+	lastRadii, lastRadiiOk := expandTokenQuad(tokens[afterSplit:], "")
+
+	// Stop now if the pattern wasn't matched
+	if !firstRadiiOk || (beforeSplit < afterSplit && !lastRadiiOk) {
+		borderRadius.corners = [4]borderRadiusCorner{}
+		return
+	}
+
+	// Handle the first radii
+	for corner, t := range firstRadii {
+		if unitSafety.status == unitSafe {
+			t.TurnLengthIntoNumberIfZero()
+		}
+		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
+			firstToken:  t,
+			secondToken: t,
+			unitSafety:  unitSafety,
+			ruleIndex:   uint32(len(rules) - 1),
+		})
+	}
+
+	// Handle the last radii
+	if lastRadiiOk {
+		for corner, t := range lastRadii {
+			if unitSafety.status == unitSafe {
+				t.TurnLengthIntoNumberIfZero()
+			}
+			borderRadius.corners[corner].secondToken = t
+		}
+	}
+
+	// Success
+	borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
+}
+
+// mangleCorner processes a single-corner declaration such as
+// "border-top-left-radius". A corner value is one or two numeric tokens
+// (horizontal radius plus optional vertical radius). The corner is recorded
+// in the tracker and compactRules then attempts to merge all four corners
+// into one "border-radius" shorthand. Anything unexpected resets the tracker.
+func (borderRadius *borderRadiusTracker) mangleCorner(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool, corner int) {
+	// Reset if we see a change in the "!important" flag
+	if borderRadius.important != decl.Important {
+		borderRadius.corners = [4]borderRadiusCorner{}
+		borderRadius.important = decl.Important
+	}
+
+	if tokens := decl.Value; (len(tokens) == 1 && tokens[0].Kind.IsNumeric()) ||
+		(len(tokens) == 2 && tokens[0].Kind.IsNumeric() && tokens[1].Kind.IsNumeric()) {
+		firstToken := tokens[0]
+		secondToken := firstToken
+		if len(tokens) == 2 {
+			secondToken = tokens[1]
+		}
+
+		// Check to see if these units are safe to use in every browser
+		unitSafety := unitSafetyTracker{}
+		unitSafety.includeUnitOf(firstToken)
+		unitSafety.includeUnitOf(secondToken)
+
+		// Only collapse "0unit" into "0" if the unit is safe
+		if unitSafety.status == unitSafe && firstToken.TurnLengthIntoNumberIfZero() {
+			tokens[0] = firstToken
+		}
+		if len(tokens) == 2 {
+			if unitSafety.status == unitSafe && secondToken.TurnLengthIntoNumberIfZero() {
+				tokens[1] = secondToken
+			}
+
+			// If both tokens are equal, merge them into one
+			if firstToken.EqualIgnoringWhitespace(secondToken) {
+				tokens[0].Whitespace &= ^css_ast.WhitespaceAfter
+				decl.Value = tokens[:1]
+			}
+		}
+
+		borderRadius.updateCorner(rules, corner, borderRadiusCorner{
+			firstToken:    firstToken,
+			secondToken:   secondToken,
+			unitSafety:    unitSafety,
+			ruleIndex:     uint32(len(rules) - 1),
+			wasSingleRule: true,
+		})
+		borderRadius.compactRules(rules, decl.KeyRange, minifyWhitespace)
+	} else {
+		borderRadius.corners = [4]borderRadiusCorner{}
+	}
+}
+
+// compactRules merges the four tracked corner declarations into a single
+// "border-radius" declaration. It gives up when any corner is still missing
+// or when the corners' units can't safely share one rule. When the
+// horizontal and vertical radii differ, they are joined with a "/" token.
+// On success all contributing rules are erased and the combined declaration
+// is written into one of their slots, keeping the earliest source location.
+func (borderRadius *borderRadiusTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) {
+	// All tokens must be present
+	if eof := css_lexer.TEndOfFile; borderRadius.corners[0].firstToken.Kind == eof || borderRadius.corners[1].firstToken.Kind == eof ||
+		borderRadius.corners[2].firstToken.Kind == eof || borderRadius.corners[3].firstToken.Kind == eof {
+		return
+	}
+
+	// All tokens must have the same unit
+	for _, side := range borderRadius.corners[1:] {
+		if !side.unitSafety.isSafeWith(borderRadius.corners[0].unitSafety) {
+			return
+		}
+	}
+
+	// Generate the most minimal representation
+	tokens := compactTokenQuad(
+		borderRadius.corners[0].firstToken,
+		borderRadius.corners[1].firstToken,
+		borderRadius.corners[2].firstToken,
+		borderRadius.corners[3].firstToken,
+		minifyWhitespace,
+	)
+	secondTokens := compactTokenQuad(
+		borderRadius.corners[0].secondToken,
+		borderRadius.corners[1].secondToken,
+		borderRadius.corners[2].secondToken,
+		borderRadius.corners[3].secondToken,
+		minifyWhitespace,
+	)
+	if !css_ast.TokensEqualIgnoringWhitespace(tokens, secondTokens) {
+		// The vertical radii differ, so append "/ v1 v2 v3 v4"
+		var whitespace css_ast.WhitespaceFlags
+		if !minifyWhitespace {
+			whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
+		}
+		tokens = append(tokens, css_ast.Token{
+			Loc:        tokens[len(tokens)-1].Loc,
+			Kind:       css_lexer.TDelimSlash,
+			Text:       "/",
+			Whitespace: whitespace,
+		})
+		tokens = append(tokens, secondTokens...)
+	}
+
+	// Remove all of the existing declarations
+	var minLoc logger.Loc
+	for i, corner := range borderRadius.corners {
+		if loc := rules[corner.ruleIndex].Loc; i == 0 || loc.Start < minLoc.Start {
+			minLoc = loc
+		}
+		rules[corner.ruleIndex] = css_ast.Rule{}
+	}
+
+	// Insert the combined declaration where the last rule was
+	rules[borderRadius.corners[3].ruleIndex] = css_ast.Rule{Loc: minLoc, Data: &css_ast.RDeclaration{
+		Key:       css_ast.DBorderRadius,
+		KeyText:   "border-radius",
+		Value:     tokens,
+		KeyRange:  keyRange,
+		Important: borderRadius.important,
+	}}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box.go
new file mode 100644
index 0000000..9f7d7ec
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box.go
@@ -0,0 +1,206 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// Side indices used by boxTracker.sides, in the order the CSS box shorthands
+// list them: top, right, bottom, left.
+const (
+	boxTop = iota
+	boxRight
+	boxBottom
+	boxLeft
+)
+
+// boxSide records the most recent declaration seen for one side of a box.
+type boxSide struct {
+	token         css_ast.Token
+	unitSafety    unitSafetyTracker
+	ruleIndex     uint32 // The index of the originating rule in the rules array
+	wasSingleRule bool   // True if the originating rule was just for this side
+}
+
+// boxTracker accumulates the four sides of a box property ("margin",
+// "padding", or "inset") so they can be collapsed into one shorthand.
+type boxTracker struct {
+	keyText   string // Shorthand property name, e.g. "margin"
+	sides     [4]boxSide
+	allowAuto bool // If true, allow the "auto" keyword
+	important bool // True if all active rules were flagged as "!important"
+	key       css_ast.D // Shorthand property key, e.g. css_ast.DMargin
+}
+
+// unitSafetyStatus classifies how safely a group of length units can be
+// merged into a single declaration (see the comment on unitSafetyTracker).
+type unitSafetyStatus uint8
+
+const (
+	unitSafe         unitSafetyStatus = iota // "margin: 0 1px 2cm 3%;"
+	unitUnsafeSingle                         // "margin: 0 1vw 2vw 3vw;"
+	unitUnsafeMixed                          // "margin: 0 1vw 2vh 3ch;"
+)
+
+// We can only compact rules together if they have the same unit safety level.
+// We want to avoid a situation where the browser treats some of the original
+// rules as valid and others as invalid.
+//
+//	Safe:
+//	  top: 1px; left: 0; bottom: 1px; right: 0;
+//	  top: 1Q; left: 2Q; bottom: 3Q; right: 4Q;
+//
+//	Unsafe:
+//	  top: 1vh; left: 2vw; bottom: 3vh; right: 4vw;
+//	  top: 1Q; left: 2Q; bottom: 3Q; right: 0;
+//	  inset: 1Q 0 0 0; top: 0;
+type unitSafetyTracker struct {
+	unit   string // Only meaningful when status == unitUnsafeSingle
+	status unitSafetyStatus
+}
+
+// isSafeWith reports whether two unit trackers may be minified into a single
+// shorthand without changing which declarations the browser considers valid:
+// the statuses must match, neither may be mixed, and single-unsafe-unit
+// trackers must agree on the unit.
+func (a unitSafetyTracker) isSafeWith(b unitSafetyTracker) bool {
+	if a.status != b.status || a.status == unitUnsafeMixed {
+		return false
+	}
+	return a.status != unitUnsafeSingle || a.unit == b.unit
+}
+
+// includeUnitOf folds one token's unit into the tracker's safety state:
+//   - "0", percentages, and safe length units never change the state
+//   - the first other dimension unit moves the state to unitUnsafeSingle and
+//     remembers that unit; repeats of the same unit stay "single"
+//   - anything else (a different unsafe unit, a nonzero unitless number, or
+//     a non-numeric token) degrades the state to unitUnsafeMixed
+func (t *unitSafetyTracker) includeUnitOf(token css_ast.Token) {
+	switch token.Kind {
+	case css_lexer.TNumber:
+		if token.Text == "0" {
+			return
+		}
+
+	case css_lexer.TPercentage:
+		return
+
+	case css_lexer.TDimension:
+		if token.DimensionUnitIsSafeLength() {
+			return
+		} else if unit := token.DimensionUnit(); t.status == unitSafe {
+			t.status = unitUnsafeSingle
+			t.unit = unit
+			return
+		} else if t.status == unitUnsafeSingle && t.unit == unit {
+			return
+		}
+	}
+
+	t.status = unitUnsafeMixed
+}
+
+// updateSide records "new" as the latest declaration for the given side.
+// If an earlier declaration for the same side is now fully overridden, it is
+// erased from "rules" by storing an empty Rule (compacted away later).
+// Erasure only happens when the old side exists, when both values use
+// merge-safe units, and when a single-side rule isn't trying to knock out a
+// previous shorthand rule (which also set the other sides).
+func (box *boxTracker) updateSide(rules []css_ast.Rule, side int, new boxSide) {
+	if old := box.sides[side]; old.token.Kind != css_lexer.TEndOfFile &&
+		(!new.wasSingleRule || old.wasSingleRule) &&
+		old.unitSafety.status == unitSafe && new.unitSafety.status == unitSafe {
+		rules[old.ruleIndex] = css_ast.Rule{}
+	}
+	box.sides[side] = new
+}
+
+// mangleSides processes a four-sided shorthand declaration ("margin",
+// "padding", or "inset") that was just appended to "rules". The value is
+// expanded into its four sides, each side is recorded in the tracker
+// (erasing older, now-redundant declarations), and compactRules then tries
+// to collapse everything back into one minimal shorthand. Any value that
+// can't be expanded (e.g. one containing "var()") resets the tracker.
+func (box *boxTracker) mangleSides(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool) {
+	// Reset if we see a change in the "!important" flag
+	if box.important != decl.Important {
+		box.sides = [4]boxSide{}
+		box.important = decl.Important
+	}
+
+	allowedIdent := ""
+	if box.allowAuto {
+		allowedIdent = "auto"
+	}
+	if quad, ok := expandTokenQuad(decl.Value, allowedIdent); ok {
+		// Use a single tracker for the whole rule
+		unitSafety := unitSafetyTracker{}
+		for _, t := range quad {
+			if !box.allowAuto || t.Kind.IsNumeric() {
+				unitSafety.includeUnitOf(t)
+			}
+		}
+		for side, t := range quad {
+			if unitSafety.status == unitSafe {
+				t.TurnLengthIntoNumberIfZero()
+			}
+			box.updateSide(rules, side, boxSide{
+				token:      t,
+				ruleIndex:  uint32(len(rules) - 1),
+				unitSafety: unitSafety,
+			})
+		}
+		box.compactRules(rules, decl.KeyRange, minifyWhitespace)
+	} else {
+		box.sides = [4]boxSide{}
+	}
+}
+
+// mangleSide processes a single-side declaration such as "margin-top". The
+// value must be one numeric token (or "auto" where permitted); it is recorded
+// in the tracker and compactRules then tries to merge all four sides into
+// one shorthand. Anything unexpected resets the tracker.
+func (box *boxTracker) mangleSide(rules []css_ast.Rule, decl *css_ast.RDeclaration, minifyWhitespace bool, side int) {
+	// Reset if we see a change in the "!important" flag
+	if box.important != decl.Important {
+		box.sides = [4]boxSide{}
+		box.important = decl.Important
+	}
+
+	if tokens := decl.Value; len(tokens) == 1 {
+		if t := tokens[0]; t.Kind.IsNumeric() || (t.Kind == css_lexer.TIdent && box.allowAuto && strings.EqualFold(t.Text, "auto")) {
+			unitSafety := unitSafetyTracker{}
+			if !box.allowAuto || t.Kind.IsNumeric() {
+				unitSafety.includeUnitOf(t)
+			}
+			// Only collapse "0unit" into "0" if the unit is safe
+			if unitSafety.status == unitSafe && t.TurnLengthIntoNumberIfZero() {
+				tokens[0] = t
+			}
+			box.updateSide(rules, side, boxSide{
+				token:         t,
+				ruleIndex:     uint32(len(rules) - 1),
+				wasSingleRule: true,
+				unitSafety:    unitSafety,
+			})
+			box.compactRules(rules, decl.KeyRange, minifyWhitespace)
+			return
+		}
+	}
+
+	box.sides = [4]boxSide{}
+}
+
+// compactRules merges the four tracked side declarations into one shorthand
+// declaration. It gives up when the shorthand is unsupported for this target
+// (key == DUnknown, see processDeclarations), when any side is still missing,
+// or when the sides' units can't safely share one rule. On success all
+// contributing rules are erased and the combined declaration is written into
+// one of their slots, keeping the earliest source location.
+func (box *boxTracker) compactRules(rules []css_ast.Rule, keyRange logger.Range, minifyWhitespace bool) {
+	// Don't compact if the shorthand form is unsupported
+	if box.key == css_ast.DUnknown {
+		return
+	}
+
+	// All tokens must be present
+	if eof := css_lexer.TEndOfFile; box.sides[0].token.Kind == eof || box.sides[1].token.Kind == eof ||
+		box.sides[2].token.Kind == eof || box.sides[3].token.Kind == eof {
+		return
+	}
+
+	// All tokens must have the same unit
+	for _, side := range box.sides[1:] {
+		if !side.unitSafety.isSafeWith(box.sides[0].unitSafety) {
+			return
+		}
+	}
+
+	// Generate the most minimal representation
+	tokens := compactTokenQuad(
+		box.sides[0].token,
+		box.sides[1].token,
+		box.sides[2].token,
+		box.sides[3].token,
+		minifyWhitespace,
+	)
+
+	// Remove all of the existing declarations
+	var minLoc logger.Loc
+	for i, side := range box.sides {
+		if loc := rules[side.ruleIndex].Loc; i == 0 || loc.Start < minLoc.Start {
+			minLoc = loc
+		}
+		rules[side.ruleIndex] = css_ast.Rule{}
+	}
+
+	// Insert the combined declaration where the last rule was
+	rules[box.sides[3].ruleIndex] = css_ast.Rule{Loc: minLoc, Data: &css_ast.RDeclaration{
+		Key:       box.key,
+		KeyText:   box.keyText,
+		Value:     tokens,
+		KeyRange:  keyRange,
+		Important: box.important,
+	}}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box_shadow.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box_shadow.go
new file mode 100644
index 0000000..5bc730f
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_box_shadow.go
@@ -0,0 +1,106 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+func (p *parser) lowerAndMangleBoxShadow(tokens []css_ast.Token, wouldClipColor *bool) []css_ast.Token {
+	insetCount := 0
+	colorCount := 0
+	numbersBegin := 0
+	numbersCount := 0
+	numbersDone := false
+	foundUnexpectedToken := false
+
+	for i, t := range tokens {
+		if t.Kind == css_lexer.TNumber || t.Kind == css_lexer.TDimension {
+			if numbersDone {
+				// Track if we found a non-number in between two numbers
+				foundUnexpectedToken = true
+			}
+			if p.options.minifySyntax && t.TurnLengthIntoNumberIfZero() {
+				// "0px" => "0"
+				tokens[i] = t
+			}
+			if numbersCount == 0 {
+				// Track the index of the first number
+				numbersBegin = i
+			}
+			numbersCount++
+		} else {
+			if numbersCount != 0 {
+				// Track when we find a non-number after a number
+				numbersDone = true
+			}
+
+			if looksLikeColor(t) {
+				colorCount++
+				tokens[i] = p.lowerAndMinifyColor(t, wouldClipColor)
+			} else if t.Kind == css_lexer.TIdent && strings.EqualFold(t.Text, "inset") {
+				insetCount++
+			} else {
+				// Track if we found a token other than a number, a color, or "inset"
+				foundUnexpectedToken = true
+			}
+		}
+	}
+
+	// If everything looks like a valid rule, trim trailing zeros off the numbers.
+	// There are three valid configurations of numbers:
+	//
+	//   offset-x | offset-y
+	//   offset-x | offset-y | blur-radius
+	//   offset-x | offset-y | blur-radius | spread-radius
+	//
+	// If omitted, blur-radius and spread-radius are implied to be zero.
+	if p.options.minifySyntax && insetCount <= 1 && colorCount <= 1 && numbersCount > 2 && numbersCount <= 4 && !foundUnexpectedToken {
+		numbersEnd := numbersBegin + numbersCount
+		for numbersCount > 2 && tokens[numbersBegin+numbersCount-1].IsZero() {
+			numbersCount--
+		}
+		tokens = append(tokens[:numbersBegin+numbersCount], tokens[numbersEnd:]...)
+	}
+
+	// Set the whitespace flags
+	for i := range tokens {
+		var whitespace css_ast.WhitespaceFlags
+		if i > 0 || !p.options.minifyWhitespace {
+			whitespace |= css_ast.WhitespaceBefore
+		}
+		if i+1 < len(tokens) {
+			whitespace |= css_ast.WhitespaceAfter
+		}
+		tokens[i].Whitespace = whitespace
+	}
+	return tokens
+}
+
+func (p *parser) lowerAndMangleBoxShadows(tokens []css_ast.Token, wouldClipColor *bool) []css_ast.Token {
+	n := len(tokens)
+	end := 0
+	i := 0
+
+	for i < n {
+		// Find the comma or the end of the token list
+		comma := i
+		for comma < n && tokens[comma].Kind != css_lexer.TComma {
+			comma++
+		}
+
+		// Mangle this individual shadow
+		end += copy(tokens[end:], p.lowerAndMangleBoxShadow(tokens[i:comma], wouldClipColor))
+
+		// Skip over the comma
+		if comma < n {
+			tokens[end] = tokens[comma]
+			end++
+			comma++
+		}
+		i = comma
+	}
+
+	return tokens[:end]
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_color.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_color.go
new file mode 100644
index 0000000..c6b6691
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_color.go
@@ -0,0 +1,938 @@
+package css_parser
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/helpers"
+)
+
+// These names are shorter than their hex codes
+var shortColorName = map[uint32]string{
+	0x000080ff: "navy",
+	0x008000ff: "green",
+	0x008080ff: "teal",
+	0x4b0082ff: "indigo",
+	0x800000ff: "maroon",
+	0x800080ff: "purple",
+	0x808000ff: "olive",
+	0x808080ff: "gray",
+	0xa0522dff: "sienna",
+	0xa52a2aff: "brown",
+	0xc0c0c0ff: "silver",
+	0xcd853fff: "peru",
+	0xd2b48cff: "tan",
+	0xda70d6ff: "orchid",
+	0xdda0ddff: "plum",
+	0xee82eeff: "violet",
+	0xf0e68cff: "khaki",
+	0xf0ffffff: "azure",
+	0xf5deb3ff: "wheat",
+	0xf5f5dcff: "beige",
+	0xfa8072ff: "salmon",
+	0xfaf0e6ff: "linen",
+	0xff0000ff: "red",
+	0xff6347ff: "tomato",
+	0xff7f50ff: "coral",
+	0xffa500ff: "orange",
+	0xffc0cbff: "pink",
+	0xffd700ff: "gold",
+	0xffe4c4ff: "bisque",
+	0xfffafaff: "snow",
+	0xfffff0ff: "ivory",
+}
+
+var colorNameToHex = map[string]uint32{
+	"black":                0x000000ff,
+	"silver":               0xc0c0c0ff,
+	"gray":                 0x808080ff,
+	"white":                0xffffffff,
+	"maroon":               0x800000ff,
+	"red":                  0xff0000ff,
+	"purple":               0x800080ff,
+	"fuchsia":              0xff00ffff,
+	"green":                0x008000ff,
+	"lime":                 0x00ff00ff,
+	"olive":                0x808000ff,
+	"yellow":               0xffff00ff,
+	"navy":                 0x000080ff,
+	"blue":                 0x0000ffff,
+	"teal":                 0x008080ff,
+	"aqua":                 0x00ffffff,
+	"orange":               0xffa500ff,
+	"aliceblue":            0xf0f8ffff,
+	"antiquewhite":         0xfaebd7ff,
+	"aquamarine":           0x7fffd4ff,
+	"azure":                0xf0ffffff,
+	"beige":                0xf5f5dcff,
+	"bisque":               0xffe4c4ff,
+	"blanchedalmond":       0xffebcdff,
+	"blueviolet":           0x8a2be2ff,
+	"brown":                0xa52a2aff,
+	"burlywood":            0xdeb887ff,
+	"cadetblue":            0x5f9ea0ff,
+	"chartreuse":           0x7fff00ff,
+	"chocolate":            0xd2691eff,
+	"coral":                0xff7f50ff,
+	"cornflowerblue":       0x6495edff,
+	"cornsilk":             0xfff8dcff,
+	"crimson":              0xdc143cff,
+	"cyan":                 0x00ffffff,
+	"darkblue":             0x00008bff,
+	"darkcyan":             0x008b8bff,
+	"darkgoldenrod":        0xb8860bff,
+	"darkgray":             0xa9a9a9ff,
+	"darkgreen":            0x006400ff,
+	"darkgrey":             0xa9a9a9ff,
+	"darkkhaki":            0xbdb76bff,
+	"darkmagenta":          0x8b008bff,
+	"darkolivegreen":       0x556b2fff,
+	"darkorange":           0xff8c00ff,
+	"darkorchid":           0x9932ccff,
+	"darkred":              0x8b0000ff,
+	"darksalmon":           0xe9967aff,
+	"darkseagreen":         0x8fbc8fff,
+	"darkslateblue":        0x483d8bff,
+	"darkslategray":        0x2f4f4fff,
+	"darkslategrey":        0x2f4f4fff,
+	"darkturquoise":        0x00ced1ff,
+	"darkviolet":           0x9400d3ff,
+	"deeppink":             0xff1493ff,
+	"deepskyblue":          0x00bfffff,
+	"dimgray":              0x696969ff,
+	"dimgrey":              0x696969ff,
+	"dodgerblue":           0x1e90ffff,
+	"firebrick":            0xb22222ff,
+	"floralwhite":          0xfffaf0ff,
+	"forestgreen":          0x228b22ff,
+	"gainsboro":            0xdcdcdcff,
+	"ghostwhite":           0xf8f8ffff,
+	"gold":                 0xffd700ff,
+	"goldenrod":            0xdaa520ff,
+	"greenyellow":          0xadff2fff,
+	"grey":                 0x808080ff,
+	"honeydew":             0xf0fff0ff,
+	"hotpink":              0xff69b4ff,
+	"indianred":            0xcd5c5cff,
+	"indigo":               0x4b0082ff,
+	"ivory":                0xfffff0ff,
+	"khaki":                0xf0e68cff,
+	"lavender":             0xe6e6faff,
+	"lavenderblush":        0xfff0f5ff,
+	"lawngreen":            0x7cfc00ff,
+	"lemonchiffon":         0xfffacdff,
+	"lightblue":            0xadd8e6ff,
+	"lightcoral":           0xf08080ff,
+	"lightcyan":            0xe0ffffff,
+	"lightgoldenrodyellow": 0xfafad2ff,
+	"lightgray":            0xd3d3d3ff,
+	"lightgreen":           0x90ee90ff,
+	"lightgrey":            0xd3d3d3ff,
+	"lightpink":            0xffb6c1ff,
+	"lightsalmon":          0xffa07aff,
+	"lightseagreen":        0x20b2aaff,
+	"lightskyblue":         0x87cefaff,
+	"lightslategray":       0x778899ff,
+	"lightslategrey":       0x778899ff,
+	"lightsteelblue":       0xb0c4deff,
+	"lightyellow":          0xffffe0ff,
+	"limegreen":            0x32cd32ff,
+	"linen":                0xfaf0e6ff,
+	"magenta":              0xff00ffff,
+	"mediumaquamarine":     0x66cdaaff,
+	"mediumblue":           0x0000cdff,
+	"mediumorchid":         0xba55d3ff,
+	"mediumpurple":         0x9370dbff,
+	"mediumseagreen":       0x3cb371ff,
+	"mediumslateblue":      0x7b68eeff,
+	"mediumspringgreen":    0x00fa9aff,
+	"mediumturquoise":      0x48d1ccff,
+	"mediumvioletred":      0xc71585ff,
+	"midnightblue":         0x191970ff,
+	"mintcream":            0xf5fffaff,
+	"mistyrose":            0xffe4e1ff,
+	"moccasin":             0xffe4b5ff,
+	"navajowhite":          0xffdeadff,
+	"oldlace":              0xfdf5e6ff,
+	"olivedrab":            0x6b8e23ff,
+	"orangered":            0xff4500ff,
+	"orchid":               0xda70d6ff,
+	"palegoldenrod":        0xeee8aaff,
+	"palegreen":            0x98fb98ff,
+	"paleturquoise":        0xafeeeeff,
+	"palevioletred":        0xdb7093ff,
+	"papayawhip":           0xffefd5ff,
+	"peachpuff":            0xffdab9ff,
+	"peru":                 0xcd853fff,
+	"pink":                 0xffc0cbff,
+	"plum":                 0xdda0ddff,
+	"powderblue":           0xb0e0e6ff,
+	"rosybrown":            0xbc8f8fff,
+	"royalblue":            0x4169e1ff,
+	"saddlebrown":          0x8b4513ff,
+	"salmon":               0xfa8072ff,
+	"sandybrown":           0xf4a460ff,
+	"seagreen":             0x2e8b57ff,
+	"seashell":             0xfff5eeff,
+	"sienna":               0xa0522dff,
+	"skyblue":              0x87ceebff,
+	"slateblue":            0x6a5acdff,
+	"slategray":            0x708090ff,
+	"slategrey":            0x708090ff,
+	"snow":                 0xfffafaff,
+	"springgreen":          0x00ff7fff,
+	"steelblue":            0x4682b4ff,
+	"tan":                  0xd2b48cff,
+	"thistle":              0xd8bfd8ff,
+	"tomato":               0xff6347ff,
+	"turquoise":            0x40e0d0ff,
+	"violet":               0xee82eeff,
+	"wheat":                0xf5deb3ff,
+	"whitesmoke":           0xf5f5f5ff,
+	"yellowgreen":          0x9acd32ff,
+	"rebeccapurple":        0x663399ff,
+}
+
+func parseHex(text string) (uint32, bool) {
+	hex := uint32(0)
+	for _, c := range text {
+		hex <<= 4
+		switch {
+		case c >= '0' && c <= '9':
+			hex |= uint32(c) - '0'
+		case c >= 'a' && c <= 'f':
+			hex |= uint32(c) - ('a' - 10)
+		case c >= 'A' && c <= 'F':
+			hex |= uint32(c) - ('A' - 10)
+		default:
+			return 0, false
+		}
+	}
+	return hex, true
+}
+
+// 0xAABBCCDD => 0xABCD
+func compactHex(v uint32) uint32 {
+	return ((v & 0x0FF00000) >> 12) | ((v & 0x00000FF0) >> 4)
+}
+
+// 0xABCD => 0xAABBCCDD
+func expandHex(v uint32) uint32 {
+	return ((v & 0xF000) << 16) | ((v & 0xFF00) << 12) | ((v & 0x0FF0) << 8) | ((v & 0x00FF) << 4) | (v & 0x000F)
+}
+
+func hexR(v uint32) int { return int(v >> 24) }
+func hexG(v uint32) int { return int((v >> 16) & 255) }
+func hexB(v uint32) int { return int((v >> 8) & 255) }
+func hexA(v uint32) int { return int(v & 255) }
+
+func floatToStringForColor(a float64) string {
+	text := fmt.Sprintf("%.03f", a)
+	for text[len(text)-1] == '0' {
+		text = text[:len(text)-1]
+	}
+	if text[len(text)-1] == '.' {
+		text = text[:len(text)-1]
+	}
+	return text
+}
+
+func degreesForAngle(token css_ast.Token) (float64, bool) {
+	switch token.Kind {
+	case css_lexer.TNumber:
+		if value, err := strconv.ParseFloat(token.Text, 64); err == nil {
+			return value, true
+		}
+
+	case css_lexer.TDimension:
+		if value, err := strconv.ParseFloat(token.DimensionValue(), 64); err == nil {
+			switch token.DimensionUnit() {
+			case "deg":
+				return value, true
+			case "grad":
+				return value * (360.0 / 400.0), true
+			case "rad":
+				return value * (180.0 / math.Pi), true
+			case "turn":
+				return value * 360.0, true
+			}
+		}
+	}
+	return 0, false
+}
+
+func lowerAlphaPercentageToNumber(token css_ast.Token) css_ast.Token {
+	if token.Kind == css_lexer.TPercentage {
+		if value, err := strconv.ParseFloat(token.Text[:len(token.Text)-1], 64); err == nil {
+			token.Kind = css_lexer.TNumber
+			token.Text = floatToStringForColor(value / 100.0)
+		}
+	}
+	return token
+}
+
+// Convert newer color syntax to older color syntax for older browsers
+func (p *parser) lowerAndMinifyColor(token css_ast.Token, wouldClipColor *bool) css_ast.Token {
+	text := token.Text
+
+	switch token.Kind {
+	case css_lexer.THash:
+		if p.options.unsupportedCSSFeatures.Has(compat.HexRGBA) {
+			switch len(text) {
+			case 4:
+				// "#1234" => "rgba(1, 2, 3, 0.004)"
+				if hex, ok := parseHex(text); ok {
+					hex = expandHex(hex)
+					return p.tryToGenerateColor(token, parsedColor{hex: hex}, nil)
+				}
+
+			case 8:
+				// "#12345678" => "rgba(18, 52, 86, 0.47)"
+				if hex, ok := parseHex(text); ok {
+					return p.tryToGenerateColor(token, parsedColor{hex: hex}, nil)
+				}
+			}
+		}
+
+	case css_lexer.TIdent:
+		if p.options.unsupportedCSSFeatures.Has(compat.RebeccaPurple) && strings.EqualFold(text, "rebeccapurple") {
+			token.Kind = css_lexer.THash
+			token.Text = "663399"
+		}
+
+	case css_lexer.TFunction:
+		switch strings.ToLower(text) {
+		case "rgb", "rgba", "hsl", "hsla":
+			if p.options.unsupportedCSSFeatures.Has(compat.Modern_RGB_HSL) {
+				args := *token.Children
+				removeAlpha := false
+				addAlpha := false
+
+				// "hsl(1deg, 2%, 3%)" => "hsl(1, 2%, 3%)"
+				if (text == "hsl" || text == "hsla") && len(args) > 0 {
+					if degrees, ok := degreesForAngle(args[0]); ok {
+						args[0].Kind = css_lexer.TNumber
+						args[0].Text = floatToStringForColor(degrees)
+					}
+				}
+
+				// These check for "IsNumeric" to reject "var()" since a single "var()"
+				// can substitute for multiple tokens and that messes up pattern matching
+				switch len(args) {
+				case 3:
+					// "rgba(1 2 3)" => "rgb(1, 2, 3)"
+					// "hsla(1 2% 3%)" => "hsl(1, 2%, 3%)"
+					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() {
+						removeAlpha = true
+						args[0].Whitespace = 0
+						args[1].Whitespace = 0
+						commaToken := p.commaToken(token.Loc)
+						token.Children = &[]css_ast.Token{
+							args[0], commaToken,
+							args[1], commaToken,
+							args[2],
+						}
+					}
+
+				case 5:
+					// "rgba(1, 2, 3)" => "rgb(1, 2, 3)"
+					// "hsla(1, 2%, 3%)" => "hsl(1%, 2%, 3%)"
+					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
+						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
+						args[4].Kind.IsNumeric() {
+						removeAlpha = true
+						break
+					}
+
+					// "rgb(1 2 3 / 4%)" => "rgba(1, 2, 3, 0.04)"
+					// "hsl(1 2% 3% / 4%)" => "hsla(1, 2%, 3%, 0.04)"
+					if args[0].Kind.IsNumeric() && args[1].Kind.IsNumeric() && args[2].Kind.IsNumeric() &&
+						args[3].Kind == css_lexer.TDelimSlash && args[4].Kind.IsNumeric() {
+						addAlpha = true
+						args[0].Whitespace = 0
+						args[1].Whitespace = 0
+						args[2].Whitespace = 0
+						commaToken := p.commaToken(token.Loc)
+						token.Children = &[]css_ast.Token{
+							args[0], commaToken,
+							args[1], commaToken,
+							args[2], commaToken,
+							lowerAlphaPercentageToNumber(args[4]),
+						}
+					}
+
+				case 7:
+					// "rgb(1%, 2%, 3%, 4%)" => "rgba(1%, 2%, 3%, 0.04)"
+					// "hsl(1, 2%, 3%, 4%)" => "hsla(1, 2%, 3%, 0.04)"
+					if args[0].Kind.IsNumeric() && args[1].Kind == css_lexer.TComma &&
+						args[2].Kind.IsNumeric() && args[3].Kind == css_lexer.TComma &&
+						args[4].Kind.IsNumeric() && args[5].Kind == css_lexer.TComma &&
+						args[6].Kind.IsNumeric() {
+						addAlpha = true
+						args[6] = lowerAlphaPercentageToNumber(args[6])
+					}
+				}
+
+				if removeAlpha {
+					if strings.EqualFold(text, "rgba") {
+						token.Text = "rgb"
+					} else if strings.EqualFold(text, "hsla") {
+						token.Text = "hsl"
+					}
+				} else if addAlpha {
+					if strings.EqualFold(text, "rgb") {
+						token.Text = "rgba"
+					} else if strings.EqualFold(text, "hsl") {
+						token.Text = "hsla"
+					}
+				}
+			}
+
+		case "hwb":
+			if p.options.unsupportedCSSFeatures.Has(compat.HWB) {
+				if color, ok := parseColor(token); ok {
+					return p.tryToGenerateColor(token, color, wouldClipColor)
+				}
+			}
+
+		case "color", "lab", "lch", "oklab", "oklch":
+			if p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions) {
+				if color, ok := parseColor(token); ok {
+					return p.tryToGenerateColor(token, color, wouldClipColor)
+				}
+			}
+		}
+	}
+
+	// When minifying, try to parse the color and print it back out. This minifies
+	// the color because we always print it out using the shortest encoding.
+	if p.options.minifySyntax {
+		if hex, ok := parseColor(token); ok {
+			token = p.tryToGenerateColor(token, hex, wouldClipColor)
+		}
+	}
+
+	return token
+}
+
+type parsedColor struct {
+	x, y, z       F64    // color if hasColorSpace == true
+	hex           uint32 // color and alpha if hasColorSpace == false, alpha if hasColorSpace == true
+	hasColorSpace bool
+}
+
+func looksLikeColor(token css_ast.Token) bool {
+	switch token.Kind {
+	case css_lexer.TIdent:
+		if _, ok := colorNameToHex[strings.ToLower(token.Text)]; ok {
+			return true
+		}
+
+	case css_lexer.THash:
+		switch len(token.Text) {
+		case 3, 4, 6, 8:
+			if _, ok := parseHex(token.Text); ok {
+				return true
+			}
+		}
+
+	case css_lexer.TFunction:
+		switch strings.ToLower(token.Text) {
+		case
+			"color-mix",
+			"color",
+			"hsl",
+			"hsla",
+			"hwb",
+			"lab",
+			"lch",
+			"oklab",
+			"oklch",
+			"rgb",
+			"rgba":
+			return true
+		}
+	}
+
+	return false
+}
+
+func parseColor(token css_ast.Token) (parsedColor, bool) {
+	text := token.Text
+
+	switch token.Kind {
+	case css_lexer.TIdent:
+		if hex, ok := colorNameToHex[strings.ToLower(text)]; ok {
+			return parsedColor{hex: hex}, true
+		}
+
+	case css_lexer.THash:
+		switch len(text) {
+		case 3:
+			// "#123"
+			if hex, ok := parseHex(text); ok {
+				return parsedColor{hex: (expandHex(hex) << 8) | 0xFF}, true
+			}
+
+		case 4:
+			// "#1234"
+			if hex, ok := parseHex(text); ok {
+				return parsedColor{hex: expandHex(hex)}, true
+			}
+
+		case 6:
+			// "#112233"
+			if hex, ok := parseHex(text); ok {
+				return parsedColor{hex: (hex << 8) | 0xFF}, true
+			}
+
+		case 8:
+			// "#11223344"
+			if hex, ok := parseHex(text); ok {
+				return parsedColor{hex: hex}, true
+			}
+		}
+
+	case css_lexer.TFunction:
+		lowerText := strings.ToLower(text)
+		switch lowerText {
+		case "rgb", "rgba":
+			args := *token.Children
+			var r, g, b, a css_ast.Token
+
+			switch len(args) {
+			case 3:
+				// "rgb(1 2 3)"
+				r, g, b = args[0], args[1], args[2]
+
+			case 5:
+				// "rgba(1, 2, 3)"
+				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
+					r, g, b = args[0], args[2], args[4]
+					break
+				}
+
+				// "rgb(1 2 3 / 4%)"
+				if args[3].Kind == css_lexer.TDelimSlash {
+					r, g, b, a = args[0], args[1], args[2], args[4]
+				}
+
+			case 7:
+				// "rgb(1%, 2%, 3%, 4%)"
+				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
+					r, g, b, a = args[0], args[2], args[4], args[6]
+				}
+			}
+
+			if r, ok := parseColorByte(r, 1); ok {
+				if g, ok := parseColorByte(g, 1); ok {
+					if b, ok := parseColorByte(b, 1); ok {
+						if a, ok := parseAlphaByte(a); ok {
+							return parsedColor{hex: (r << 24) | (g << 16) | (b << 8) | a}, true
+						}
+					}
+				}
+			}
+
+		case "hsl", "hsla":
+			args := *token.Children
+			var h, s, l, a css_ast.Token
+
+			switch len(args) {
+			case 3:
+				// "hsl(1 2 3)"
+				h, s, l = args[0], args[1], args[2]
+
+			case 5:
+				// "hsla(1, 2, 3)"
+				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma {
+					h, s, l = args[0], args[2], args[4]
+					break
+				}
+
+				// "hsl(1 2 3 / 4%)"
+				if args[3].Kind == css_lexer.TDelimSlash {
+					h, s, l, a = args[0], args[1], args[2], args[4]
+				}
+
+			case 7:
+				// "hsl(1%, 2%, 3%, 4%)"
+				if args[1].Kind == css_lexer.TComma && args[3].Kind == css_lexer.TComma && args[5].Kind == css_lexer.TComma {
+					h, s, l, a = args[0], args[2], args[4], args[6]
+				}
+			}
+
+			// HSL => RGB
+			if h, ok := degreesForAngle(h); ok {
+				if s, ok := s.ClampedFractionForPercentage(); ok {
+					if l, ok := l.ClampedFractionForPercentage(); ok {
+						if a, ok := parseAlphaByte(a); ok {
+							r, g, b := hslToRgb(helpers.NewF64(h), helpers.NewF64(s), helpers.NewF64(l))
+							return parsedColor{hex: packRGBA(r, g, b, a)}, true
+						}
+					}
+				}
+			}
+
+		case "hwb":
+			args := *token.Children
+			var h, s, l, a css_ast.Token
+
+			switch len(args) {
+			case 3:
+				// "hwb(1 2 3)"
+				h, s, l = args[0], args[1], args[2]
+
+			case 5:
+				// "hwb(1 2 3 / 4%)"
+				if args[3].Kind == css_lexer.TDelimSlash {
+					h, s, l, a = args[0], args[1], args[2], args[4]
+				}
+			}
+
+			// HWB => RGB
+			if h, ok := degreesForAngle(h); ok {
+				if white, ok := s.ClampedFractionForPercentage(); ok {
+					if black, ok := l.ClampedFractionForPercentage(); ok {
+						if a, ok := parseAlphaByte(a); ok {
+							r, g, b := hwbToRgb(helpers.NewF64(h), helpers.NewF64(white), helpers.NewF64(black))
+							return parsedColor{hex: packRGBA(r, g, b, a)}, true
+						}
+					}
+				}
+			}
+
+		case "color":
+			args := *token.Children
+			var colorSpace, alpha css_ast.Token
+
+			switch len(args) {
+			case 4:
+				// "color(xyz 1 2 3)"
+				colorSpace = args[0]
+
+			case 6:
+				// "color(xyz 1 2 3 / 50%)"
+				if args[4].Kind == css_lexer.TDelimSlash {
+					colorSpace, alpha = args[0], args[5]
+				}
+			}
+
+			if colorSpace.Kind == css_lexer.TIdent {
+				if v0, ok := args[1].NumberOrFractionForPercentage(1, 0); ok {
+					if v1, ok := args[2].NumberOrFractionForPercentage(1, 0); ok {
+						if v2, ok := args[3].NumberOrFractionForPercentage(1, 0); ok {
+							if a, ok := parseAlphaByte(alpha); ok {
+								v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
+								switch strings.ToLower(colorSpace.Text) {
+								case "a98-rgb":
+									r, g, b := lin_a98rgb(v0, v1, v2)
+									x, y, z := lin_a98rgb_to_xyz(r, g, b)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "display-p3":
+									r, g, b := lin_p3(v0, v1, v2)
+									x, y, z := lin_p3_to_xyz(r, g, b)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "prophoto-rgb":
+									r, g, b := lin_prophoto(v0, v1, v2)
+									x, y, z := lin_prophoto_to_xyz(r, g, b)
+									x, y, z = d50_to_d65(x, y, z)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "rec2020":
+									r, g, b := lin_2020(v0, v1, v2)
+									x, y, z := lin_2020_to_xyz(r, g, b)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "srgb":
+									r, g, b := lin_srgb(v0, v1, v2)
+									x, y, z := lin_srgb_to_xyz(r, g, b)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "srgb-linear":
+									x, y, z := lin_srgb_to_xyz(v0, v1, v2)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+
+								case "xyz", "xyz-d65":
+									return parsedColor{hasColorSpace: true, x: v0, y: v1, z: v2, hex: a}, true
+
+								case "xyz-d50":
+									x, y, z := d50_to_d65(v0, v1, v2)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: a}, true
+								}
+							}
+						}
+					}
+				}
+			}
+
+		case "lab", "lch", "oklab", "oklch":
+			args := *token.Children
+			var v0, v1, v2, alpha css_ast.Token
+
+			switch len(args) {
+			case 3:
+				// "lab(1 2 3)"
+				v0, v1, v2 = args[0], args[1], args[2]
+
+			case 5:
+				// "lab(1 2 3 / 50%)"
+				if args[3].Kind == css_lexer.TDelimSlash {
+					v0, v1, v2, alpha = args[0], args[1], args[2], args[4]
+				}
+			}
+
+			if v0.Kind != css_lexer.T(0) {
+				if alpha, ok := parseAlphaByte(alpha); ok {
+					switch lowerText {
+					case "lab":
+						if v0, ok := v0.NumberOrFractionForPercentage(100, 0); ok {
+							if v1, ok := v1.NumberOrFractionForPercentage(125, css_ast.AllowAnyPercentage); ok {
+								if v2, ok := v2.NumberOrFractionForPercentage(125, css_ast.AllowAnyPercentage); ok {
+									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
+									x, y, z := lab_to_xyz(v0, v1, v2)
+									x, y, z = d50_to_d65(x, y, z)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
+								}
+							}
+						}
+
+					case "lch":
+						if v0, ok := v0.NumberOrFractionForPercentage(100, 0); ok {
+							if v1, ok := v1.NumberOrFractionForPercentage(125, css_ast.AllowPercentageAbove100); ok {
+								if v2, ok := degreesForAngle(v2); ok {
+									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
+									l, a, b := lch_to_lab(v0, v1, v2)
+									x, y, z := lab_to_xyz(l, a, b)
+									x, y, z = d50_to_d65(x, y, z)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
+								}
+							}
+						}
+
+					case "oklab":
+						if v0, ok := v0.NumberOrFractionForPercentage(1, 0); ok {
+							if v1, ok := v1.NumberOrFractionForPercentage(0.4, css_ast.AllowAnyPercentage); ok {
+								if v2, ok := v2.NumberOrFractionForPercentage(0.4, css_ast.AllowAnyPercentage); ok {
+									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
+									x, y, z := oklab_to_xyz(v0, v1, v2)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
+								}
+							}
+						}
+
+					case "oklch":
+						if v0, ok := v0.NumberOrFractionForPercentage(1, 0); ok {
+							if v1, ok := v1.NumberOrFractionForPercentage(0.4, css_ast.AllowPercentageAbove100); ok {
+								if v2, ok := degreesForAngle(v2); ok {
+									v0, v1, v2 := helpers.NewF64(v0), helpers.NewF64(v1), helpers.NewF64(v2)
+									l, a, b := oklch_to_oklab(v0, v1, v2)
+									x, y, z := oklab_to_xyz(l, a, b)
+									return parsedColor{hasColorSpace: true, x: x, y: y, z: z, hex: alpha}, true
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return parsedColor{}, false
+}
+
+// Reference: https://drafts.csswg.org/css-color/#hwb-to-rgb
+func hwbToRgb(hue F64, white F64, black F64) (r F64, g F64, b F64) {
+	if white.Add(black).Value() >= 1 {
+		gray := white.Div(white.Add(black))
+		return gray, gray, gray
+	}
+	delta := white.Add(black).Neg().AddConst(1)
+	r, g, b = hslToRgb(hue, helpers.NewF64(1), helpers.NewF64(0.5))
+	r = delta.Mul(r).Add(white)
+	g = delta.Mul(g).Add(white)
+	b = delta.Mul(b).Add(white)
+	return
+}
+
+// Reference https://drafts.csswg.org/css-color/#hsl-to-rgb
+func hslToRgb(hue F64, sat F64, light F64) (r F64, g F64, b F64) {
+	hue = hue.DivConst(360.0)
+	var t2 F64
+	if light.Value() <= 0.5 {
+		t2 = sat.AddConst(1).Mul(light)
+	} else {
+		t2 = light.Add(sat).Sub(light.Mul(sat))
+	}
+	t1 := light.MulConst(2).Sub(t2)
+	r = hueToRgb(t1, t2, hue.AddConst(1.0/3.0))
+	g = hueToRgb(t1, t2, hue)
+	b = hueToRgb(t1, t2, hue.SubConst(1.0/3.0))
+	return
+}
+
+func hueToRgb(t1 F64, t2 F64, hue F64) F64 {
+	hue = hue.Sub(hue.Floor())
+	hue = hue.MulConst(6)
+	var f F64
+	if hue.Value() < 1 {
+		f = helpers.Lerp(t1, t2, hue)
+	} else if hue.Value() < 3 {
+		f = t2
+	} else if hue.Value() < 4 {
+		f = helpers.Lerp(t1, t2, hue.Neg().AddConst(4))
+	} else {
+		f = t1
+	}
+	return f
+}
+
+func packRGBA(rf F64, gf F64, bf F64, a uint32) uint32 {
+	r := floatToByte(rf.Value())
+	g := floatToByte(gf.Value())
+	b := floatToByte(bf.Value())
+	return (r << 24) | (g << 16) | (b << 8) | a
+}
+
+func floatToByte(f float64) uint32 {
+	i := int(math.Round(f * 255))
+	if i < 0 {
+		i = 0
+	} else if i > 255 {
+		i = 255
+	}
+	return uint32(i)
+}
+
+func parseAlphaByte(token css_ast.Token) (uint32, bool) {
+	if token.Kind == css_lexer.T(0) {
+		return 255, true
+	}
+	return parseColorByte(token, 255)
+}
+
+func parseColorByte(token css_ast.Token, scale float64) (uint32, bool) {
+	var i int
+	var ok bool
+
+	switch token.Kind {
+	case css_lexer.TNumber:
+		if f, err := strconv.ParseFloat(token.Text, 64); err == nil {
+			i = int(math.Round(f * scale))
+			ok = true
+		}
+
+	case css_lexer.TPercentage:
+		if f, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
+			i = int(math.Round(f * (255.0 / 100.0)))
+			ok = true
+		}
+	}
+
+	if i < 0 {
+		i = 0
+	} else if i > 255 {
+		i = 255
+	}
+	return uint32(i), ok
+}
+
+func tryToConvertToHexWithoutClipping(x F64, y F64, z F64, a uint32) (uint32, bool) {
+	r, g, b := gam_srgb(xyz_to_lin_srgb(x, y, z))
+	if r.Value() < -0.5/255 || r.Value() > 255.5/255 ||
+		g.Value() < -0.5/255 || g.Value() > 255.5/255 ||
+		b.Value() < -0.5/255 || b.Value() > 255.5/255 {
+		return 0, false
+	}
+	return packRGBA(r, g, b, a), true
+}
+
+func (p *parser) tryToGenerateColor(token css_ast.Token, color parsedColor, wouldClipColor *bool) css_ast.Token {
+	// Note: Do NOT remove color information from fully transparent colors.
+	// Safari behaves differently than other browsers for color interpolation:
+	// https://css-tricks.com/thing-know-gradients-transparent-black/
+
+	// Attempt to convert other color spaces to sRGB, and only continue if the
+	// result (rounded to the nearest byte) will be in the 0-to-1 sRGB range
+	var hex uint32
+	if !color.hasColorSpace {
+		hex = color.hex
+	} else if result, ok := tryToConvertToHexWithoutClipping(color.x, color.y, color.z, color.hex); ok {
+		hex = result
+	} else if wouldClipColor != nil {
+		*wouldClipColor = true
+		return token
+	} else {
+		r, g, b := gamut_mapping_xyz_to_srgb(color.x, color.y, color.z)
+		hex = packRGBA(r, g, b, color.hex)
+	}
+
+	if hexA(hex) == 255 {
+		token.Children = nil
+		if name, ok := shortColorName[hex]; ok && p.options.minifySyntax {
+			token.Kind = css_lexer.TIdent
+			token.Text = name
+		} else {
+			token.Kind = css_lexer.THash
+			hex >>= 8
+			compact := compactHex(hex)
+			if p.options.minifySyntax && hex == expandHex(compact) {
+				token.Text = fmt.Sprintf("%03x", compact)
+			} else {
+				token.Text = fmt.Sprintf("%06x", hex)
+			}
+		}
+	} else if !p.options.unsupportedCSSFeatures.Has(compat.HexRGBA) {
+		token.Children = nil
+		token.Kind = css_lexer.THash
+		compact := compactHex(hex)
+		if p.options.minifySyntax && hex == expandHex(compact) {
+			token.Text = fmt.Sprintf("%04x", compact)
+		} else {
+			token.Text = fmt.Sprintf("%08x", hex)
+		}
+	} else {
+		token.Kind = css_lexer.TFunction
+		token.Text = "rgba"
+		commaToken := p.commaToken(token.Loc)
+		index := hexA(hex) * 4
+		alpha := alphaFractionTable[index : index+4]
+		if space := strings.IndexByte(alpha, ' '); space != -1 {
+			alpha = alpha[:space]
+		}
+		token.Children = &[]css_ast.Token{
+			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexR(hex))}, commaToken,
+			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexG(hex))}, commaToken,
+			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: strconv.Itoa(hexB(hex))}, commaToken,
+			{Loc: token.Loc, Kind: css_lexer.TNumber, Text: alpha},
+		}
+	}
+
+	return token
+}
+
+// Every four characters in this table is the fraction for that index
+const alphaFractionTable string = "" +
+	"0   .004.008.01 .016.02 .024.027.03 .035.04 .043.047.05 .055.06 " +
+	".063.067.07 .075.08 .082.086.09 .094.098.1  .106.11 .114.118.12 " +
+	".125.13 .133.137.14 .145.15 .153.157.16 .165.17 .173.176.18 .184" +
+	".19 .192.196.2  .204.208.21 .216.22 .224.227.23 .235.24 .243.247" +
+	".25 .255.26 .263.267.27 .275.28 .282.286.29 .294.298.3  .306.31 " +
+	".314.318.32 .325.33 .333.337.34 .345.35 .353.357.36 .365.37 .373" +
+	".376.38 .384.39 .392.396.4  .404.408.41 .416.42 .424.427.43 .435" +
+	".44 .443.447.45 .455.46 .463.467.47 .475.48 .482.486.49 .494.498" +
+	".5  .506.51 .514.518.52 .525.53 .533.537.54 .545.55 .553.557.56 " +
+	".565.57 .573.576.58 .584.59 .592.596.6  .604.608.61 .616.62 .624" +
+	".627.63 .635.64 .643.647.65 .655.66 .663.667.67 .675.68 .682.686" +
+	".69 .694.698.7  .706.71 .714.718.72 .725.73 .733.737.74 .745.75 " +
+	".753.757.76 .765.77 .773.776.78 .784.79 .792.796.8  .804.808.81 " +
+	".816.82 .824.827.83 .835.84 .843.847.85 .855.86 .863.867.87 .875" +
+	".88 .882.886.89 .894.898.9  .906.91 .914.918.92 .925.93 .933.937" +
+	".94 .945.95 .953.957.96 .965.97 .973.976.98 .984.99 .992.9961   "
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_composes.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_composes.go
new file mode 100644
index 0000000..2b8aa0a
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_composes.go
@@ -0,0 +1,103 @@
+package css_parser
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type composesContext struct {
+	parentRefs   []ast.Ref
+	parentRange  logger.Range
+	problemRange logger.Range
+}
+
+func (p *parser) handleComposesPragma(context composesContext, tokens []css_ast.Token) {
+	type nameWithLoc struct {
+		loc  logger.Loc
+		text string
+	}
+	var names []nameWithLoc
+	fromGlobal := false
+
+	for i, t := range tokens {
+		if t.Kind == css_lexer.TIdent {
+			// Check for a "from" clause at the end
+			if strings.EqualFold(t.Text, "from") && i+2 == len(tokens) {
+				last := tokens[i+1]
+
+				// A string or a URL is an external file
+				if last.Kind == css_lexer.TString || last.Kind == css_lexer.TURL {
+					var importRecordIndex uint32
+					if last.Kind == css_lexer.TString {
+						importRecordIndex = uint32(len(p.importRecords))
+						p.importRecords = append(p.importRecords, ast.ImportRecord{
+							Kind:  ast.ImportComposesFrom,
+							Path:  logger.Path{Text: last.Text},
+							Range: p.source.RangeOfString(last.Loc),
+						})
+					} else {
+						importRecordIndex = last.PayloadIndex
+						p.importRecords[importRecordIndex].Kind = ast.ImportComposesFrom
+					}
+					for _, parentRef := range context.parentRefs {
+						composes := p.composes[parentRef]
+						for _, name := range names {
+							composes.ImportedNames = append(composes.ImportedNames, css_ast.ImportedComposesName{
+								ImportRecordIndex: importRecordIndex,
+								Alias:             name.text,
+								AliasLoc:          name.loc,
+							})
+						}
+					}
+					return
+				}
+
+				// An identifier must be "global"
+				if last.Kind == css_lexer.TIdent {
+					if strings.EqualFold(last.Text, "global") {
+						fromGlobal = true
+						break
+					}
+
+					p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, css_lexer.RangeOfIdentifier(p.source, last.Loc),
+						fmt.Sprintf("\"composes\" declaration uses invalid location %q", last.Text))
+					p.prevError = t.Loc
+					return
+				}
+			}
+
+			names = append(names, nameWithLoc{t.Loc, t.Text})
+			continue
+		}
+
+		// Any unexpected tokens are a syntax error
+		var text string
+		switch t.Kind {
+		case css_lexer.TURL, css_lexer.TBadURL, css_lexer.TString, css_lexer.TUnterminatedString:
+			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
+		default:
+			text = fmt.Sprintf("Unexpected %q", t.Text)
+		}
+		p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, logger.Range{Loc: t.Loc}, text)
+		p.prevError = t.Loc
+		return
+	}
+
+	// If we get here, all of these names are not references to another file
+	old := p.makeLocalSymbols
+	if fromGlobal {
+		p.makeLocalSymbols = false
+	}
+	for _, parentRef := range context.parentRefs {
+		composes := p.composes[parentRef]
+		for _, name := range names {
+			composes.Names = append(composes.Names, p.symbolForName(name.loc, name.text))
+		}
+	}
+	p.makeLocalSymbols = old
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_container.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_container.go
new file mode 100644
index 0000000..6c7ca3a
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_container.go
@@ -0,0 +1,53 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+// Scan for container names in the "container" shorthand property
+func (p *parser) processContainerShorthand(tokens []css_ast.Token) {
+	// Validate the syntax
+	for i, t := range tokens {
+		if t.Kind == css_lexer.TIdent {
+			continue
+		}
+		if t.Kind == css_lexer.TDelimSlash && i+2 == len(tokens) && tokens[i+1].Kind == css_lexer.TIdent {
+			break
+		}
+		return
+	}
+
+	// Convert any local names
+	for i, t := range tokens {
+		if t.Kind != css_lexer.TIdent {
+			break
+		}
+		p.handleSingleContainerName(&tokens[i])
+	}
+}
+
+func (p *parser) processContainerName(tokens []css_ast.Token) {
+	// Validate the syntax
+	for _, t := range tokens {
+		if t.Kind != css_lexer.TIdent {
+			return
+		}
+	}
+
+	// Convert any local names
+	for i := range tokens {
+		p.handleSingleContainerName(&tokens[i])
+	}
+}
+
+func (p *parser) handleSingleContainerName(token *css_ast.Token) {
+	if lower := strings.ToLower(token.Text); lower == "none" || cssWideAndReservedKeywords[lower] {
+		return
+	}
+
+	token.Kind = css_lexer.TSymbol
+	token.PayloadIndex = p.symbolForName(token.Loc, token.Text).Ref.InnerIndex
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font.go
new file mode 100644
index 0000000..c1644b4
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font.go
@@ -0,0 +1,138 @@
+package css_parser
+
+import (
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+// Specification: https://drafts.csswg.org/css-fonts/#font-prop
+// [ <font-style> || <font-variant-css2> || <font-weight> || <font-stretch-css3> ]? <font-size> [ / <line-height> ]? <font-family>
+func (p *parser) mangleFont(tokens []css_ast.Token) []css_ast.Token {
+	var result []css_ast.Token
+
+	// Scan up to the font size
+	pos := 0
+	for ; pos < len(tokens); pos++ {
+		token := tokens[pos]
+		if isFontSize(token) {
+			break
+		}
+
+		switch token.Kind {
+		case css_lexer.TIdent:
+			switch strings.ToLower(token.Text) {
+			case "normal":
+				// "All subproperties of the font property are first reset to their initial values"
+				// This implies that "normal" doesn't do anything. Also all of the optional values
+				// contain "normal" as an option and they are unordered so it's impossible to say
+				// what property "normal" corresponds to. Just drop these tokens to save space.
+				continue
+
+			// <font-style>
+			case "italic":
+			case "oblique":
+				if pos+1 < len(tokens) && tokens[pos+1].IsAngle() {
+					result = append(result, token, tokens[pos+1])
+					pos++
+					continue
+				}
+
+			// <font-variant-css2>
+			case "small-caps":
+
+			// <font-weight>
+			case "bold", "bolder", "lighter":
+				result = append(result, p.mangleFontWeight(token))
+				continue
+
+			// <font-stretch-css3>
+			case "ultra-condensed", "extra-condensed", "condensed", "semi-condensed",
+				"semi-expanded", "expanded", "extra-expanded", "ultra-expanded":
+
+			default:
+				// All other tokens are unrecognized, so we bail if we hit one
+				return tokens
+			}
+			result = append(result, token)
+
+		case css_lexer.TNumber:
+			// "Only values greater than or equal to 1, and less than or equal to
+			// 1000, are valid, and all other values are invalid."
+			if value, err := strconv.ParseFloat(token.Text, 64); err != nil || value < 1 || value > 1000 {
+				return tokens
+			}
+			result = append(result, token)
+
+		default:
+			// All other tokens are unrecognized, so we bail if we hit one
+			return tokens
+		}
+	}
+
+	// <font-size>
+	if pos == len(tokens) {
+		return tokens
+	}
+	result = append(result, tokens[pos])
+	pos++
+
+	// / <line-height>
+	if pos < len(tokens) && tokens[pos].Kind == css_lexer.TDelimSlash {
+		if pos+1 == len(tokens) {
+			return tokens
+		}
+		result = append(result, tokens[pos], tokens[pos+1])
+		pos += 2
+
+		// Remove the whitespace around the "/" character
+		if p.options.minifyWhitespace {
+			result[len(result)-3].Whitespace &= ^css_ast.WhitespaceAfter
+			result[len(result)-2].Whitespace = 0
+			result[len(result)-1].Whitespace &= ^css_ast.WhitespaceBefore
+		}
+	}
+
+	// <font-family>
+	if family, ok := p.mangleFontFamily(tokens[pos:]); ok {
+		if len(result) > 0 && len(family) > 0 && family[0].Kind != css_lexer.TString {
+			family[0].Whitespace |= css_ast.WhitespaceBefore
+		}
+		return append(result, family...)
+	}
+	return tokens
+}
+
+var fontSizeKeywords = map[string]bool{
+	// <absolute-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-absolute-size
+	"xx-small":  true,
+	"x-small":   true,
+	"small":     true,
+	"medium":    true,
+	"large":     true,
+	"x-large":   true,
+	"xx-large":  true,
+	"xxx-large": true,
+
+	// <relative-size>: https://drafts.csswg.org/css-fonts/#valdef-font-size-relative-size
+	"larger":  true,
+	"smaller": true,
+}
+
+// Specification: https://drafts.csswg.org/css-fonts/#font-size-prop
+func isFontSize(token css_ast.Token) bool {
+	// <length-percentage>
+	if token.Kind == css_lexer.TDimension || token.Kind == css_lexer.TPercentage {
+		return true
+	}
+
+	// <absolute-size> or <relative-size>
+	if token.Kind == css_lexer.TIdent {
+		_, ok := fontSizeKeywords[strings.ToLower(token.Text)]
+		return ok
+	}
+
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_family.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_family.go
new file mode 100644
index 0000000..8c40382
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_family.go
@@ -0,0 +1,162 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+// These keywords usually require special handling when parsing.
+
+// Declaring a property to have these values explicitly specifies a particular
+// defaulting behavior instead of setting the property to that identifier value.
+// As specified in CSS Values and Units Level 3, all CSS properties can accept
+// these values.
+//
+// For example, "font-family: 'inherit'" sets the font family to the font named
+// "inherit" while "font-family: inherit" sets the font family to the inherited
+// value.
+//
+// Note that other CSS specifications can define additional CSS-wide keywords,
+// which we should copy here whenever new ones are created so we can quote those
+// identifiers to avoid collisions with any newly-created CSS-wide keywords.
+var cssWideAndReservedKeywords = map[string]bool{
+	// CSS Values and Units Level 3: https://drafts.csswg.org/css-values-3/#common-keywords
+	"initial": true, // CSS-wide keyword
+	"inherit": true, // CSS-wide keyword
+	"unset":   true, // CSS-wide keyword
+	"default": true, // CSS reserved keyword
+
+	// CSS Cascading and Inheritance Level 5: https://drafts.csswg.org/css-cascade-5/#defaulting-keywords
+	"revert":       true, // Cascade-dependent keyword
+	"revert-layer": true, // Cascade-dependent keyword
+}
+
+// Font family names that happen to be the same as a keyword value must be
+// quoted to prevent confusion with the keywords with the same names. UAs must
+// not consider these keywords as matching the <family-name> type.
+// Specification: https://drafts.csswg.org/css-fonts/#generic-font-families
+var genericFamilyNames = map[string]bool{
+	"serif":         true,
+	"sans-serif":    true,
+	"cursive":       true,
+	"fantasy":       true,
+	"monospace":     true,
+	"system-ui":     true,
+	"emoji":         true,
+	"math":          true,
+	"fangsong":      true,
+	"ui-serif":      true,
+	"ui-sans-serif": true,
+	"ui-monospace":  true,
+	"ui-rounded":    true,
+}
+
+// Specification: https://drafts.csswg.org/css-fonts/#font-family-prop
+func (p *parser) mangleFontFamily(tokens []css_ast.Token) ([]css_ast.Token, bool) {
+	result, rest, ok := p.mangleFamilyNameOrGenericName(nil, tokens)
+	if !ok {
+		return nil, false
+	}
+
+	for len(rest) > 0 && rest[0].Kind == css_lexer.TComma {
+		result, rest, ok = p.mangleFamilyNameOrGenericName(append(result, rest[0]), rest[1:])
+		if !ok {
+			return nil, false
+		}
+	}
+
+	if len(rest) > 0 {
+		return nil, false
+	}
+
+	return result, true
+}
+
+func (p *parser) mangleFamilyNameOrGenericName(result []css_ast.Token, tokens []css_ast.Token) ([]css_ast.Token, []css_ast.Token, bool) {
+	if len(tokens) > 0 {
+		t := tokens[0]
+
+		// Handle <generic-family>
+		if t.Kind == css_lexer.TIdent && genericFamilyNames[t.Text] {
+			return append(result, t), tokens[1:], true
+		}
+
+		// Handle <family-name>
+		if t.Kind == css_lexer.TString {
+			// "If a sequence of identifiers is given as a <family-name>, the computed
+			// value is the name converted to a string by joining all the identifiers
+			// in the sequence by single spaces."
+			//
+			// More information: https://mathiasbynens.be/notes/unquoted-font-family
+			names := strings.Split(t.Text, " ")
+			for _, name := range names {
+				if !isValidCustomIdent(name, genericFamilyNames) {
+					return append(result, t), tokens[1:], true
+				}
+			}
+			for i, name := range names {
+				var whitespace css_ast.WhitespaceFlags
+				if i != 0 || !p.options.minifyWhitespace {
+					whitespace = css_ast.WhitespaceBefore
+				}
+				result = append(result, css_ast.Token{
+					Loc:        t.Loc,
+					Kind:       css_lexer.TIdent,
+					Text:       name,
+					Whitespace: whitespace,
+				})
+			}
+			return result, tokens[1:], true
+		}
+
+		// "Font family names other than generic families must either be given
+		// quoted as <string>s, or unquoted as a sequence of one or more
+		// <custom-ident>."
+		if t.Kind == css_lexer.TIdent {
+			for {
+				if !isValidCustomIdent(t.Text, genericFamilyNames) {
+					return nil, nil, false
+				}
+				result = append(result, t)
+				tokens = tokens[1:]
+				if len(tokens) == 0 || tokens[0].Kind != css_lexer.TIdent {
+					break
+				}
+				t = tokens[0]
+			}
+			return result, tokens, true
+		}
+	}
+
+	// Anything other than the cases listed above causes us to bail
+	return nil, nil, false
+}
+
+// Specification: https://drafts.csswg.org/css-values-4/#custom-idents
+func isValidCustomIdent(text string, predefinedKeywords map[string]bool) bool {
+	loweredText := strings.ToLower(text)
+
+	if predefinedKeywords[loweredText] {
+		return false
+	}
+	if cssWideAndReservedKeywords[loweredText] {
+		return false
+	}
+	if loweredText == "" {
+		return false
+	}
+
+	// validate if it contains characters which needs to be escaped
+	if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
+		return false
+	}
+	for _, c := range text {
+		if !css_lexer.IsNameContinue(c) {
+			return false
+		}
+	}
+
+	return true
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_weight.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_weight.go
new file mode 100644
index 0000000..6b8fe90
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_font_weight.go
@@ -0,0 +1,25 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+func (p *parser) mangleFontWeight(token css_ast.Token) css_ast.Token {
+	if token.Kind != css_lexer.TIdent {
+		return token
+	}
+
+	switch strings.ToLower(token.Text) {
+	case "normal":
+		token.Text = "400"
+		token.Kind = css_lexer.TNumber
+	case "bold":
+		token.Text = "700"
+		token.Kind = css_lexer.TNumber
+	}
+
+	return token
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_gradient.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_gradient.go
new file mode 100644
index 0000000..4edec47
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_gradient.go
@@ -0,0 +1,1057 @@
+package css_parser
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type gradientKind uint8
+
+const (
+	linearGradient gradientKind = iota
+	radialGradient
+	conicGradient
+)
+
+type parsedGradient struct {
+	leadingTokens []css_ast.Token
+	colorStops    []colorStop
+	kind          gradientKind
+	repeating     bool
+}
+
+type colorStop struct {
+	positions []css_ast.Token
+	color     css_ast.Token
+	midpoint  css_ast.Token // Absent if "midpoint.Kind == css_lexer.T(0)"
+}
+
+func parseGradient(token css_ast.Token) (gradient parsedGradient, success bool) {
+	if token.Kind != css_lexer.TFunction {
+		return
+	}
+
+	switch strings.ToLower(token.Text) {
+	case "linear-gradient":
+		gradient.kind = linearGradient
+
+	case "radial-gradient":
+		gradient.kind = radialGradient
+
+	case "conic-gradient":
+		gradient.kind = conicGradient
+
+	case "repeating-linear-gradient":
+		gradient.kind = linearGradient
+		gradient.repeating = true
+
+	case "repeating-radial-gradient":
+		gradient.kind = radialGradient
+		gradient.repeating = true
+
+	case "repeating-conic-gradient":
+		gradient.kind = conicGradient
+		gradient.repeating = true
+
+	default:
+		return
+	}
+
+	// Bail if any token is a "var()" since it may introduce commas
+	tokens := *token.Children
+	for _, t := range tokens {
+		if t.Kind == css_lexer.TFunction && strings.EqualFold(t.Text, "var") {
+			return
+		}
+	}
+
+	// Try to strip the initial tokens
+	if len(tokens) > 0 && !looksLikeColor(tokens[0]) {
+		i := 0
+		for i < len(tokens) && tokens[i].Kind != css_lexer.TComma {
+			i++
+		}
+		gradient.leadingTokens = tokens[:i]
+		if i < len(tokens) {
+			tokens = tokens[i+1:]
+		} else {
+			tokens = nil
+		}
+	}
+
+	// Try to parse the color stops
+	for len(tokens) > 0 {
+		// Parse the color
+		color := tokens[0]
+		if !looksLikeColor(color) {
+			return
+		}
+		tokens = tokens[1:]
+
+		// Parse up to two positions
+		var positions []css_ast.Token
+		for len(positions) < 2 && len(tokens) > 0 {
+			position := tokens[0]
+			if position.Kind.IsNumeric() || (position.Kind == css_lexer.TFunction && strings.EqualFold(position.Text, "calc")) {
+				positions = append(positions, position)
+			} else {
+				break
+			}
+			tokens = tokens[1:]
+		}
+
+		// Parse the comma
+		var midpoint css_ast.Token
+		if len(tokens) > 0 {
+			if tokens[0].Kind != css_lexer.TComma {
+				return
+			}
+			tokens = tokens[1:]
+			if len(tokens) == 0 {
+				return
+			}
+
+			// Parse the midpoint, if any
+			if len(tokens) > 0 && tokens[0].Kind.IsNumeric() {
+				midpoint = tokens[0]
+				tokens = tokens[1:]
+
+				// Followed by a mandatory comma
+				if len(tokens) == 0 || tokens[0].Kind != css_lexer.TComma {
+					return
+				}
+				tokens = tokens[1:]
+			}
+		}
+
+		// Add the color stop
+		gradient.colorStops = append(gradient.colorStops, colorStop{
+			color:     color,
+			positions: positions,
+			midpoint:  midpoint,
+		})
+	}
+
+	success = true
+	return
+}
+
+func (p *parser) generateGradient(token css_ast.Token, gradient parsedGradient) css_ast.Token {
+	var children []css_ast.Token
+	commaToken := p.commaToken(token.Loc)
+
+	children = append(children, gradient.leadingTokens...)
+	for _, stop := range gradient.colorStops {
+		if len(children) > 0 {
+			children = append(children, commaToken)
+		}
+		if len(stop.positions) == 0 && stop.midpoint.Kind == css_lexer.T(0) {
+			stop.color.Whitespace &= ^css_ast.WhitespaceAfter
+		}
+		children = append(children, stop.color)
+		children = append(children, stop.positions...)
+		if stop.midpoint.Kind != css_lexer.T(0) {
+			children = append(children, commaToken, stop.midpoint)
+		}
+	}
+
+	token.Children = &children
+	return token
+}
+
+func (p *parser) lowerAndMinifyGradient(token css_ast.Token, wouldClipColor *bool) css_ast.Token {
+	gradient, ok := parseGradient(token)
+	if !ok {
+		return token
+	}
+
+	lowerMidpoints := p.options.unsupportedCSSFeatures.Has(compat.GradientMidpoints)
+	lowerColorSpaces := p.options.unsupportedCSSFeatures.Has(compat.ColorFunctions)
+	lowerInterpolation := p.options.unsupportedCSSFeatures.Has(compat.GradientInterpolation)
+
+	// Assume that if the browser doesn't support color spaces in gradients, then
+	// it doesn't correctly interpolate non-sRGB colors even when a color space
+	// is not specified. This is the case for Firefox 120, for example, which has
+	// support for the "color()" syntax but not for color spaces in gradients.
+	// There is no entry in our feature support matrix for this edge case so we
+	// make this assumption instead.
+	//
+	// Note that this edge case means we have to _replace_ the original gradient
+	// with the expanded one instead of inserting a fallback before it. Otherwise
+	// Firefox 120 would use the original gradient instead of the fallback because
+	// it supports the syntax, but just renders it incorrectly.
+	if lowerInterpolation {
+		lowerColorSpaces = true
+	}
+
+	// Potentially expand the gradient to handle unsupported features
+	didExpand := false
+	if lowerMidpoints || lowerColorSpaces || lowerInterpolation {
+		if colorStops, ok := tryToParseColorStops(gradient); ok {
+			hasColorSpace := false
+			hasMidpoint := false
+			for _, stop := range colorStops {
+				if stop.hasColorSpace {
+					hasColorSpace = true
+				}
+				if stop.midpoint != nil {
+					hasMidpoint = true
+				}
+			}
+			remaining, colorSpace, hueMethod, hasInterpolation := removeColorInterpolation(gradient.leadingTokens)
+			if (hasInterpolation && lowerInterpolation) || (hasColorSpace && lowerColorSpaces) || (hasMidpoint && lowerMidpoints) {
+				if hasInterpolation {
+					tryToExpandGradient(token.Loc, &gradient, colorStops, remaining, colorSpace, hueMethod)
+				} else {
+					if hasColorSpace {
+						colorSpace = colorSpace_oklab
+					} else {
+						colorSpace = colorSpace_srgb
+					}
+					tryToExpandGradient(token.Loc, &gradient, colorStops, gradient.leadingTokens, colorSpace, shorterHue)
+				}
+				didExpand = true
+			}
+		}
+	}
+
+	// Lower all colors in the gradient stop
+	for i, stop := range gradient.colorStops {
+		gradient.colorStops[i].color = p.lowerAndMinifyColor(stop.color, wouldClipColor)
+	}
+
+	if p.options.unsupportedCSSFeatures.Has(compat.GradientDoublePosition) {
+		// Replace double positions with duplicated single positions
+		for _, stop := range gradient.colorStops {
+			if len(stop.positions) > 1 {
+				gradient.colorStops = switchToSinglePositions(gradient.colorStops)
+				break
+			}
+		}
+	} else if p.options.minifySyntax {
+		// Replace duplicated single positions with double positions
+		for i, stop := range gradient.colorStops {
+			if i > 0 && len(stop.positions) == 1 {
+				if prev := gradient.colorStops[i-1]; len(prev.positions) == 1 && prev.midpoint.Kind == css_lexer.T(0) &&
+					css_ast.TokensEqual([]css_ast.Token{prev.color}, []css_ast.Token{stop.color}, nil) {
+					gradient.colorStops = switchToDoublePositions(gradient.colorStops)
+					break
+				}
+			}
+		}
+	}
+
+	if p.options.minifySyntax || didExpand {
+		gradient.colorStops = removeImpliedPositions(gradient.kind, gradient.colorStops)
+	}
+
+	return p.generateGradient(token, gradient)
+}
+
+func removeImpliedPositions(kind gradientKind, colorStops []colorStop) []colorStop {
+	if len(colorStops) == 0 {
+		return colorStops
+	}
+
+	positions := make([]valueWithUnit, len(colorStops))
+	for i, stop := range colorStops {
+		if len(stop.positions) == 1 {
+			if pos, ok := tryToParseValue(stop.positions[0], kind); ok {
+				positions[i] = pos
+				continue
+			}
+		}
+		positions[i].value = helpers.NewF64(math.NaN())
+	}
+
+	start := 0
+	for start < len(colorStops) {
+		if startPos := positions[start]; !startPos.value.IsNaN() {
+			end := start + 1
+		run:
+			for colorStops[end-1].midpoint.Kind == css_lexer.T(0) && end < len(colorStops) {
+				endPos := positions[end]
+				if endPos.value.IsNaN() || endPos.unit != startPos.unit {
+					break
+				}
+
+				// Check that all values in this run are implied. Interpolation is done
+				// using the start and end positions instead of the first and second
+				// positions because it's more accurate.
+				for i := start + 1; i < end; i++ {
+					t := helpers.NewF64(float64(i - start)).DivConst(float64(end - start))
+					impliedValue := helpers.Lerp(startPos.value, endPos.value, t)
+					if positions[i].value.Sub(impliedValue).Abs().Value() > 0.01 {
+						break run
+					}
+				}
+				end++
+			}
+
+			// Clear out all implied values
+			if end-start > 1 {
+				for i := start + 1; i+1 < end; i++ {
+					colorStops[i].positions = nil
+				}
+				start = end - 1
+				continue
+			}
+		}
+		start++
+	}
+
+	if first := colorStops[0].positions; len(first) == 1 &&
+		((first[0].Kind == css_lexer.TPercentage && first[0].PercentageValue() == "0") ||
+			(first[0].Kind == css_lexer.TDimension && first[0].DimensionValue() == "0")) {
+		colorStops[0].positions = nil
+	}
+
+	if last := colorStops[len(colorStops)-1].positions; len(last) == 1 &&
+		last[0].Kind == css_lexer.TPercentage && last[0].PercentageValue() == "100" {
+		colorStops[len(colorStops)-1].positions = nil
+	}
+
+	return colorStops
+}
+
+func switchToSinglePositions(double []colorStop) (single []colorStop) {
+	for _, stop := range double {
+		for i := range stop.positions {
+			stop.positions[i].Whitespace = css_ast.WhitespaceBefore
+		}
+		for len(stop.positions) > 1 {
+			clone := stop
+			clone.positions = stop.positions[:1]
+			clone.midpoint = css_ast.Token{}
+			single = append(single, clone)
+			stop.positions = stop.positions[1:]
+		}
+		single = append(single, stop)
+	}
+	return
+}
+
+func switchToDoublePositions(single []colorStop) (double []colorStop) {
+	for i := 0; i < len(single); i++ {
+		stop := single[i]
+		if i+1 < len(single) && len(stop.positions) == 1 && stop.midpoint.Kind == css_lexer.T(0) {
+			if next := single[i+1]; len(next.positions) == 1 &&
+				css_ast.TokensEqual([]css_ast.Token{stop.color}, []css_ast.Token{next.color}, nil) {
+				double = append(double, colorStop{
+					color:     stop.color,
+					positions: []css_ast.Token{stop.positions[0], next.positions[0]},
+					midpoint:  next.midpoint,
+				})
+				i++
+				continue
+			}
+		}
+		double = append(double, stop)
+	}
+	return
+}
+
+func removeColorInterpolation(tokens []css_ast.Token) ([]css_ast.Token, colorSpace, hueMethod, bool) {
+	for i := 0; i+1 < len(tokens); i++ {
+		if in := tokens[i]; in.Kind == css_lexer.TIdent && strings.EqualFold(in.Text, "in") {
+			if space := tokens[i+1]; space.Kind == css_lexer.TIdent {
+				var colorSpace colorSpace
+				hueMethod := shorterHue
+				start := i
+				end := i + 2
+
+				// Parse the color space
+				switch strings.ToLower(space.Text) {
+				case "a98-rgb":
+					colorSpace = colorSpace_a98_rgb
+				case "display-p3":
+					colorSpace = colorSpace_display_p3
+				case "hsl":
+					colorSpace = colorSpace_hsl
+				case "hwb":
+					colorSpace = colorSpace_hwb
+				case "lab":
+					colorSpace = colorSpace_lab
+				case "lch":
+					colorSpace = colorSpace_lch
+				case "oklab":
+					colorSpace = colorSpace_oklab
+				case "oklch":
+					colorSpace = colorSpace_oklch
+				case "prophoto-rgb":
+					colorSpace = colorSpace_prophoto_rgb
+				case "rec2020":
+					colorSpace = colorSpace_rec2020
+				case "srgb":
+					colorSpace = colorSpace_srgb
+				case "srgb-linear":
+					colorSpace = colorSpace_srgb_linear
+				case "xyz":
+					colorSpace = colorSpace_xyz
+				case "xyz-d50":
+					colorSpace = colorSpace_xyz_d50
+				case "xyz-d65":
+					colorSpace = colorSpace_xyz_d65
+				default:
+					return nil, 0, 0, false
+				}
+
+				// Parse the optional hue mode for polar color spaces
+				if colorSpace.isPolar() && i+3 < len(tokens) {
+					if hue := tokens[i+3]; hue.Kind == css_lexer.TIdent && strings.EqualFold(hue.Text, "hue") {
+						if method := tokens[i+2]; method.Kind == css_lexer.TIdent {
+							switch strings.ToLower(method.Text) {
+							case "shorter":
+								hueMethod = shorterHue
+							case "longer":
+								hueMethod = longerHue
+							case "increasing":
+								hueMethod = increasingHue
+							case "decreasing":
+								hueMethod = decreasingHue
+							default:
+								return nil, 0, 0, false
+							}
+							end = i + 4
+						}
+					}
+				}
+
+				// Remove all parsed tokens
+				remaining := append(append([]css_ast.Token{}, tokens[:start]...), tokens[end:]...)
+				if n := len(remaining); n > 0 {
+					remaining[0].Whitespace &= ^css_ast.WhitespaceBefore
+					remaining[n-1].Whitespace &= ^css_ast.WhitespaceAfter
+				}
+				return remaining, colorSpace, hueMethod, true
+			}
+		}
+	}
+
+	return nil, 0, 0, false
+}
+
+type valueWithUnit struct {
+	unit  string
+	value F64
+}
+
+type parsedColorStop struct {
+	// Position information (may be a sum of two different units)
+	positionTerms []valueWithUnit
+
+	// Color midpoint (a.k.a. transition hint) information
+	midpoint *valueWithUnit
+
+	// Non-premultiplied color information in XYZ space
+	x, y, z, alpha F64
+
+	// Non-premultiplied color information in sRGB space
+	r, g, b F64
+
+	// Premultiplied color information in the interpolation color space
+	v0, v1, v2 F64
+
+	// True if the original color has a color space
+	hasColorSpace bool
+}
+
+func tryToParseColorStops(gradient parsedGradient) ([]parsedColorStop, bool) {
+	var colorStops []parsedColorStop
+
+	for _, stop := range gradient.colorStops {
+		color, ok := parseColor(stop.color)
+		if !ok {
+			return nil, false
+		}
+		var r, g, b F64
+		if !color.hasColorSpace {
+			r = helpers.NewF64(float64(hexR(color.hex))).DivConst(255)
+			g = helpers.NewF64(float64(hexG(color.hex))).DivConst(255)
+			b = helpers.NewF64(float64(hexB(color.hex))).DivConst(255)
+			color.x, color.y, color.z = lin_srgb_to_xyz(lin_srgb(r, g, b))
+		} else {
+			r, g, b = gam_srgb(xyz_to_lin_srgb(color.x, color.y, color.z))
+		}
+		parsedStop := parsedColorStop{
+			x:             color.x,
+			y:             color.y,
+			z:             color.z,
+			r:             r,
+			g:             g,
+			b:             b,
+			alpha:         helpers.NewF64(float64(hexA(color.hex))).DivConst(255),
+			hasColorSpace: color.hasColorSpace,
+		}
+
+		for i, position := range stop.positions {
+			if position, ok := tryToParseValue(position, gradient.kind); ok {
+				parsedStop.positionTerms = []valueWithUnit{position}
+			} else {
+				return nil, false
+			}
+
+			// Expand double positions
+			if i+1 < len(stop.positions) {
+				colorStops = append(colorStops, parsedStop)
+			}
+		}
+
+		if stop.midpoint.Kind != css_lexer.T(0) {
+			if midpoint, ok := tryToParseValue(stop.midpoint, gradient.kind); ok {
+				parsedStop.midpoint = &midpoint
+			} else {
+				return nil, false
+			}
+		}
+
+		colorStops = append(colorStops, parsedStop)
+	}
+
+	// Automatically fill in missing positions
+	if len(colorStops) > 0 {
+		type stopInfo struct {
+			fromPos   valueWithUnit
+			toPos     valueWithUnit
+			fromCount int32
+			toCount   int32
+		}
+
+		// Fill in missing positions for the endpoints first
+		if first := &colorStops[0]; len(first.positionTerms) == 0 {
+			first.positionTerms = []valueWithUnit{{value: helpers.NewF64(0), unit: "%"}}
+		}
+		if last := &colorStops[len(colorStops)-1]; len(last.positionTerms) == 0 {
+			last.positionTerms = []valueWithUnit{{value: helpers.NewF64(100), unit: "%"}}
+		}
+
+		// Set all positions to be greater than the position before them
+		for i, stop := range colorStops {
+			var prevPos valueWithUnit
+			for j := i - 1; j >= 0; j-- {
+				prev := colorStops[j]
+				if prev.midpoint != nil {
+					prevPos = *prev.midpoint
+					break
+				}
+				if len(prev.positionTerms) == 1 {
+					prevPos = prev.positionTerms[0]
+					break
+				}
+			}
+			if len(stop.positionTerms) == 1 {
+				if prevPos.unit == stop.positionTerms[0].unit {
+					stop.positionTerms[0].value = helpers.Max2(prevPos.value, stop.positionTerms[0].value)
+				}
+				prevPos = stop.positionTerms[0]
+			}
+			if stop.midpoint != nil && prevPos.unit == stop.midpoint.unit {
+				stop.midpoint.value = helpers.Max2(prevPos.value, stop.midpoint.value)
+			}
+		}
+
+		// Scan over all other stops with missing positions
+		infos := make([]stopInfo, len(colorStops))
+		for i, stop := range colorStops {
+			if len(stop.positionTerms) == 1 {
+				continue
+			}
+			info := &infos[i]
+
+			// Scan backward
+			for from := i - 1; from >= 0; from-- {
+				fromStop := colorStops[from]
+				info.fromCount++
+				if fromStop.midpoint != nil {
+					info.fromPos = *fromStop.midpoint
+					break
+				}
+				if len(fromStop.positionTerms) == 1 {
+					info.fromPos = fromStop.positionTerms[0]
+					break
+				}
+			}
+
+			// Scan forward
+			for to := i; to < len(colorStops); to++ {
+				info.toCount++
+				if toStop := colorStops[to]; toStop.midpoint != nil {
+					info.toPos = *toStop.midpoint
+					break
+				}
+				if to+1 < len(colorStops) {
+					if toStop := colorStops[to+1]; len(toStop.positionTerms) == 1 {
+						info.toPos = toStop.positionTerms[0]
+						break
+					}
+				}
+			}
+		}
+
+		// Then fill in all other missing positions
+		for i, stop := range colorStops {
+			if len(stop.positionTerms) != 1 {
+				info := infos[i]
+				t := helpers.NewF64(float64(info.fromCount)).DivConst(float64(info.fromCount + info.toCount))
+				if info.fromPos.unit == info.toPos.unit {
+					colorStops[i].positionTerms = []valueWithUnit{{
+						value: helpers.Lerp(info.fromPos.value, info.toPos.value, t),
+						unit:  info.fromPos.unit,
+					}}
+				} else {
+					colorStops[i].positionTerms = []valueWithUnit{{
+						value: t.Neg().AddConst(1).Mul(info.fromPos.value),
+						unit:  info.fromPos.unit,
+					}, {
+						value: t.Mul(info.toPos.value),
+						unit:  info.toPos.unit,
+					}}
+				}
+			}
+		}
+
+		// Midpoints are only supported if they use the same units as their neighbors
+		for i, stop := range colorStops {
+			if stop.midpoint != nil {
+				next := colorStops[i+1]
+				if len(stop.positionTerms) != 1 || stop.midpoint.unit != stop.positionTerms[0].unit ||
+					len(next.positionTerms) != 1 || stop.midpoint.unit != next.positionTerms[0].unit {
+					return nil, false
+				}
+			}
+		}
+	}
+
+	return colorStops, true
+}
+
+func tryToParseValue(token css_ast.Token, kind gradientKind) (result valueWithUnit, success bool) {
+	if kind == conicGradient {
+		// <angle-percentage>
+		switch token.Kind {
+		case css_lexer.TDimension:
+			degrees, ok := degreesForAngle(token)
+			if !ok {
+				return
+			}
+			result.value = helpers.NewF64(degrees).MulConst(100.0 / 360)
+			result.unit = "%"
+
+		case css_lexer.TPercentage:
+			percent, err := strconv.ParseFloat(token.PercentageValue(), 64)
+			if err != nil {
+				return
+			}
+			result.value = helpers.NewF64(percent)
+			result.unit = "%"
+
+		default:
+			return
+		}
+	} else {
+		// <length-percentage>
+		switch token.Kind {
+		case css_lexer.TNumber:
+			zero, err := strconv.ParseFloat(token.Text, 64)
+			if err != nil || zero != 0 {
+				return
+			}
+			result.value = helpers.NewF64(0)
+			result.unit = "%"
+
+		case css_lexer.TDimension:
+			dimensionValue, err := strconv.ParseFloat(token.DimensionValue(), 64)
+			if err != nil {
+				return
+			}
+			result.value = helpers.NewF64(dimensionValue)
+			result.unit = token.DimensionUnit()
+
+		case css_lexer.TPercentage:
+			percentageValue, err := strconv.ParseFloat(token.PercentageValue(), 64)
+			if err != nil {
+				return
+			}
+			result.value = helpers.NewF64(percentageValue)
+			result.unit = "%"
+
+		default:
+			return
+		}
+	}
+
+	success = true
+	return
+}
+
// tryToExpandGradient rewrites the given gradient's color stops so that a
// non-sRGB interpolation can be approximated by many small sRGB segments.
// It converts each stop into the requested interpolation color space,
// optionally duplicates the endpoints for the "longer" hue method on polar
// spaces, and then recursively subdivides each pair of adjacent stops
// (up to a bounded depth) until the interpolated color is close enough to
// the straight sRGB midpoint. On success the expanded stops are written
// back into "gradient" and true is returned.
func tryToExpandGradient(
	loc logger.Loc,
	gradient *parsedGradient,
	colorStops []parsedColorStop,
	remaining []css_ast.Token,
	colorSpace colorSpace,
	hueMethod hueMethod,
) bool {
	// Convert color stops into the interpolation color space
	for i := range colorStops {
		stop := &colorStops[i]
		v0, v1, v2 := xyz_to_colorSpace(stop.x, stop.y, stop.z, colorSpace)
		stop.v0, stop.v1, stop.v2 = premultiply(v0, v1, v2, stop.alpha, colorSpace)
	}

	// Duplicate the endpoints if they should wrap around to themselves
	if hueMethod == longerHue && colorSpace.isPolar() && len(colorStops) > 0 {
		// Note: "first" and "last" are copies, so mutating them before the
		// append/prepend leaves the original endpoint stop untouched
		if first := colorStops[0]; len(first.positionTerms) == 1 {
			if first.positionTerms[0].value.Value() < 0 {
				// Clamp a negative leading position up to 0 in place
				colorStops[0].positionTerms[0].value = helpers.NewF64(0)
			} else if first.positionTerms[0].value.Value() > 0 {
				// Prepend a copy of the first stop pinned at position 0
				first.midpoint = nil
				first.positionTerms = []valueWithUnit{{value: helpers.NewF64(0), unit: first.positionTerms[0].unit}}
				colorStops = append([]parsedColorStop{first}, colorStops...)
			}
		}
		if last := colorStops[len(colorStops)-1]; len(last.positionTerms) == 1 {
			if last.positionTerms[0].unit != "%" || last.positionTerms[0].value.Value() < 100 {
				// Append a copy of the last stop pinned at 100%
				last.positionTerms = []valueWithUnit{{value: helpers.NewF64(100), unit: "%"}}
				colorStops = append(colorStops, last)
			}
		}
	}

	// Declared separately from the assignment so the closure can recurse
	var newColorStops []colorStop
	var generateColorStops func(
		int, parsedColorStop, parsedColorStop,
		F64, F64, F64, F64, F64, F64, F64, F64,
		F64, F64, F64, F64, F64, F64, F64, F64,
	)

	// generateColorStops emits the stop at the midpoint parameter between
	// "prevT" and "nextT", recursing on both halves first/after so that
	// stops are appended in increasing position order
	generateColorStops = func(
		depth int,
		from parsedColorStop, to parsedColorStop,
		prevX, prevY, prevZ, prevR, prevG, prevB, prevA, prevT F64,
		nextX, nextY, nextZ, nextR, nextG, nextB, nextA, nextT F64,
	) {
		// Bound the subdivision so a segment expands to at most 2^5-1 stops
		if depth > 4 {
			return
		}

		t := prevT.Add(nextT).DivConst(2)
		positionT := t

		// Handle midpoints (which we have already checked uses the same units)
		if from.midpoint != nil {
			fromPos := from.positionTerms[0].value
			toPos := to.positionTerms[0].value
			stopPos := helpers.Lerp(fromPos, toPos, t)
			H := from.midpoint.value.Sub(fromPos).Div(toPos.Sub(fromPos))
			P := stopPos.Sub(fromPos).Div(toPos.Sub(fromPos))
			if H.Value() <= 0 {
				positionT = helpers.NewF64(1)
			} else if H.Value() >= 1 {
				positionT = helpers.NewF64(0)
			} else {
				positionT = P.Pow(helpers.NewF64(-1).Div(H.Log2()))
			}
		}

		v0, v1, v2 := interpolateColors(from.v0, from.v1, from.v2, to.v0, to.v1, to.v2, colorSpace, hueMethod, positionT)
		a := helpers.Lerp(from.alpha, to.alpha, positionT)
		v0, v1, v2 = unpremultiply(v0, v1, v2, a, colorSpace)
		x, y, z := colorSpace_to_xyz(v0, v1, v2, colorSpace)

		// Stop when the color is similar enough to the sRGB midpoint
		const epsilon = 4.0 / 255
		r, g, b := gam_srgb(xyz_to_lin_srgb(x, y, z))
		dr := r.Mul(a).Sub(prevR.Mul(prevA).Add(nextR.Mul(nextA)).DivConst(2))
		dg := g.Mul(a).Sub(prevG.Mul(prevA).Add(nextG.Mul(nextA)).DivConst(2))
		db := b.Mul(a).Sub(prevB.Mul(prevA).Add(nextB.Mul(nextA)).DivConst(2))
		if d := dr.Squared().Add(dg.Squared()).Add(db.Squared()); d.Value() < epsilon*epsilon {
			return
		}

		// Recursive split before this stop
		generateColorStops(depth+1, from, to,
			prevX, prevY, prevZ, prevR, prevG, prevB, prevA, prevT,
			x, y, z, r, g, b, a, t)

		// Generate this stop
		color := makeColorToken(loc, x, y, z, a)
		positionTerms := interpolatePositions(from.positionTerms, to.positionTerms, t)
		position := makePositionToken(loc, positionTerms)
		position.Whitespace = css_ast.WhitespaceBefore
		newColorStops = append(newColorStops, colorStop{
			color:     color,
			positions: []css_ast.Token{position},
		})

		// Recursive split after this stop
		generateColorStops(depth+1, from, to,
			x, y, z, r, g, b, a, t,
			nextX, nextY, nextZ, nextR, nextG, nextB, nextA, nextT)
	}

	for i, stop := range colorStops {
		// Emit the original stop itself
		color := makeColorToken(loc, stop.x, stop.y, stop.z, stop.alpha)
		position := makePositionToken(loc, stop.positionTerms)
		position.Whitespace = css_ast.WhitespaceBefore
		newColorStops = append(newColorStops, colorStop{
			color:     color,
			positions: []css_ast.Token{position},
		})

		// Generate new color stops in between as needed
		if i+1 < len(colorStops) {
			next := colorStops[i+1]
			generateColorStops(0, stop, next,
				stop.x, stop.y, stop.z, stop.r, stop.g, stop.b, stop.alpha, helpers.NewF64(0),
				next.x, next.y, next.z, next.r, next.g, next.b, next.alpha, helpers.NewF64(1))
		}
	}

	gradient.leadingTokens = remaining
	gradient.colorStops = newColorStops
	return true
}
+
+func formatFloat(value F64, decimals int) string {
+	return strings.TrimSuffix(strings.TrimRight(strconv.FormatFloat(value.Value(), 'f', decimals, 64), "0"), ".")
+}
+
+func makeDimensionOrPercentToken(loc logger.Loc, value F64, unit string) (token css_ast.Token) {
+	token.Loc = loc
+	token.Text = formatFloat(value, 2)
+	if unit == "%" {
+		token.Kind = css_lexer.TPercentage
+	} else {
+		token.Kind = css_lexer.TDimension
+		token.UnitOffset = uint16(len(token.Text))
+	}
+	token.Text += unit
+	return
+}
+
+func makePositionToken(loc logger.Loc, positionTerms []valueWithUnit) css_ast.Token {
+	if len(positionTerms) == 1 {
+		return makeDimensionOrPercentToken(loc, positionTerms[0].value, positionTerms[0].unit)
+	}
+
+	children := make([]css_ast.Token, 0, 1+2*len(positionTerms))
+	for i, term := range positionTerms {
+		if i > 0 {
+			children = append(children, css_ast.Token{
+				Loc:        loc,
+				Kind:       css_lexer.TDelimPlus,
+				Text:       "+",
+				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
+			})
+		}
+		children = append(children, makeDimensionOrPercentToken(loc, term.value, term.unit))
+	}
+
+	return css_ast.Token{
+		Loc:      loc,
+		Kind:     css_lexer.TFunction,
+		Text:     "calc",
+		Children: &children,
+	}
+}
+
// makeColorToken renders an XYZ color plus alpha as a CSS token: a hex
// hash token when the color can be converted to hex without clipping
// (6 digits for opaque colors, 8 otherwise), and a "color(xyz ...)"
// function token as the fallback.
func makeColorToken(loc logger.Loc, x F64, y F64, z F64, a F64) (color css_ast.Token) {
	color.Loc = loc
	// Quantize alpha to a 0-255 byte for the hex representation
	alpha := uint32(a.MulConst(255).Round().Value())
	if hex, ok := tryToConvertToHexWithoutClipping(x, y, z, alpha); ok {
		color.Kind = css_lexer.THash
		if alpha == 255 {
			// Fully opaque: drop the trailing alpha byte ("#rrggbb")
			color.Text = fmt.Sprintf("%06x", hex>>8)
		} else {
			// Translucent: keep all four bytes ("#rrggbbaa")
			color.Text = fmt.Sprintf("%08x", hex)
		}
	} else {
		// Fall back to "color(xyz x y z)" with three decimals per channel
		children := []css_ast.Token{
			{
				Loc:        loc,
				Kind:       css_lexer.TIdent,
				Text:       "xyz",
				Whitespace: css_ast.WhitespaceAfter,
			},
			{
				Loc:        loc,
				Kind:       css_lexer.TNumber,
				Text:       formatFloat(x, 3),
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			},
			{
				Loc:        loc,
				Kind:       css_lexer.TNumber,
				Text:       formatFloat(y, 3),
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			},
			{
				Loc:        loc,
				Kind:       css_lexer.TNumber,
				Text:       formatFloat(z, 3),
				Whitespace: css_ast.WhitespaceBefore,
			},
		}
		// Only append "/ alpha" when the color isn't fully opaque
		if a.Value() < 1 {
			children = append(children,
				css_ast.Token{
					Loc:        loc,
					Kind:       css_lexer.TDelimSlash,
					Text:       "/",
					Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
				},
				css_ast.Token{
					Loc:        loc,
					Kind:       css_lexer.TNumber,
					Text:       formatFloat(a, 3),
					Whitespace: css_ast.WhitespaceBefore,
				},
			)
		}
		color.Kind = css_lexer.TFunction
		color.Text = "color"
		color.Children = &children
	}
	return
}
+
+func interpolateHues(a, b, t F64, hueMethod hueMethod) F64 {
+	a = a.DivConst(360)
+	b = b.DivConst(360)
+	a = a.Sub(a.Floor())
+	b = b.Sub(b.Floor())
+
+	switch hueMethod {
+	case shorterHue:
+		delta := b.Sub(a)
+		if delta.Value() > 0.5 {
+			a = a.AddConst(1)
+		}
+		if delta.Value() < -0.5 {
+			b = b.AddConst(1)
+		}
+
+	case longerHue:
+		delta := b.Sub(a)
+		if delta.Value() > 0 && delta.Value() < 0.5 {
+			a = a.AddConst(1)
+		}
+		if delta.Value() > -0.5 && delta.Value() <= 0 {
+			b = b.AddConst(1)
+		}
+
+	case increasingHue:
+		if b.Value() < a.Value() {
+			b = b.AddConst(1)
+		}
+
+	case decreasingHue:
+		if a.Value() < b.Value() {
+			a = a.AddConst(1)
+		}
+	}
+
+	return helpers.Lerp(a, b, t).MulConst(360)
+}
+
+func interpolateColors(
+	a0, a1, a2 F64, b0, b1, b2 F64,
+	colorSpace colorSpace, hueMethod hueMethod, t F64,
+) (v0 F64, v1 F64, v2 F64) {
+	v1 = helpers.Lerp(a1, b1, t)
+
+	switch colorSpace {
+	case colorSpace_hsl, colorSpace_hwb:
+		v2 = helpers.Lerp(a2, b2, t)
+		v0 = interpolateHues(a0, b0, t, hueMethod)
+
+	case colorSpace_lch, colorSpace_oklch:
+		v0 = helpers.Lerp(a0, b0, t)
+		v2 = interpolateHues(a2, b2, t, hueMethod)
+
+	default:
+		v0 = helpers.Lerp(a0, b0, t)
+		v2 = helpers.Lerp(a2, b2, t)
+	}
+
+	return v0, v1, v2
+}
+
// interpolatePositions computes the color stop position at parameter "t"
// between the position term lists "a" and "b". Terms are grouped by unit
// so that mixed units (e.g. "px" and "%") stay separate; each unit's
// output is "a * (1 - t) + b * t" for that unit's terms.
func interpolatePositions(a []valueWithUnit, b []valueWithUnit, t F64) (result []valueWithUnit) {
	// Returns the index of the result term with this unit, appending a new
	// zero-valued term when the unit hasn't been seen yet
	findUnit := func(unit string) int {
		for i, x := range result {
			if x.unit == unit {
				return i
			}
		}
		result = append(result, valueWithUnit{unit: unit})
		return len(result) - 1
	}

	// "result += a * (1 - t)"
	// Note: the address is taken after "findUnit" returns, so the pointer
	// is valid even when the append inside "findUnit" reallocates "result"
	for _, term := range a {
		ptr := &result[findUnit(term.unit)]
		ptr.value = t.Neg().AddConst(1).Mul(term.value).Add(ptr.value)
	}

	// "result += b * t"
	for _, term := range b {
		ptr := &result[findUnit(term.unit)]
		ptr.value = t.Mul(term.value).Add(ptr.value)
	}

	// Remove an extra zero value for neatness. We don't remove all
	// of them because it may be important to retain a single zero.
	if len(result) > 1 {
		for i, term := range result {
			if term.value.Value() == 0 {
				copy(result[i:], result[i+1:])
				result = result[:len(result)-1]
				break
			}
		}
	}

	return
}
+
+func premultiply(v0, v1, v2, alpha F64, colorSpace colorSpace) (F64, F64, F64) {
+	if alpha.Value() < 1 {
+		switch colorSpace {
+		case colorSpace_hsl, colorSpace_hwb:
+			v2 = v2.Mul(alpha)
+		case colorSpace_lch, colorSpace_oklch:
+			v0 = v0.Mul(alpha)
+		default:
+			v0 = v0.Mul(alpha)
+			v2 = v2.Mul(alpha)
+		}
+		v1 = v1.Mul(alpha)
+	}
+	return v0, v1, v2
+}
+
+func unpremultiply(v0, v1, v2, alpha F64, colorSpace colorSpace) (F64, F64, F64) {
+	if alpha.Value() > 0 && alpha.Value() < 1 {
+		switch colorSpace {
+		case colorSpace_hsl, colorSpace_hwb:
+			v2 = v2.Div(alpha)
+		case colorSpace_lch, colorSpace_oklch:
+			v0 = v0.Div(alpha)
+		default:
+			v0 = v0.Div(alpha)
+			v2 = v2.Div(alpha)
+		}
+		v1 = v1.Div(alpha)
+	}
+	return v0, v1, v2
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_list_style.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_list_style.go
new file mode 100644
index 0000000..113769d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_list_style.go
@@ -0,0 +1,179 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+// list-style-image: <image> | none
+// <image>: <url> | <gradient>
+// <url>: <url()> | <src()>
+// <gradient>: <linear-gradient()> | <repeating-linear-gradient()> | <radial-gradient()> | <repeating-radial-gradient()>
+//
+// list-style-type: <counter-style> | <string> | none (where the string is a literal bullet marker)
+// <counter-style>: <counter-style-name> | <symbols()>
+// <counter-style-name>: not: decimal | disc | square | circle | disclosure-open | disclosure-closed | <wide keyword>
+// when parsing a <custom-ident> with conflicts, only parse one if no other thing can claim it
+
+func (p *parser) processListStyleShorthand(tokens []css_ast.Token) {
+	if len(tokens) < 1 || len(tokens) > 3 {
+		return
+	}
+
+	foundImage := false
+	foundPosition := false
+	typeIndex := -1
+	noneCount := 0
+
+	for i, t := range tokens {
+		switch t.Kind {
+		case css_lexer.TString:
+			// "list-style-type" is definitely not a <custom-ident>
+			return
+
+		case css_lexer.TURL:
+			if !foundImage {
+				foundImage = true
+				continue
+			}
+
+		case css_lexer.TFunction:
+			if !foundImage {
+				switch strings.ToLower(t.Text) {
+				case "src", "linear-gradient", "repeating-linear-gradient", "radial-gradient", "radial-linear-gradient":
+					foundImage = true
+					continue
+				}
+			}
+
+		case css_lexer.TIdent:
+			lower := strings.ToLower(t.Text)
+
+			// Note: If "none" is present, it's ambiguous whether it applies to
+			// "list-style-image" or "list-style-type". To resolve ambiguity it's
+			// applied at the end to whichever property isn't otherwise set.
+			if lower == "none" {
+				noneCount++
+				continue
+			}
+
+			if !foundPosition && (lower == "inside" || lower == "outside") {
+				foundPosition = true
+				continue
+			}
+
+			if typeIndex == -1 {
+				if cssWideAndReservedKeywords[lower] || predefinedCounterStyles[lower] {
+					// "list-style-type" is definitely not a <custom-ident>
+					return
+				}
+				typeIndex = i
+				continue
+			}
+		}
+
+		// Bail if we hit an unexpected token
+		return
+	}
+
+	if typeIndex != -1 {
+		// The first "none" applies to "list-style-image" if it's missing
+		if !foundImage && noneCount > 0 {
+			noneCount--
+		}
+
+		if noneCount > 0 {
+			// "list-style-type" is "none", not a <custom-ident>
+			return
+		}
+
+		if t := &tokens[typeIndex]; t.Kind == css_lexer.TIdent {
+			t.Kind = css_lexer.TSymbol
+			t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
+		}
+	}
+}
+
+func (p *parser) processListStyleType(t *css_ast.Token) {
+	if t.Kind == css_lexer.TIdent {
+		if lower := strings.ToLower(t.Text); lower != "none" && !cssWideAndReservedKeywords[lower] && !predefinedCounterStyles[lower] {
+			t.Kind = css_lexer.TSymbol
+			t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
+		}
+	}
+}
+
// https://drafts.csswg.org/css-counter-styles-3/#predefined-counters
//
// predefinedCounterStyles is the set of <counter-style-name> idents that
// the CSS Counter Styles specification defines itself. These must never
// be treated as author-defined <custom-ident> values when processing
// "list-style-type", since renaming them would change their meaning.
var predefinedCounterStyles = map[string]bool{
	// 6.1. Numeric:
	"arabic-indic":         true,
	"armenian":             true,
	"bengali":              true,
	"cambodian":            true,
	"cjk-decimal":          true,
	"decimal-leading-zero": true,
	"decimal":              true,
	"devanagari":           true,
	"georgian":             true,
	"gujarati":             true,
	"gurmukhi":             true,
	"hebrew":               true,
	"kannada":              true,
	"khmer":                true,
	"lao":                  true,
	"lower-armenian":       true,
	"lower-roman":          true,
	"malayalam":            true,
	"mongolian":            true,
	"myanmar":              true,
	"oriya":                true,
	"persian":              true,
	"tamil":                true,
	"telugu":               true,
	"thai":                 true,
	"tibetan":              true,
	"upper-armenian":       true,
	"upper-roman":          true,

	// 6.2. Alphabetic:
	"hiragana-iroha": true,
	"hiragana":       true,
	"katakana-iroha": true,
	"katakana":       true,
	"lower-alpha":    true,
	"lower-greek":    true,
	"lower-latin":    true,
	"upper-alpha":    true,
	"upper-latin":    true,

	// 6.3. Symbolic:
	"circle":            true,
	"disc":              true,
	"disclosure-closed": true,
	"disclosure-open":   true,
	"square":            true,

	// 6.4. Fixed:
	"cjk-earthly-branch": true,
	"cjk-heavenly-stem":  true,

	// 7.1.1. Japanese:
	"japanese-formal":   true,
	"japanese-informal": true,

	// 7.1.2. Korean:
	"korean-hangul-formal":  true,
	"korean-hanja-formal":   true,
	"korean-hanja-informal": true,

	// 7.1.3. Chinese:
	"simp-chinese-formal":   true,
	"simp-chinese-informal": true,
	"trad-chinese-formal":   true,
	"trad-chinese-informal": true,

	// 7.2. Ethiopic Numeric Counter Style:
	"ethiopic-numeric": true,
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_transform.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_transform.go
new file mode 100644
index 0000000..e5cdbae
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_decls_transform.go
@@ -0,0 +1,347 @@
+package css_parser
+
+import (
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+)
+
+func turnPercentIntoNumberIfShorter(t *css_ast.Token) {
+	if t.Kind == css_lexer.TPercentage {
+		if shifted, ok := shiftDot(t.PercentageValue(), -2); ok && len(shifted) < len(t.Text) {
+			t.Kind = css_lexer.TNumber
+			t.Text = shifted
+		}
+	}
+}
+
// https://www.w3.org/TR/css-transforms-1/#two-d-transform-functions
// https://drafts.csswg.org/css-transforms-2/#transform-functions
//
// mangleTransforms rewrites transform function tokens into shorter
// equivalent forms (e.g. "matrix(a, 0, 0, a, 0, 0)" => "scale(a)").
// Note: "n" counts tokens including the comma separators, so 1 argument
// => n == 1, 2 => n == 3, 3 => n == 5, 6 => n == 11, and 16 => n == 31;
// the value of argument "i" lives at "args[i*2]".
func (p *parser) mangleTransforms(tokens []css_ast.Token) []css_ast.Token {
	for i := range tokens {
		if token := &tokens[i]; token.Kind == css_lexer.TFunction {
			if args := *token.Children; css_ast.TokensAreCommaSeparated(args) {
				n := len(args)

				switch strings.ToLower(token.Text) {
				////////////////////////////////////////////////////////////////////////////////
				// 2D transforms

				case "matrix":
					// specifies a 2D transformation in the form of a transformation
					// matrix of the six values a, b, c, d, e, f.
					if n == 11 {
						// | a c 0 e |
						// | b d 0 f |
						// | 0 0 1 0 |
						// | 0 0 0 1 |
						a, b, c, d, e, f := args[0], args[2], args[4], args[6], args[8], args[10]
						if b.IsZero() && c.IsZero() && e.IsZero() && f.IsZero() {
							// | a 0 0 0 |
							// | 0 d 0 0 |
							// | 0 0 1 0 |
							// | 0 0 0 1 |
							if a.EqualIgnoringWhitespace(d) {
								// "matrix(a, 0, 0, a, 0, 0)" => "scale(a)"
								token.Text = "scale"
								*token.Children = args[:1]
							} else if d.IsOne() {
								// "matrix(a, 0, 0, 1, 0, 0)" => "scaleX(a)"
								token.Text = "scaleX"
								*token.Children = args[:1]
							} else if a.IsOne() {
								// "matrix(1, 0, 0, d, 0, 0)" => "scaleY(d)"
								token.Text = "scaleY"
								*token.Children = args[6:7]
							} else {
								// "matrix(a, 0, 0, d, 0, 0)" => "scale(a, d)"
								// Note: this append writes "d" into args[2] (the slot
								// after the first comma), which is safe because the
								// remaining tokens are being discarded anyway
								token.Text = "scale"
								*token.Children = append(args[:2], d)
							}

							// Note: A "matrix" cannot be directly converted into a "translate"
							// because "translate" requires units while "matrix" requires no
							// units. I'm not sure exactly what the semantics are so I'm not
							// sure if you can just add "px" or not. Even if that did work,
							// you still couldn't substitute values containing "var()" since
							// units would still not be substituted in that case.
						}
					}

				case "translate":
					// specifies a 2D translation by the vector [tx, ty], where tx is the
					// first translation-value parameter and ty is the optional second
					// translation-value parameter. If <ty> is not provided, ty has zero
					// as a value.
					if n == 1 {
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					} else if n == 3 {
						tx, ty := &args[0], &args[2]
						tx.TurnLengthOrPercentageIntoNumberIfZero()
						ty.TurnLengthOrPercentageIntoNumberIfZero()
						if ty.IsZero() {
							// "translate(tx, 0)" => "translate(tx)"
							*token.Children = args[:1]
						} else if tx.IsZero() {
							// "translate(0, ty)" => "translateY(ty)"
							token.Text = "translateY"
							*token.Children = args[2:]
						}
					}

				case "translatex":
					// specifies a translation by the given amount in the X direction.
					if n == 1 {
						// "translateX(tx)" => "translate(tx)"
						token.Text = "translate"
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					}

				case "translatey":
					// specifies a translation by the given amount in the Y direction.
					if n == 1 {
						args[0].TurnLengthOrPercentageIntoNumberIfZero()
					}

				case "scale":
					// specifies a 2D scale operation by the [sx,sy] scaling vector
					// described by the 2 parameters. If the second parameter is not
					// provided, it takes a value equal to the first. For example,
					// scale(1, 1) would leave an element unchanged, while scale(2, 2)
					// would cause it to appear twice as long in both the X and Y axes,
					// or four times its typical geometric size.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					} else if n == 3 {
						sx, sy := &args[0], &args[2]
						turnPercentIntoNumberIfShorter(sx)
						turnPercentIntoNumberIfShorter(sy)
						if sx.EqualIgnoringWhitespace(*sy) {
							// "scale(s, s)" => "scale(s)"
							*token.Children = args[:1]
						} else if sy.IsOne() {
							// "scale(s, 1)" => "scaleX(s)"
							token.Text = "scaleX"
							*token.Children = args[:1]
						} else if sx.IsOne() {
							// "scale(1, s)" => "scaleY(s)"
							token.Text = "scaleY"
							*token.Children = args[2:]
						}
					}

				case "scalex":
					// specifies a 2D scale operation using the [sx,1] scaling vector,
					// where sx is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "scaley":
					// specifies a 2D scale operation using the [1,sy] scaling vector,
					// where sy is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "rotate":
					// specifies a 2D rotation by the angle specified in the parameter
					// about the origin of the element, as defined by the
					// transform-origin property. For example, rotate(90deg) would
					// cause elements to appear rotated one-quarter of a turn in the
					// clockwise direction.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				// Note: This is considered a 2D transform even though it's specified
				// in terms of a 3D transform because it doesn't trigger Safari's 3D
				// transform bugs.
				case "rotatez":
					// same as rotate3d(0, 0, 1, <angle>), which is a 3d transform
					// equivalent to the 2d transform rotate(<angle>).
					if n == 1 {
						// "rotateZ(angle)" => "rotate(angle)"
						token.Text = "rotate"
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "skew":
					// specifies a 2D skew by [ax,ay] for X and Y. If the second
					// parameter is not provided, it has a zero value.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					} else if n == 3 {
						ax, ay := &args[0], &args[2]
						ax.TurnLengthIntoNumberIfZero()
						ay.TurnLengthIntoNumberIfZero()
						if ay.IsZero() {
							// "skew(ax, 0)" => "skew(ax)"
							*token.Children = args[:1]
						}
					}

				case "skewx":
					// specifies a 2D skew transformation along the X axis by the given
					// angle.
					if n == 1 {
						// "skewX(ax)" => "skew(ax)"
						token.Text = "skew"
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "skewy":
					// specifies a 2D skew transformation along the Y axis by the given
					// angle.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

					////////////////////////////////////////////////////////////////////////////////
					// 3D transforms

					// Note: Safari has a bug where 3D transforms render differently than
					// other transforms. This means we should not minify a 3D transform
					// into a 2D transform or it will cause a rendering difference in
					// Safari.

				case "matrix3d":
					// specifies a 3D transformation as a 4x4 homogeneous matrix of 16
					// values in column-major order.
					if n == 31 {
						// | m0 m4 m8  m12 |
						// | m1 m5 m9  m13 |
						// | m2 m6 m10 m14 |
						// | m3 m7 m11 m15 |
						// Build a bit mask: bit "i" is set when value "i" is zero,
						// and bit "16 + i" is set when value "i" is one
						mask := uint32(0)
						for i := 0; i < 16; i++ {
							if arg := args[i*2]; arg.IsZero() {
								mask |= 1 << i
							} else if arg.IsOne() {
								mask |= (1 << 16) << i
							}
						}
						// Requires m15 == 1 and every off-diagonal value == 0
						const onlyScale = 0b1000_0000_0000_0000_0111_1011_1101_1110
						if (mask & onlyScale) == onlyScale {
							// | m0 0  0   0 |
							// | 0  m5 0   0 |
							// | 0  0  m10 0 |
							// | 0  0  0   1 |
							// Here "sx" is m0 and "sy" is m5; m10 (sz) is at args[20]
							sx, sy := args[0], args[10]
							if sx.IsOne() && sy.IsOne() {
								token.Text = "scaleZ"
								*token.Children = args[20:21]
							} else {
								token.Text = "scale3d"
								*token.Children = append(append(args[0:2], args[10:12]...), args[20])
							}
						}

						// Note: A "matrix3d" cannot be directly converted into a "translate3d"
						// because "translate3d" requires units while "matrix3d" requires no
						// units. I'm not sure exactly what the semantics are so I'm not
						// sure if you can just add "px" or not. Even if that did work,
						// you still couldn't substitute values containing "var()" since
						// units would still not be substituted in that case.
					}

				case "translate3d":
					// specifies a 3D translation by the vector [tx,ty,tz], with tx,
					// ty and tz being the first, second and third translation-value
					// parameters respectively.
					if n == 5 {
						tx, ty, tz := &args[0], &args[2], &args[4]
						tx.TurnLengthOrPercentageIntoNumberIfZero()
						ty.TurnLengthOrPercentageIntoNumberIfZero()
						tz.TurnLengthIntoNumberIfZero()
						if tx.IsZero() && ty.IsZero() {
							// "translate3d(0, 0, tz)" => "translateZ(tz)"
							token.Text = "translateZ"
							*token.Children = args[4:]
						}
					}

				case "translatez":
					// specifies a 3D translation by the vector [0,0,tz] with the given
					// amount in the Z direction.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "scale3d":
					// specifies a 3D scale operation by the [sx,sy,sz] scaling vector
					// described by the 3 parameters.
					if n == 5 {
						sx, sy, sz := &args[0], &args[2], &args[4]
						turnPercentIntoNumberIfShorter(sx)
						turnPercentIntoNumberIfShorter(sy)
						turnPercentIntoNumberIfShorter(sz)
						if sx.IsOne() && sy.IsOne() {
							// "scale3d(1, 1, sz)" => "scaleZ(sz)"
							token.Text = "scaleZ"
							*token.Children = args[4:]
						}
					}

				case "scalez":
					// specifies a 3D scale operation using the [1,1,sz] scaling vector,
					// where sz is given as the parameter.
					if n == 1 {
						turnPercentIntoNumberIfShorter(&args[0])
					}

				case "rotate3d":
					// specifies a 3D rotation by the angle specified in last parameter
					// about the [x,y,z] direction vector described by the first three
					// parameters. A direction vector that cannot be normalized, such as
					// [0,0,0], will cause the rotation to not be applied.
					if n == 7 {
						x, y, z, angle := &args[0], &args[2], &args[4], &args[6]
						angle.TurnLengthIntoNumberIfZero()
						if x.IsOne() && y.IsZero() && z.IsZero() {
							// "rotate3d(1, 0, 0, angle)" => "rotateX(angle)"
							token.Text = "rotateX"
							*token.Children = args[6:]
						} else if x.IsZero() && y.IsOne() && z.IsZero() {
							// "rotate3d(0, 1, 0, angle)" => "rotateY(angle)"
							token.Text = "rotateY"
							*token.Children = args[6:]
						}
					}

				case "rotatex":
					// same as rotate3d(1, 0, 0, <angle>).
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "rotatey":
					// same as rotate3d(0, 1, 0, <angle>).
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}

				case "perspective":
					// specifies a perspective projection matrix. This matrix scales
					// points in X and Y based on their Z value, scaling points with
					// positive Z values away from the origin, and those with negative Z
					// values towards the origin. Points on the z=0 plane are unchanged.
					// The parameter represents the distance of the z=0 plane from the
					// viewer.
					if n == 1 {
						args[0].TurnLengthIntoNumberIfZero()
					}
				}

				// Trim whitespace at the ends
				if args := *token.Children; len(args) > 0 {
					args[0].Whitespace &= ^css_ast.WhitespaceBefore
					args[len(args)-1].Whitespace &= ^css_ast.WhitespaceAfter
				}
			}
		}
	}

	return tokens
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_nesting.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_nesting.go
new file mode 100644
index 0000000..a95da13
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_nesting.go
@@ -0,0 +1,490 @@
+package css_parser
+
+import (
+	"fmt"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+func (p *parser) lowerNestingInRule(rule css_ast.Rule, results []css_ast.Rule) []css_ast.Rule { // Flattens one nested rule for targets without native CSS nesting.
+	switch r := rule.Data.(type) {
+	case *css_ast.RSelector:
+		scope := func(loc logger.Loc) css_ast.ComplexSelector { // builds a ":scope" selector used to replace top-level "&"
+			return css_ast.ComplexSelector{
+				Selectors: []css_ast.CompoundSelector{{
+					SubclassSelectors: []css_ast.SubclassSelector{{
+						Range: logger.Range{Loc: loc},
+						Data:  &css_ast.SSPseudoClass{Name: "scope"},
+					}},
+				}},
+			}
+		}
+
+		parentSelectors := make([]css_ast.ComplexSelector, 0, len(r.Selectors))
+		for i, sel := range r.Selectors {
+			// Top-level "&" should be replaced with ":scope" to avoid recursion.
+			// From https://www.w3.org/TR/css-nesting-1/#nest-selector:
+			//
+			//   "When used in the selector of a nested style rule, the nesting
+			//   selector represents the elements matched by the parent rule. When
+			//   used in any other context, it represents the same elements as
+			//   :scope in that context (unless otherwise defined)."
+			//
+			substituted := make([]css_ast.CompoundSelector, 0, len(sel.Selectors))
+			for _, x := range sel.Selectors {
+				substituted = p.substituteAmpersandsInCompoundSelector(x, scope, substituted, keepLeadingCombinator)
+			}
+			r.Selectors[i] = css_ast.ComplexSelector{Selectors: substituted}
+
+			// Filter out pseudo elements because they are ignored by nested style
+			// rules. This is because pseudo-elements are not valid within :is():
+			// https://www.w3.org/TR/selectors-4/#matches-pseudo. This restriction
+			// may be relaxed in the future, but this restriction has shipped so
+			// we're stuck with it: https://github.com/w3c/csswg-drafts/issues/7433.
+			//
+			// Note: This is only for the parent selector list that is used to
+			// substitute "&" within child rules. Do not filter out the pseudo
+			// element from the top-level selector list.
+			if !sel.UsesPseudoElement() {
+				parentSelectors = append(parentSelectors, css_ast.ComplexSelector{Selectors: substituted})
+			}
+		}
+
+		// Emit this selector before its nested children
+		start := len(results)
+		results = append(results, rule)
+
+		// Lower all children and filter out ones that become empty
+		context := lowerNestingContext{
+			parentSelectors: parentSelectors,
+			loweredRules:    results,
+		}
+		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &context)
+
+		// Omit this selector entirely if it's now empty
+		if len(r.Rules) == 0 {
+			copy(context.loweredRules[start:], context.loweredRules[start+1:]) // shift children left over the now-empty parent rule
+			context.loweredRules = context.loweredRules[:len(context.loweredRules)-1]
+		}
+		return context.loweredRules
+
+	case *css_ast.RKnownAt:
+		var rules []css_ast.Rule
+		for _, child := range r.Rules {
+			rules = p.lowerNestingInRule(child, rules)
+		}
+		r.Rules = rules
+
+	case *css_ast.RAtLayer:
+		var rules []css_ast.Rule
+		for _, child := range r.Rules {
+			rules = p.lowerNestingInRule(child, rules)
+		}
+		r.Rules = rules
+	}
+
+	return append(results, rule)
+}
+
+// Lower all children and filter out ones that become empty
+func (p *parser) lowerNestingInRulesAndReturnRemaining(rules []css_ast.Rule, context *lowerNestingContext) []css_ast.Rule {
+	n := 0
+	for _, child := range rules {
+		child = p.lowerNestingInRuleWithContext(child, context)
+		if child.Data != nil { // nil Data means the child was fully hoisted into context.loweredRules
+			rules[n] = child
+			n++
+		}
+	}
+	return rules[:n] // in-place filter reuses the backing array
+}
+
+type lowerNestingContext struct {
+	parentSelectors []css_ast.ComplexSelector // selectors that "&" refers to in child rules
+	loweredRules    []css_ast.Rule            // flattened output accumulated so far
+}
+
+func (p *parser) lowerNestingInRuleWithContext(rule css_ast.Rule, context *lowerNestingContext) css_ast.Rule { // Lowers one child rule; returns a zero Rule when the child was hoisted.
+	switch r := rule.Data.(type) {
+	case *css_ast.RSelector:
+		// "a { & b {} }" => "a b {}"
+		// "a { &b {} }" => "a:is(b) {}"
+		// "a { &:hover {} }" => "a:hover {}"
+		// ".x { &b {} }" => "b.x {}"
+		// "a, b { .c, d {} }" => ":is(a, b) :is(.c, d) {}"
+		// "a, b { &.c, & d, e & {} }" => ":is(a, b).c, :is(a, b) d, e :is(a, b) {}"
+
+		// Pass 1: Canonicalize and analyze our selectors
+		canUseGroupDescendantCombinator := true // Can we do "parent «space» :is(...selectors)"?
+		canUseGroupSubSelector := true          // Can we do "parent«nospace»:is(...selectors)"?
+		var commonLeadingCombinator css_ast.Combinator
+		for i := range r.Selectors {
+			sel := &r.Selectors[i]
+
+			// Inject the implicit "&" now for simplicity later on
+			if sel.IsRelative() {
+				sel.Selectors = append([]css_ast.CompoundSelector{{NestingSelectorLoc: ast.MakeIndex32(uint32(rule.Loc.Start))}}, sel.Selectors...)
+			}
+
+			// Pseudo-elements aren't supported by ":is" (i.e. ":is(div, div::before)"
+			// is the same as ":is(div)") so we need to avoid generating ":is" if a
+			// pseudo-element is present.
+			if sel.UsesPseudoElement() {
+				canUseGroupDescendantCombinator = false
+				canUseGroupSubSelector = false
+			}
+
+			// Are all children of the form "& «something»"?
+			if len(sel.Selectors) < 2 || !sel.Selectors[0].IsSingleAmpersand() {
+				canUseGroupDescendantCombinator = false
+			} else {
+				// If all children are of the form "& «COMBINATOR» «something»", is «COMBINATOR» the same in all cases?
+				var combinator css_ast.Combinator
+				if len(sel.Selectors) >= 2 {
+					combinator = sel.Selectors[1].Combinator
+				}
+				if i == 0 {
+					commonLeadingCombinator = combinator
+				} else if commonLeadingCombinator.Byte != combinator.Byte {
+					canUseGroupDescendantCombinator = false
+				}
+			}
+
+			// Are all children of the form "&«something»"?
+			if first := sel.Selectors[0]; !first.HasNestingSelector() || first.IsSingleAmpersand() {
+				canUseGroupSubSelector = false
+			}
+		}
+
+		// Avoid generating ":is" if it's not supported
+		if p.options.unsupportedCSSFeatures.Has(compat.IsPseudoClass) && len(r.Selectors) > 1 {
+			canUseGroupDescendantCombinator = false
+			canUseGroupSubSelector = false
+		}
+
+		// Try to apply simplifications for shorter output
+		if canUseGroupDescendantCombinator {
+			// "& a, & b {}" => "& :is(a, b) {}"
+			// "& > a, & > b {}" => "& > :is(a, b) {}"
+			nestingSelectorLoc := r.Selectors[0].Selectors[0].NestingSelectorLoc
+			for i := range r.Selectors {
+				sel := &r.Selectors[i]
+				sel.Selectors = sel.Selectors[1:] // drop the leading "&"; it is re-added on the merged selector below
+			}
+			merged := p.multipleComplexSelectorsToSingleComplexSelector(r.Selectors)(rule.Loc)
+			merged.Selectors = append([]css_ast.CompoundSelector{{NestingSelectorLoc: nestingSelectorLoc}}, merged.Selectors...)
+			r.Selectors = []css_ast.ComplexSelector{merged}
+		} else if canUseGroupSubSelector {
+			// "&a, &b {}" => "&:is(a, b) {}"
+			// "> &a, > &b {}" => "> &:is(a, b) {}"
+			nestingSelectorLoc := r.Selectors[0].Selectors[0].NestingSelectorLoc
+			for i := range r.Selectors {
+				sel := &r.Selectors[i]
+				sel.Selectors[0].NestingSelectorLoc = ast.Index32{}
+			}
+			merged := p.multipleComplexSelectorsToSingleComplexSelector(r.Selectors)(rule.Loc)
+			merged.Selectors[0].NestingSelectorLoc = nestingSelectorLoc
+			r.Selectors = []css_ast.ComplexSelector{merged}
+		}
+
+		// Pass 2: Substitute "&" for the parent selector
+		if !p.options.unsupportedCSSFeatures.Has(compat.IsPseudoClass) || len(context.parentSelectors) <= 1 {
+			// If we can use ":is", or we don't have to because there's only one
+			// parent selector, or we are using ":is()" to match zero parent selectors
+			// (even if ":is" is unsupported), then substituting "&" for the parent
+			// selector is easy.
+			for i := range r.Selectors {
+				complex := &r.Selectors[i]
+				results := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
+				parent := p.multipleComplexSelectorsToSingleComplexSelector(context.parentSelectors)
+				for _, compound := range complex.Selectors {
+					results = p.substituteAmpersandsInCompoundSelector(compound, parent, results, keepLeadingCombinator)
+				}
+				complex.Selectors = results
+			}
+		} else {
+			// Otherwise if we can't use ":is", the transform is more complicated.
+			// Avoiding ":is" can lead to a combinatorial explosion of cases so we
+			// want to avoid this if possible. For example:
+			//
+			//   .first, .second, .third {
+			//     & > & {
+			//       color: red;
+			//     }
+			//   }
+			//
+			// If we can use ":is" (the easy case above) then we can do this:
+			//
+			//   :is(.first, .second, .third) > :is(.first, .second, .third) {
+			//     color: red;
+			//   }
+			//
+			// But if we can't use ":is" then we have to do this instead:
+			//
+			//   .first > .first,
+			//   .first > .second,
+			//   .first > .third,
+			//   .second > .first,
+			//   .second > .second,
+			//   .second > .third,
+			//   .third > .first,
+			//   .third > .second,
+			//   .third > .third {
+			//     color: red;
+			//   }
+			//
+			// That combinatorial explosion is what the loop below implements. Note
+			// that PostCSS's implementation of nesting gets this wrong. It generates
+			// this instead:
+			//
+			//   .first > .first,
+			//   .second > .second,
+			//   .third > .third {
+			//     color: red;
+			//   }
+			//
+			// That's not equivalent, so that's an incorrect transformation.
+			var selectors []css_ast.ComplexSelector
+			var indices []int
+			for {
+				// Every time we encounter another "&", add another dimension
+				offset := 0
+				parent := func(loc logger.Loc) css_ast.ComplexSelector {
+					if offset == len(indices) {
+						indices = append(indices, 0)
+					}
+					index := indices[offset]
+					offset++
+					return context.parentSelectors[index]
+				}
+
+				// Do the substitution for this particular combination
+				for i := range r.Selectors {
+					complex := r.Selectors[i]
+					results := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
+					for _, compound := range complex.Selectors {
+						results = p.substituteAmpersandsInCompoundSelector(compound, parent, results, keepLeadingCombinator)
+					}
+					complex.Selectors = results
+					selectors = append(selectors, complex)
+					offset = 0 // reset so the next selector reuses the same index combination
+				}
+
+				// Do addition with carry on the indices across dimensions
+				carry := len(indices)
+				for carry > 0 {
+					index := &indices[carry-1]
+					if *index+1 < len(context.parentSelectors) {
+						*index++
+						break
+					}
+					*index = 0
+					carry--
+				}
+				if carry == 0 { // all combinations have been emitted
+					break
+				}
+			}
+			r.Selectors = selectors
+		}
+
+		// Lower all child rules using our newly substituted selector
+		context.loweredRules = p.lowerNestingInRule(rule, context.loweredRules)
+		return css_ast.Rule{}
+
+	case *css_ast.RKnownAt:
+		childContext := lowerNestingContext{parentSelectors: context.parentSelectors}
+		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &childContext)
+
+		// "div { @media screen { color: red } }" => "@media screen { div { color: red } }"
+		if len(r.Rules) > 0 {
+			childContext.loweredRules = append([]css_ast.Rule{{Loc: rule.Loc, Data: &css_ast.RSelector{
+				Selectors: context.parentSelectors,
+				Rules:     r.Rules,
+			}}}, childContext.loweredRules...)
+		}
+
+		// "div { @media screen { &:hover { color: red } } }" => "@media screen { div:hover { color: red } }"
+		if len(childContext.loweredRules) > 0 {
+			r.Rules = childContext.loweredRules
+			context.loweredRules = append(context.loweredRules, rule)
+		}
+
+		return css_ast.Rule{}
+
+	case *css_ast.RAtLayer:
+		// Lower all children and filter out ones that become empty
+		childContext := lowerNestingContext{parentSelectors: context.parentSelectors}
+		r.Rules = p.lowerNestingInRulesAndReturnRemaining(r.Rules, &childContext)
+
+		// "div { @layer foo { color: red } }" => "@layer foo { div { color: red } }"
+		if len(r.Rules) > 0 {
+			childContext.loweredRules = append([]css_ast.Rule{{Loc: rule.Loc, Data: &css_ast.RSelector{
+				Selectors: context.parentSelectors,
+				Rules:     r.Rules,
+			}}}, childContext.loweredRules...)
+		}
+
+		// "div { @layer foo { &:hover { color: red } } }" => "@layer foo { div:hover { color: red } }"
+		// "div { @layer foo {} }" => "@layer foo {}" (layers have side effects, so don't remove empty ones)
+		r.Rules = childContext.loweredRules
+		context.loweredRules = append(context.loweredRules, rule)
+		return css_ast.Rule{}
+	}
+
+	return rule
+}
+
+type leadingCombinatorStrip uint8 // controls whether "&" substitution keeps a replacement's leading combinator
+
+const (
+	keepLeadingCombinator leadingCombinatorStrip = iota // preserve the combinator (top-level substitution)
+	stripLeadingCombinator                              // drop it (substitution inside ":is(...)" selector lists)
+)
+
+func (p *parser) substituteAmpersandsInCompoundSelector( // Replaces "&" in one compound selector, appending the result to "results".
+	sel css_ast.CompoundSelector,
+	replacementFn func(logger.Loc) css_ast.ComplexSelector, // produces the selector that "&" stands for
+	results []css_ast.CompoundSelector,
+	strip leadingCombinatorStrip,
+) []css_ast.CompoundSelector {
+	if sel.HasNestingSelector() {
+		nestingSelectorLoc := logger.Loc{Start: int32(sel.NestingSelectorLoc.GetIndex())}
+		sel.NestingSelectorLoc = ast.Index32{}
+		replacement := replacementFn(nestingSelectorLoc)
+
+		// Convert the replacement to a single compound selector
+		var single css_ast.CompoundSelector
+		if sel.Combinator.Byte == 0 && (len(replacement.Selectors) == 1 || len(results) == 0) {
+			// ".foo { :hover & {} }" => ":hover .foo {}"
+			// ".foo .bar { &:hover {} }" => ".foo .bar:hover {}"
+			last := len(replacement.Selectors) - 1
+			results = append(results, replacement.Selectors[:last]...) // splice all but the last compound directly into the output
+			single = replacement.Selectors[last]
+			if strip == stripLeadingCombinator {
+				single.Combinator = css_ast.Combinator{}
+			}
+			sel.Combinator = single.Combinator
+		} else if len(replacement.Selectors) == 1 {
+			// ".foo { > &:hover {} }" => ".foo > .foo:hover {}"
+			single = replacement.Selectors[0]
+			if strip == stripLeadingCombinator {
+				single.Combinator = css_ast.Combinator{}
+			}
+		} else {
+			// ".foo .bar { :hover & {} }" => ":hover :is(.foo .bar) {}"
+			// ".foo .bar { > &:hover {} }" => ".foo .bar > :is(.foo .bar):hover {}"
+			p.reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc)
+			single = css_ast.CompoundSelector{
+				SubclassSelectors: []css_ast.SubclassSelector{{
+					Range: logger.Range{Loc: nestingSelectorLoc},
+					Data: &css_ast.SSPseudoClassWithSelectorList{
+						Kind:      css_ast.PseudoClassIs,
+						Selectors: []css_ast.ComplexSelector{replacement.CloneWithoutLeadingCombinator()},
+					},
+				}},
+			}
+		}
+
+		var subclassSelectorPrefix []css_ast.SubclassSelector
+
+		// Insert the type selector
+		if single.TypeSelector != nil {
+			if sel.TypeSelector != nil {
+				// Both "&" replacement and this compound have a type selector; keep
+				// ours by wrapping it in ":is(...)" since a compound allows only one.
+				p.reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc)
+				subclassSelectorPrefix = append(subclassSelectorPrefix, css_ast.SubclassSelector{
+					Range: sel.TypeSelector.Range(),
+					Data: &css_ast.SSPseudoClassWithSelectorList{
+						Kind:      css_ast.PseudoClassIs,
+						Selectors: []css_ast.ComplexSelector{{Selectors: []css_ast.CompoundSelector{{TypeSelector: sel.TypeSelector}}}},
+					},
+				})
+			}
+			sel.TypeSelector = single.TypeSelector
+		}
+
+		// Insert the subclass selectors
+		subclassSelectorPrefix = append(subclassSelectorPrefix, single.SubclassSelectors...)
+
+		// Write the changes back
+		if len(subclassSelectorPrefix) > 0 {
+			sel.SubclassSelectors = append(subclassSelectorPrefix, sel.SubclassSelectors...)
+		}
+	}
+
+	// "div { :is(&.foo) {} }" => ":is(div.foo) {}"
+	for _, ss := range sel.SubclassSelectors {
+		if class, ok := ss.Data.(*css_ast.SSPseudoClassWithSelectorList); ok {
+			outer := make([]css_ast.ComplexSelector, 0, len(class.Selectors))
+			for _, complex := range class.Selectors {
+				inner := make([]css_ast.CompoundSelector, 0, len(complex.Selectors))
+				for _, sel := range complex.Selectors {
+					inner = p.substituteAmpersandsInCompoundSelector(sel, replacementFn, inner, stripLeadingCombinator)
+				}
+				outer = append(outer, css_ast.ComplexSelector{Selectors: inner})
+			}
+			class.Selectors = outer
+		}
+	}
+
+	return append(results, sel)
+}
+
+// Turn the list of selectors into a single selector by wrapping lists
+// without a single element with ":is(...)". Note that this may result
+// in an empty ":is()" selector (which matches nothing).
+func (p *parser) multipleComplexSelectorsToSingleComplexSelector(selectors []css_ast.ComplexSelector) func(logger.Loc) css_ast.ComplexSelector {
+	if len(selectors) == 1 {
+		return func(logger.Loc) css_ast.ComplexSelector { // single selector: no ":is(...)" wrapper needed
+			return selectors[0]
+		}
+	}
+
+	var leadingCombinator css_ast.Combinator
+	clones := make([]css_ast.ComplexSelector, len(selectors))
+
+	for i, sel := range selectors {
+		// "> a, > b" => "> :is(a, b)" (the caller should have already checked that all leading combinators are the same)
+		leadingCombinator = sel.Selectors[0].Combinator
+		clones[i] = sel.CloneWithoutLeadingCombinator()
+	}
+
+	return func(loc logger.Loc) css_ast.ComplexSelector {
+		return css_ast.ComplexSelector{
+			Selectors: []css_ast.CompoundSelector{{
+				Combinator: leadingCombinator,
+				SubclassSelectors: []css_ast.SubclassSelector{{
+					Range: logger.Range{Loc: loc},
+					Data: &css_ast.SSPseudoClassWithSelectorList{
+						Kind:      css_ast.PseudoClassIs,
+						Selectors: clones,
+					},
+				}},
+			}},
+		}
+	}
+}
+
+func (p *parser) reportNestingWithGeneratedPseudoClassIs(nestingSelectorLoc logger.Loc) { // Warns when lowering must emit ":is(...)" but the target doesn't support it.
+	if p.options.unsupportedCSSFeatures.Has(compat.IsPseudoClass) {
+		_, didWarn := p.nestingWarnings[nestingSelectorLoc]
+		if didWarn {
+			// Only warn at each location once
+			return
+		}
+		if p.nestingWarnings == nil { // lazily allocated: most files never trigger this warning
+			p.nestingWarnings = make(map[logger.Loc]struct{})
+		}
+		p.nestingWarnings[nestingSelectorLoc] = struct{}{}
+		text := "Transforming this CSS nesting syntax is not supported in the configured target environment"
+		if p.options.originalTargetEnv != "" {
+			text = fmt.Sprintf("%s (%s)", text, p.options.originalTargetEnv)
+		}
+		r := logger.Range{Loc: nestingSelectorLoc, Len: 1}
+		p.log.AddIDWithNotes(logger.MsgID_CSS_UnsupportedCSSNesting, logger.Warning, &p.tracker, r, text, []logger.MsgData{{
+			Text: "The nesting transform for this case must generate an \":is(...)\" but the configured target environment does not support the \":is\" pseudo-class."}})
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser.go
new file mode 100644
index 0000000..131ec5e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser.go
@@ -0,0 +1,2374 @@
+package css_parser
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// This is mostly a normal CSS parser with one exception: the addition of
+// support for parsing https://drafts.csswg.org/css-nesting-1/.
+
+type parser struct {
+	log               logger.Log
+	source            logger.Source
+	tokens            []css_lexer.Token // full token stream from the lexer; "index" is the cursor into it
+	allComments       []logger.Range
+	legalComments     []css_lexer.Comment
+	stack             []css_lexer.T
+	importRecords     []ast.ImportRecord
+	symbols           []ast.Symbol
+	composes          map[ast.Ref]*css_ast.Composes
+	localSymbols      []ast.LocRef
+	localScope        map[string]ast.LocRef // name => symbol for CSS-modules local names
+	globalScope       map[string]ast.LocRef // name => symbol for global names
+	nestingWarnings   map[logger.Loc]struct{} // locations already warned about (see reportNestingWithGeneratedPseudoClassIs)
+	tracker           logger.LineColumnTracker
+	enclosingAtMedia  [][]css_ast.Token
+	layersPreImport   [][]string
+	layersPostImport  [][]string
+	enclosingLayer    []string
+	anonLayerCount    int
+	index             int // current position in "tokens"
+	legalCommentIndex int
+	inSelectorSubtree int
+	prevError         logger.Loc // location of the last reported error, used to dedupe diagnostics
+	options           Options
+	nestingIsPresent  bool
+	makeLocalSymbols  bool
+	hasSeenAtImport   bool
+}
+
+type Options struct {
+	cssPrefixData map[css_ast.D]compat.CSSPrefix // per-declaration vendor-prefix info; compared key-by-key in Equal
+
+	// This is an embedded struct. Always access these directly instead of off
+	// the name "optionsThatSupportStructuralEquality". This is only grouped like
+	// this to make the equality comparison easier and safer (and hopefully faster).
+	optionsThatSupportStructuralEquality
+}
+
+type symbolMode uint8 // how class/id names are scoped (plain CSS vs. CSS modules)
+
+const (
+	symbolModeDisabled symbolMode = iota
+	symbolModeGlobal
+	symbolModeLocal
+)
+
+// Grouped separately so Equal can compare all of these with a single "!=".
+type optionsThatSupportStructuralEquality struct {
+	originalTargetEnv      string
+	unsupportedCSSFeatures compat.CSSFeature
+	minifySyntax           bool
+	minifyWhitespace       bool
+	minifyIdentifiers      bool
+	symbolMode             symbolMode
+}
+
+func OptionsFromConfig(loader config.Loader, options *config.Options) Options { // Maps the bundler-level config onto the CSS parser's Options.
+	var symbolMode symbolMode
+	switch loader {
+	case config.LoaderGlobalCSS:
+		symbolMode = symbolModeGlobal
+	case config.LoaderLocalCSS:
+		symbolMode = symbolModeLocal
+	} // other loaders leave symbolMode as symbolModeDisabled (the zero value)
+
+	return Options{
+		cssPrefixData: options.CSSPrefixData,
+
+		optionsThatSupportStructuralEquality: optionsThatSupportStructuralEquality{
+			minifySyntax:           options.MinifySyntax,
+			minifyWhitespace:       options.MinifyWhitespace,
+			minifyIdentifiers:      options.MinifyIdentifiers,
+			unsupportedCSSFeatures: options.UnsupportedCSSFeatures,
+			originalTargetEnv:      options.OriginalTargetEnv,
+			symbolMode:             symbolMode,
+		},
+	}
+}
+
+func (a *Options) Equal(b *Options) bool { // Reports whether two option sets are equivalent (structural fields + prefix map).
+	// Compare "optionsThatSupportStructuralEquality"
+	if a.optionsThatSupportStructuralEquality != b.optionsThatSupportStructuralEquality {
+		return false
+	}
+
+	// Compare "cssPrefixData"
+	if len(a.cssPrefixData) != len(b.cssPrefixData) {
+		return false
+	}
+	for k, va := range a.cssPrefixData {
+		vb, ok := b.cssPrefixData[k]
+		if !ok || va != vb {
+			return false
+		}
+	}
+	for k := range b.cssPrefixData {
+		if _, ok := a.cssPrefixData[k]; !ok { // fixed: was "b.cssPrefixData[k]", a self-lookup that could never fail
+			return false
+		}
+	}
+
+	return true
+}
+
+func Parse(log logger.Log, source logger.Source, options Options) css_ast.AST { // Tokenizes and parses an entire CSS source file into an AST.
+	result := css_lexer.Tokenize(log, source, css_lexer.Options{
+		RecordAllComments: options.minifyIdentifiers, // comment ranges are only needed for the character-frequency histogram
+	})
+	p := parser{
+		log:              log,
+		source:           source,
+		tracker:          logger.MakeLineColumnTracker(&source),
+		options:          options,
+		tokens:           result.Tokens,
+		allComments:      result.AllComments,
+		legalComments:    result.LegalComments,
+		prevError:        logger.Loc{Start: -1},
+		composes:         make(map[ast.Ref]*css_ast.Composes),
+		localScope:       make(map[string]ast.LocRef),
+		globalScope:      make(map[string]ast.LocRef),
+		makeLocalSymbols: options.symbolMode == symbolModeLocal,
+	}
+	rules := p.parseListOfRules(ruleContext{
+		isTopLevel:     true,
+		parseSelectors: true,
+	})
+	p.expect(css_lexer.TEndOfFile) // warn if anything was left unconsumed
+	return css_ast.AST{
+		Rules:                rules,
+		CharFreq:             p.computeCharacterFrequency(),
+		Symbols:              p.symbols,
+		ImportRecords:        p.importRecords,
+		ApproximateLineCount: result.ApproximateLineCount,
+		SourceMapComment:     result.SourceMapComment,
+		LocalSymbols:         p.localSymbols,
+		LocalScope:           p.localScope,
+		GlobalScope:          p.globalScope,
+		Composes:             p.composes,
+		LayersPreImport:      p.layersPreImport,
+		LayersPostImport:     p.layersPostImport,
+	}
+}
+
+// Compute a character frequency histogram for everything that's not a bound
+// symbol. This is used to modify how minified names are generated for slightly
+// better gzip compression. Even though it's a very small win, we still do it
+// because it's simple to do and very cheap to compute.
+func (p *parser) computeCharacterFrequency() *ast.CharFreq {
+	if !p.options.minifyIdentifiers {
+		return nil // histogram is only consumed by the identifier minifier
+	}
+
+	// Add everything in the file to the histogram
+	charFreq := &ast.CharFreq{}
+	charFreq.Scan(p.source.Contents, 1)
+
+	// Subtract out all comments
+	for _, commentRange := range p.allComments {
+		charFreq.Scan(p.source.TextForRange(commentRange), -1)
+	}
+
+	// Subtract out all import paths
+	for _, record := range p.importRecords {
+		if !record.SourceIndex.IsValid() { // only external paths remain as text in the output
+			charFreq.Scan(record.Path.Text, -1)
+		}
+	}
+
+	// Subtract out all symbols that will be minified
+	for _, symbol := range p.symbols {
+		if symbol.Kind == ast.SymbolLocalCSS {
+			charFreq.Scan(symbol.OriginalName, -int32(symbol.UseCountEstimate))
+		}
+	}
+
+	return charFreq
+}
+
+func (p *parser) advance() { // Moves the cursor forward one token, saturating at end of stream.
+	if p.index < len(p.tokens) {
+		p.index++
+	}
+}
+
+func (p *parser) at(index int) css_lexer.Token { // Returns the token at "index", or a synthetic EOF token past the end.
+	if index < len(p.tokens) {
+		return p.tokens[index]
+	}
+	return css_lexer.Token{
+		Kind:  css_lexer.TEndOfFile,
+		Range: logger.Range{Loc: logger.Loc{Start: int32(len(p.source.Contents))}},
+	}
+}
+
+func (p *parser) current() css_lexer.Token { // Token under the cursor.
+	return p.at(p.index)
+}
+
+func (p *parser) next() css_lexer.Token { // One-token lookahead.
+	return p.at(p.index + 1)
+}
+
+func (p *parser) raw() string { // Raw source text of the current token.
+	t := p.current()
+	return p.source.Contents[t.Range.Loc.Start:t.Range.End()]
+}
+
+func (p *parser) decoded() string { // Current token's text with escapes decoded.
+	return p.current().DecodedText(p.source.Contents)
+}
+
+func (p *parser) peek(kind css_lexer.T) bool { // True if the current token has the given kind (does not advance).
+	return kind == p.current().Kind
+}
+
+func (p *parser) eat(kind css_lexer.T) bool { // Advances past the current token only if it has the given kind.
+	if p.peek(kind) {
+		p.advance()
+		return true
+	}
+	return false
+}
+
+func (p *parser) expect(kind css_lexer.T) bool { // Like eat, but warns on mismatch; no matching-bracket location available.
+	return p.expectWithMatchingLoc(kind, logger.Loc{Start: -1})
+}
+
+func (p *parser) expectWithMatchingLoc(kind css_lexer.T, matchingLoc logger.Loc) bool { // Eats "kind" or reports a diagnostic pointing at the unbalanced opener when known.
+	if p.eat(kind) {
+		return true
+	}
+	t := p.current()
+	if (t.Flags & css_lexer.DidWarnAboutSingleLineComment) != 0 {
+		return false // the lexer already warned here; avoid a duplicate diagnostic
+	}
+
+	var text string
+	var suggestion string
+	var notes []logger.MsgData
+
+	expected := kind.String()
+	if strings.HasPrefix(expected, "\"") && strings.HasSuffix(expected, "\"") {
+		suggestion = expected[1 : len(expected)-1] // offer the literal token (minus quotes) as a fix-it
+	}
+
+	if (kind == css_lexer.TSemicolon || kind == css_lexer.TColon) && p.index > 0 && p.at(p.index-1).Kind == css_lexer.TWhitespace {
+		// Have a nice error message for forgetting a trailing semicolon or colon
+		text = fmt.Sprintf("Expected %s", expected)
+		t = p.at(p.index - 1)
+	} else if (kind == css_lexer.TCloseBrace || kind == css_lexer.TCloseBracket || kind == css_lexer.TCloseParen) &&
+		matchingLoc.Start != -1 && int(matchingLoc.Start)+1 <= len(p.source.Contents) {
+		// Have a nice error message for forgetting a closing brace/bracket/parenthesis
+		c := p.source.Contents[matchingLoc.Start : matchingLoc.Start+1]
+		text = fmt.Sprintf("Expected %s to go with %q", expected, c)
+		notes = append(notes, p.tracker.MsgData(logger.Range{Loc: matchingLoc, Len: 1}, fmt.Sprintf("The unbalanced %q is here:", c)))
+	} else {
+		switch t.Kind {
+		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
+			text = fmt.Sprintf("Expected %s but found %s", expected, t.Kind.String())
+			t.Range.Len = 0 // point at a position, not a span, for EOF/whitespace
+		case css_lexer.TBadURL, css_lexer.TUnterminatedString:
+			text = fmt.Sprintf("Expected %s but found %s", expected, t.Kind.String())
+		default:
+			text = fmt.Sprintf("Expected %s but found %q", expected, p.raw())
+		}
+	}
+
+	if t.Range.Loc.Start > p.prevError.Start { // only report each source position once
+		data := p.tracker.MsgData(t.Range, text)
+		data.Location.Suggestion = suggestion
+		p.log.AddMsgID(logger.MsgID_CSS_CSSSyntaxError, logger.Msg{Kind: logger.Warning, Data: data, Notes: notes})
+		p.prevError = t.Range.Loc
+	}
+	return false
+}
+
+func (p *parser) unexpected() { // Reports an "Unexpected ..." warning for the current token, deduplicated by position.
+	if t := p.current(); t.Range.Loc.Start > p.prevError.Start && (t.Flags&css_lexer.DidWarnAboutSingleLineComment) == 0 {
+		var text string
+		switch t.Kind {
+		case css_lexer.TEndOfFile, css_lexer.TWhitespace:
+			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
+			t.Range.Len = 0 // point at a position, not a span, for EOF/whitespace
+		case css_lexer.TBadURL, css_lexer.TUnterminatedString:
+			text = fmt.Sprintf("Unexpected %s", t.Kind.String())
+		default:
+			text = fmt.Sprintf("Unexpected %q", p.raw())
+		}
+		p.log.AddID(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, t.Range, text)
+		p.prevError = t.Range.Loc
+	}
+}
+
+func (p *parser) symbolForName(loc logger.Loc, name string) ast.LocRef { // Interns "name" in the local or global scope and bumps its use count.
+	var kind ast.SymbolKind
+	var scope map[string]ast.LocRef
+
+	if p.makeLocalSymbols {
+		kind = ast.SymbolLocalCSS
+		scope = p.localScope
+	} else {
+		kind = ast.SymbolGlobalCSS
+		scope = p.globalScope
+	}
+
+	entry, ok := scope[name]
+	if !ok { // first time this name is seen in this scope: allocate a symbol
+		entry = ast.LocRef{
+			Loc: loc,
+			Ref: ast.Ref{
+				SourceIndex: p.source.Index,
+				InnerIndex:  uint32(len(p.symbols)),
+			},
+		}
+		p.symbols = append(p.symbols, ast.Symbol{
+			Kind:         kind,
+			OriginalName: name,
+			Link:         ast.InvalidRef,
+		})
+		scope[name] = entry
+		if kind == ast.SymbolLocalCSS {
+			p.localSymbols = append(p.localSymbols, entry)
+		}
+	}
+
+	p.symbols[entry.Ref.InnerIndex].UseCountEstimate++
+	return entry
+}
+
+func (p *parser) recordAtLayerRule(layers [][]string) { // Records named "@layer" declarations, qualified by the enclosing layer path.
+	if p.anonLayerCount > 0 {
+		return // inside an anonymous layer, inner names aren't addressable from outside
+	}
+
+	for _, layer := range layers {
+		if len(p.enclosingLayer) > 0 {
+			clone := make([]string, 0, len(p.enclosingLayer)+len(layer)) // copy to avoid aliasing p.enclosingLayer's backing array
+			layer = append(append(clone, p.enclosingLayer...), layer...)
+		}
+		p.layersPostImport = append(p.layersPostImport, layer)
+	}
+}
+
+type ruleContext struct {
+	isTopLevel     bool // true for the file body; false inside "{...}" blocks
+	parseSelectors bool // true to parse selector rules, false for qualified rules (e.g. "@keyframes" bodies)
+}
+
+func (p *parser) parseListOfRules(context ruleContext) []css_ast.Rule { // Parses rules until EOF (top level) or an unmatched "}" (nested).
+	atRuleContext := atRuleContext{}
+	if context.isTopLevel {
+		atRuleContext.charsetValidity = atRuleValid
+		atRuleContext.importValidity = atRuleValid
+		atRuleContext.isTopLevel = true
+	}
+	rules := []css_ast.Rule{}
+	didFindAtImport := false
+
+loop:
+	for {
+		if context.isTopLevel {
+			p.nestingIsPresent = false // reset per rule so lowering only runs for rules that actually nest
+		}
+
+		// If there are any legal comments immediately before the current token,
+		// turn them all into comment rules and append them to the current rule list
+		for p.legalCommentIndex < len(p.legalComments) {
+			comment := p.legalComments[p.legalCommentIndex]
+			if comment.TokenIndexAfter > uint32(p.index) {
+				break
+			}
+			if comment.TokenIndexAfter == uint32(p.index) {
+				rules = append(rules, css_ast.Rule{Loc: comment.Loc, Data: &css_ast.RComment{Text: comment.Text}})
+			}
+			p.legalCommentIndex++
+		}
+
+		switch p.current().Kind {
+		case css_lexer.TEndOfFile:
+			break loop
+
+		case css_lexer.TCloseBrace:
+			if !context.isTopLevel {
+				break loop
+			}
+
+		case css_lexer.TWhitespace:
+			p.advance()
+			continue
+
+		case css_lexer.TAtKeyword:
+			rule := p.parseAtRule(atRuleContext)
+
+			// Disallow "@charset" and "@import" after other rules
+			if context.isTopLevel {
+				switch r := rule.Data.(type) {
+				case *css_ast.RAtCharset:
+					// This doesn't invalidate anything because it always comes first
+
+				case *css_ast.RAtImport:
+					didFindAtImport = true
+					if atRuleContext.charsetValidity == atRuleValid {
+						atRuleContext.afterLoc = rule.Loc
+						atRuleContext.charsetValidity = atRuleInvalidAfter
+					}
+
+				case *css_ast.RAtLayer:
+					if atRuleContext.charsetValidity == atRuleValid {
+						atRuleContext.afterLoc = rule.Loc
+						atRuleContext.charsetValidity = atRuleInvalidAfter
+					}
+
+					// From the specification: "Note: No @layer rules are allowed between
+					// @import and @namespace rules. Any @layer rule that comes after an
+					// @import or @namespace rule will cause any subsequent @import or
+					// @namespace rules to be ignored."
+					if atRuleContext.importValidity == atRuleValid && (r.Rules != nil || didFindAtImport) {
+						atRuleContext.afterLoc = rule.Loc
+						atRuleContext.charsetValidity = atRuleInvalidAfter
+						atRuleContext.importValidity = atRuleInvalidAfter
+					}
+
+				default:
+					if atRuleContext.importValidity == atRuleValid {
+						atRuleContext.afterLoc = rule.Loc
+						atRuleContext.charsetValidity = atRuleInvalidAfter
+						atRuleContext.importValidity = atRuleInvalidAfter
+					}
+				}
+			}
+
+			// Lower CSS nesting if it's not supported (but only at the top level)
+			if p.nestingIsPresent && p.options.unsupportedCSSFeatures.Has(compat.Nesting) && context.isTopLevel {
+				rules = p.lowerNestingInRule(rule, rules)
+			} else {
+				rules = append(rules, rule)
+			}
+			continue
+
+		case css_lexer.TCDO, css_lexer.TCDC: // "<!--" and "-->" are allowed (and skipped) at the top level only
+			if context.isTopLevel {
+				p.advance()
+				continue
+			}
+		}
+
+		if atRuleContext.importValidity == atRuleValid { // any non-at rule ends the "@import" prelude
+			atRuleContext.afterLoc = p.current().Range.Loc
+			atRuleContext.charsetValidity = atRuleInvalidAfter
+			atRuleContext.importValidity = atRuleInvalidAfter
+		}
+
+		// Note: CSS recently changed to parse and discard declarations
+		// here instead of treating them as the start of a qualified rule.
+		// See also: https://github.com/w3c/csswg-drafts/issues/8834
+		if !context.isTopLevel {
+			if scan, index := p.scanForEndOfRule(); scan == endOfRuleSemicolon {
+				tokens := p.convertTokens(p.tokens[p.index:index])
+				rules = append(rules, css_ast.Rule{Loc: p.current().Range.Loc, Data: &css_ast.RBadDeclaration{Tokens: tokens}})
+				p.index = index + 1 // skip past the terminating semicolon too
+				continue
+			}
+		}
+
+		var rule css_ast.Rule
+		if context.parseSelectors {
+			rule = p.parseSelectorRule(context.isTopLevel, parseSelectorOpts{})
+		} else {
+			rule = p.parseQualifiedRule(parseQualifiedRuleOpts{isTopLevel: context.isTopLevel})
+		}
+
+		// Lower CSS nesting if it's not supported (but only at the top level)
+		if p.nestingIsPresent && p.options.unsupportedCSSFeatures.Has(compat.Nesting) && context.isTopLevel {
+			rules = p.lowerNestingInRule(rule, rules)
+		} else {
+			rules = append(rules, rule)
+		}
+	}
+
+	if p.options.minifySyntax {
+		rules = p.mangleRules(rules, context.isTopLevel)
+	}
+	return rules
+}
+
+// listOfDeclarationsOpts configures parseListOfDeclarations.
+type listOfDeclarationsOpts struct {
+	// Non-nil when parsing inside a CSS modules "composes" context; passed
+	// through to processDeclarations and nested selector rules.
+	composesContext      *composesContext
+	// When true, a nested no-op "& { ... }" rule may have its declarations
+	// inlined into the parent rule's body during minification.
+	canInlineNoOpNesting bool
+}
+
+// parseListOfDeclarations parses the contents of a "{}" block: declarations,
+// at-rules, and (with CSS nesting) nested selector rules. The collected list
+// is post-processed and, when minifying, mangled before being returned.
+func (p *parser) parseListOfDeclarations(opts listOfDeclarationsOpts) (list []css_ast.Rule) {
+	list = []css_ast.Rule{}
+	foundNesting := false
+
+	for {
+		switch p.current().Kind {
+		case css_lexer.TWhitespace, css_lexer.TSemicolon:
+			p.advance()
+
+		case css_lexer.TEndOfFile, css_lexer.TCloseBrace:
+			// End of the block: post-process everything we collected
+			list = p.processDeclarations(list, opts.composesContext)
+			if p.options.minifySyntax {
+				list = p.mangleRules(list, false /* isTopLevel */)
+
+				// Pull out all unnecessarily-nested declarations and stick them at the end
+				if opts.canInlineNoOpNesting {
+					// "a { & { x: y } }" => "a { x: y }"
+					// "a { & { b: c } d: e }" => "a { d: e; b: c }"
+					if foundNesting {
+						var inlineDecls []css_ast.Rule
+						n := 0
+						for _, rule := range list {
+							// A rule whose only selector is a bare "&" is a no-op
+							// wrapper; hoist its declarations out
+							if rule, ok := rule.Data.(*css_ast.RSelector); ok && len(rule.Selectors) == 1 {
+								if sel := rule.Selectors[0]; len(sel.Selectors) == 1 && sel.Selectors[0].IsSingleAmpersand() {
+									inlineDecls = append(inlineDecls, rule.Rules...)
+									continue
+								}
+							}
+							list[n] = rule
+							n++
+						}
+						list = append(list[:n], inlineDecls...)
+					}
+				} else {
+					// "a, b::before { & { x: y } }" => "a, b::before { & { x: y } }"
+				}
+			}
+			return
+
+		case css_lexer.TAtKeyword:
+			// An at-rule nested inside a selector implies CSS nesting is in use
+			if p.inSelectorSubtree > 0 {
+				p.nestingIsPresent = true
+			}
+			list = append(list, p.parseAtRule(atRuleContext{
+				isDeclarationList:    true,
+				canInlineNoOpNesting: opts.canInlineNoOpNesting,
+			}))
+
+		// Reference: https://drafts.csswg.org/css-nesting-1/
+		default:
+			// A "{" before the end of the rule means this is a nested selector
+			// rule rather than a declaration
+			if scan, _ := p.scanForEndOfRule(); scan == endOfRuleOpenBrace {
+				p.nestingIsPresent = true
+				foundNesting = true
+				rule := p.parseSelectorRule(false, parseSelectorOpts{
+					isDeclarationContext: true,
+					composesContext:      opts.composesContext,
+				})
+
+				// If this rule was a single ":global" or ":local", inline it here. This
+				// is handled differently than a bare "&" with normal CSS nesting because
+				// that would be inlined at the end of the parent rule's body instead,
+				// which is probably unexpected (e.g. it would trip people up when trying
+				// to write rules in a specific order).
+				if sel, ok := rule.Data.(*css_ast.RSelector); ok && len(sel.Selectors) == 1 {
+					if first := sel.Selectors[0]; len(first.Selectors) == 1 {
+						if first := first.Selectors[0]; first.WasEmptyFromLocalOrGlobal && first.IsSingleAmpersand() {
+							list = append(list, sel.Rules...)
+							continue
+						}
+					}
+				}
+
+				list = append(list, rule)
+			} else {
+				list = append(list, p.parseDeclaration())
+			}
+		}
+	}
+}
+
+// mangleRules performs syntax-level minification on a list of rules: it
+// drops empty rules where that is safe, collapses "@layer" nesting,
+// unwraps "@media" rules that duplicate an enclosing "@media" condition,
+// and merges adjacent selector rules with identical bodies. Top-level
+// rules are deduplicated by the linker instead (for cross-file mangling),
+// so the duplicate-rule pass here only runs when "isTopLevel" is false.
+func (p *parser) mangleRules(rules []css_ast.Rule, isTopLevel bool) []css_ast.Rule {
+	// Remove empty rules
+	mangledRules := make([]css_ast.Rule, 0, len(rules))
+	var prevNonComment css_ast.R
+next:
+	for _, rule := range rules {
+		nextNonComment := rule.Data
+
+		switch r := rule.Data.(type) {
+		case *css_ast.RAtKeyframes:
+			// Do not remove empty "@keyframe foo {}" rules. Even empty rules still
+			// dispatch JavaScript animation events, so removing them changes
+			// behavior: https://bugzilla.mozilla.org/show_bug.cgi?id=1004377.
+
+		case *css_ast.RAtLayer:
+			if len(r.Rules) == 0 && len(r.Names) > 0 {
+				// Do not remove empty "@layer foo {}" rules. The specification says:
+				// "Cascade layers are sorted by the order in which they first are
+				// declared, with nested layers grouped within their parent layers
+				// before any unlayered rules." So removing empty rules could change
+				// the order in which they are first declared, and is therefore invalid.
+				//
+				// We can turn "@layer foo {}" into "@layer foo;" to be shorter. But
+				// don't collapse anonymous "@layer {}" into "@layer;" because that is
+				// a syntax error.
+				r.Rules = nil
+			} else if len(r.Rules) == 1 && len(r.Names) == 1 {
+				// Only collapse layers if each layer has exactly one name
+				if r2, ok := r.Rules[0].Data.(*css_ast.RAtLayer); ok && len(r2.Names) == 1 {
+					// "@layer a { @layer b {} }" => "@layer a.b;"
+					// "@layer a { @layer b { c {} } }" => "@layer a.b { c {} }"
+					r.Names[0] = append(r.Names[0], r2.Names[0]...)
+					r.Rules = r2.Rules
+				}
+			}
+
+		case *css_ast.RKnownAt:
+			if len(r.Rules) == 0 && atKnownRuleCanBeRemovedIfEmpty[r.AtToken] {
+				continue
+			}
+
+			// Unwrap "@media" rules that duplicate conditions from a parent "@media"
+			// rule. This is unlikely to be authored manually but can be automatically
+			// generated when using a CSS framework such as Tailwind.
+			//
+			//   @media (min-width: 1024px) {
+			//     .md\:class {
+			//       color: red;
+			//     }
+			//     @media (min-width: 1024px) {
+			//       .md\:class {
+			//         color: red;
+			//       }
+			//     }
+			//   }
+			//
+			// This converts that code into the following:
+			//
+			//   @media (min-width: 1024px) {
+			//     .md\:class {
+			//       color: red;
+			//     }
+			//     .md\:class {
+			//       color: red;
+			//     }
+			//   }
+			//
+			// Which can then be mangled further.
+			if strings.EqualFold(r.AtToken, "media") {
+				for _, prelude := range p.enclosingAtMedia {
+					if css_ast.TokensEqualIgnoringWhitespace(r.Prelude, prelude) {
+						mangledRules = append(mangledRules, r.Rules...)
+						continue next
+					}
+				}
+			}
+
+		case *css_ast.RSelector:
+			if len(r.Rules) == 0 {
+				continue
+			}
+
+			// Merge adjacent selectors with the same content
+			// "a { color: red; } b { color: red; }" => "a, b { color: red; }"
+			//
+			// Note: "r" is already the *css_ast.RSelector bound by the type
+			// switch above, so no second type assertion on "rule.Data" is
+			// needed here (the previous code re-asserted and shadowed "r").
+			if prevNonComment != nil {
+				if prev, ok := prevNonComment.(*css_ast.RSelector); ok && css_ast.RulesEqual(r.Rules, prev.Rules, nil) &&
+					isSafeSelectors(r.Selectors) && isSafeSelectors(prev.Selectors) {
+				nextSelector:
+					for _, sel := range r.Selectors {
+						for _, prevSel := range prev.Selectors {
+							if sel.Equal(prevSel, nil) {
+								// Don't add duplicate selectors more than once
+								continue nextSelector
+							}
+						}
+						prev.Selectors = append(prev.Selectors, sel)
+					}
+					continue
+				}
+			}
+
+		case *css_ast.RComment:
+			// Comments are transparent for the adjacent-selector merge above
+			nextNonComment = nil
+		}
+
+		if nextNonComment != nil {
+			prevNonComment = nextNonComment
+		}
+
+		mangledRules = append(mangledRules, rule)
+	}
+
+	// Mangle non-top-level rules using a back-to-front pass. Top-level rules
+	// will be mangled by the linker instead for cross-file rule mangling.
+	if !isTopLevel {
+		remover := MakeDuplicateRuleMangler(ast.SymbolMap{})
+		mangledRules = remover.RemoveDuplicateRulesInPlace(p.source.Index, mangledRules, p.importRecords)
+	}
+
+	return mangledRules
+}
+
+// ruleEntry records one previously-seen rule plus the index of the
+// RemoveDuplicateRulesInPlace call that added it, so later calls can tell
+// whether two rules came from different files.
+type ruleEntry struct {
+	data        css_ast.R
+	callCounter uint32
+}
+
+// hashEntry collects all seen rules that share the same hash value.
+type hashEntry struct {
+	rules []ruleEntry
+}
+
+// callEntry remembers the import records and source index passed to one
+// call of RemoveDuplicateRulesInPlace.
+type callEntry struct {
+	importRecords []ast.ImportRecord
+	sourceIndex   uint32
+}
+
+// DuplicateRuleRemover removes duplicate CSS rules across one or more
+// calls to RemoveDuplicateRulesInPlace, keeping only the last copy.
+type DuplicateRuleRemover struct {
+	entries map[uint32]hashEntry
+	calls   []callEntry
+	check   css_ast.CrossFileEqualityCheck
+}
+
+// MakeDuplicateRuleMangler creates a DuplicateRuleRemover whose cross-file
+// equality checks resolve names through the given symbol map.
+func MakeDuplicateRuleMangler(symbols ast.SymbolMap) DuplicateRuleRemover {
+	remover := DuplicateRuleRemover{entries: make(map[uint32]hashEntry)}
+	remover.check = css_ast.CrossFileEqualityCheck{Symbols: symbols}
+	return remover
+}
+
+// RemoveDuplicateRulesInPlace filters "rules" so that only the last copy of
+// each duplicate rule remains, compacting survivors into the tail of the
+// slice and returning that tail. It may be called multiple times (e.g. once
+// per file by the linker); duplicates are detected across calls using the
+// remembered import records from each call.
+func (remover *DuplicateRuleRemover) RemoveDuplicateRulesInPlace(sourceIndex uint32, rules []css_ast.Rule, importRecords []ast.ImportRecord) []css_ast.Rule {
+	// The caller may call this function multiple times, each with a different
+	// set of import records. Remember each set of import records for equality
+	// checks later.
+	callCounter := uint32(len(remover.calls))
+	remover.calls = append(remover.calls, callEntry{importRecords, sourceIndex})
+
+	// Remove duplicate rules, scanning from the back so we keep the last
+	// duplicate. Note that the linker calls this, so we do not want to do
+	// anything that modifies the rules themselves. One reason is that ASTs
+	// are immutable at the linking stage. Another reason is that merging
+	// CSS ASTs from separate files will mess up source maps because a single
+	// AST cannot simultaneously represent offsets from multiple files.
+	n := len(rules)
+	start := n
+skipRule:
+	for i := n - 1; i >= 0; i-- {
+		rule := rules[i]
+
+		// For duplicate rules, omit all but the last copy
+		if hash, ok := rule.Data.Hash(); ok {
+			entry := remover.entries[hash]
+			for _, current := range entry.rules {
+				var check *css_ast.CrossFileEqualityCheck
+
+				// If this rule was from another file, then pass along both arrays
+				// of import records so that the equality check for "url()" tokens
+				// can use them to check for equality.
+				if current.callCounter != callCounter {
+					// Reuse the same memory allocation
+					check = &remover.check
+					call := remover.calls[current.callCounter]
+					check.ImportRecordsA = importRecords
+					check.ImportRecordsB = call.importRecords
+					check.SourceIndexA = sourceIndex
+					check.SourceIndexB = call.sourceIndex
+				}
+
+				if rule.Data.Equal(current.data, check) {
+					continue skipRule
+				}
+			}
+			entry.rules = append(entry.rules, ruleEntry{
+				data:        rule.Data,
+				callCounter: callCounter,
+			})
+			remover.entries[hash] = entry
+		}
+
+		// Keep this rule: move it into the growing tail of survivors
+		start--
+		rules[start] = rule
+	}
+
+	return rules[start:]
+}
+
+// nonDeprecatedElementsSupportedByIE7 lists HTML element names whose type
+// selectors are considered "safe" for the adjacent-rule merge performed by
+// mangleRules (see isSafeSelectors for the rationale).
+//
+// Reference: https://developer.mozilla.org/en-US/docs/Web/HTML/Element
+var nonDeprecatedElementsSupportedByIE7 = map[string]bool{
+	"a":          true,
+	"abbr":       true,
+	"address":    true,
+	"area":       true,
+	"b":          true,
+	"base":       true,
+	"blockquote": true,
+	"body":       true,
+	"br":         true,
+	"button":     true,
+	"caption":    true,
+	"cite":       true,
+	"code":       true,
+	"col":        true,
+	"colgroup":   true,
+	"dd":         true,
+	"del":        true,
+	"dfn":        true,
+	"div":        true,
+	"dl":         true,
+	"dt":         true,
+	"em":         true,
+	"embed":      true,
+	"fieldset":   true,
+	"form":       true,
+	"h1":         true,
+	"h2":         true,
+	"h3":         true,
+	"h4":         true,
+	"h5":         true,
+	"h6":         true,
+	"head":       true,
+	"hr":         true,
+	"html":       true,
+	"i":          true,
+	"iframe":     true,
+	"img":        true,
+	"input":      true,
+	"ins":        true,
+	"kbd":        true,
+	"label":      true,
+	"legend":     true,
+	"li":         true,
+	"link":       true,
+	"map":        true,
+	"menu":       true,
+	"meta":       true,
+	"noscript":   true,
+	"object":     true,
+	"ol":         true,
+	"optgroup":   true,
+	"option":     true,
+	"p":          true,
+	"param":      true,
+	"pre":        true,
+	"q":          true,
+	"ruby":       true,
+	"s":          true,
+	"samp":       true,
+	"script":     true,
+	"select":     true,
+	"small":      true,
+	"span":       true,
+	"strong":     true,
+	"style":      true,
+	"sub":        true,
+	"sup":        true,
+	"table":      true,
+	"tbody":      true,
+	"td":         true,
+	"textarea":   true,
+	"tfoot":      true,
+	"th":         true,
+	"thead":      true,
+	"title":      true,
+	"tr":         true,
+	"u":          true,
+	"ul":         true,
+	"var":        true,
+}
+
+// This only returns true if all of these selectors are considered "safe" which
+// means that they are very likely to work in any browser a user might reasonably
+// be using. We do NOT want to merge adjacent qualified rules with the same body
+// if any of the selectors are unsafe, since then browsers which don't support
+// that particular feature would ignore the entire merged qualified rule:
+//
+//	Input:
+//	  a { color: red }
+//	  b { color: red }
+//	  input::-moz-placeholder { color: red }
+//
+//	Valid output:
+//	  a, b { color: red }
+//	  input::-moz-placeholder { color: red }
+//
+//	Invalid output:
+//	  a, b, input::-moz-placeholder { color: red }
+//
+// This considers IE 7 and above to be a browser that a user could possibly use.
+// Versions of IE less than 6 are not considered.
+func isSafeSelectors(complexSelectors []css_ast.ComplexSelector) bool {
+	for _, complexSel := range complexSelectors {
+		for _, compoundSel := range complexSel.Selectors {
+			// The nesting selector "&" is an extension and therefore unsafe
+			// Reference: https://drafts.csswg.org/css-nesting-1/
+			if compoundSel.HasNestingSelector() {
+				return false
+			}
+
+			// "Before Internet Explorer 10, the combinator only works in standards mode"
+			// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/CSS_Selectors
+			if compoundSel.Combinator.Byte != 0 {
+				return false
+			}
+
+			if ts := compoundSel.TypeSelector; ts != nil {
+				// Namespaces don't work in IE before version 9
+				// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Type_selectors
+				if ts.NamespacePrefix != nil {
+					return false
+				}
+
+				// Bail if this element is either deprecated or not supported in IE 7
+				if ts.Name.Kind == css_lexer.TIdent && !nonDeprecatedElementsSupportedByIE7[ts.Name.Text] {
+					return false
+				}
+			}
+
+			for _, subclass := range compoundSel.SubclassSelectors {
+				switch data := subclass.Data.(type) {
+				case *css_ast.SSAttribute:
+					// Case modifiers don't work in IE at all
+					// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Attribute_selectors
+					if data.MatcherModifier != 0 {
+						return false
+					}
+
+				case *css_ast.SSPseudoClass:
+					// Only a hard-coded list of pseudo classes known to work
+					// everywhere is accepted. For example, ":focus" doesn't work in IE 7.
+					// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/Pseudo-classes
+					isKnownSafe := false
+					if data.Args == nil && !data.IsElement {
+						switch data.Name {
+						case "active", "first-child", "hover", "link", "visited":
+							isKnownSafe = true
+						}
+					}
+					if !isKnownSafe {
+						return false
+					}
+
+				case *css_ast.SSPseudoClassWithSelectorList:
+					// These definitely don't work in IE 7
+					return false
+				}
+			}
+		}
+	}
+	return true
+}
+
+// parseURLOrString consumes one URL value and returns its decoded text, its
+// source range, and whether anything was consumed. It handles three token
+// forms: a quoted string, a "url(...)" token, and a "url(" function whose
+// single argument is a quoted string. On failure nothing is consumed.
+func (p *parser) parseURLOrString() (string, logger.Range, bool) {
+	t := p.current()
+	switch t.Kind {
+	case css_lexer.TString:
+		text := p.decoded()
+		p.advance()
+		return text, t.Range, true
+
+	case css_lexer.TURL:
+		text := p.decoded()
+		p.advance()
+		return text, t.Range, true
+
+	case css_lexer.TFunction:
+		if strings.EqualFold(p.decoded(), "url") {
+			matchingLoc := logger.Loc{Start: p.current().Range.End() - 1}
+			i := p.index + 1
+
+			// Skip over whitespace
+			for p.at(i).Kind == css_lexer.TWhitespace {
+				i++
+			}
+
+			// Consume a string
+			if p.at(i).Kind == css_lexer.TString {
+				stringIndex := i
+				i++
+
+				// Skip over whitespace
+				for p.at(i).Kind == css_lexer.TWhitespace {
+					i++
+				}
+
+				// Consume a closing parenthesis
+				if close := p.at(i).Kind; close == css_lexer.TCloseParen || close == css_lexer.TEndOfFile {
+					t := p.at(stringIndex)
+					text := t.DecodedText(p.source.Contents)
+					p.index = i
+					p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc)
+					return text, t.Range, true
+				}
+			}
+		}
+	}
+
+	return "", logger.Range{}, false
+}
+
+// expectURLOrString parses a URL or string value, reporting an
+// expected-URL error when neither form is present.
+func (p *parser) expectURLOrString() (url string, r logger.Range, ok bool) {
+	if url, r, ok = p.parseURLOrString(); ok {
+		return url, r, true
+	}
+	p.expect(css_lexer.TURL)
+	return url, r, false
+}
+
+// atRuleKind describes how the body of a known at-rule should be parsed.
+type atRuleKind uint8
+
+const (
+	atRuleUnknown atRuleKind = iota
+	// Body is a list of declarations (e.g. "@font-face")
+	atRuleDeclarations
+	// Body inherits the enclosing parsing context (e.g. "@media")
+	atRuleInheritContext
+	// May end in either a "{}" block or a ";" terminator (e.g. "@layer")
+	atRuleQualifiedOrEmpty
+	// Terminated by ";" with no block (e.g. "@charset", "@import")
+	atRuleEmpty
+)
+
+// specialAtRules maps a lowercased at-rule name (without the "@") to how
+// its body should be parsed. Names not present here are atRuleUnknown.
+var specialAtRules = map[string]atRuleKind{
+	"media":    atRuleInheritContext,
+	"supports": atRuleInheritContext,
+
+	"font-face": atRuleDeclarations,
+	"page":      atRuleDeclarations,
+
+	// These go inside "@page": https://www.w3.org/TR/css-page-3/#syntax-page-selector
+	"bottom-center":       atRuleDeclarations,
+	"bottom-left-corner":  atRuleDeclarations,
+	"bottom-left":         atRuleDeclarations,
+	"bottom-right-corner": atRuleDeclarations,
+	"bottom-right":        atRuleDeclarations,
+	"left-bottom":         atRuleDeclarations,
+	"left-middle":         atRuleDeclarations,
+	"left-top":            atRuleDeclarations,
+	"right-bottom":        atRuleDeclarations,
+	"right-middle":        atRuleDeclarations,
+	"right-top":           atRuleDeclarations,
+	"top-center":          atRuleDeclarations,
+	"top-left-corner":     atRuleDeclarations,
+	"top-left":            atRuleDeclarations,
+	"top-right-corner":    atRuleDeclarations,
+	"top-right":           atRuleDeclarations,
+
+	// These properties are very deprecated and appear to only be useful for
+	// mobile versions of internet explorer (which may no longer exist?), but
+	// they are used by the https://ant.design/ design system so we recognize
+	// them to avoid the warning.
+	//
+	//   Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@viewport
+	//   Discussion: https://github.com/w3c/csswg-drafts/issues/4766
+	//
+	"viewport":     atRuleDeclarations,
+	"-ms-viewport": atRuleDeclarations,
+
+	// This feature has been removed from the web because it's actively harmful.
+	// However, there is one exception where "@-moz-document url-prefix() {" is
+	// accepted by Firefox to basically be an "if Firefox" conditional rule.
+	//
+	//   Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@document
+	//   Discussion: https://bugzilla.mozilla.org/show_bug.cgi?id=1035091
+	//
+	"document":      atRuleInheritContext,
+	"-moz-document": atRuleInheritContext,
+
+	// This is a new feature that changes how the CSS rule cascade works. It can
+	// end in either a "{}" block or a ";" rule terminator so we need this special
+	// case to support both.
+	//
+	//   Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@layer
+	//   Motivation: https://developer.chrome.com/blog/cascade-layers/
+	//
+	"layer": atRuleQualifiedOrEmpty,
+
+	// Reference: https://drafts.csswg.org/css-cascade-6/#scoped-styles
+	"scope": atRuleInheritContext,
+
+	// Reference: https://drafts.csswg.org/css-fonts-4/#font-palette-values
+	"font-palette-values": atRuleDeclarations,
+
+	// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@counter-style
+	// Reference: https://drafts.csswg.org/css-counter-styles/#the-counter-style-rule
+	"counter-style": atRuleDeclarations,
+
+	// Documentation: https://developer.mozilla.org/en-US/docs/Web/CSS/@font-feature-values
+	// Reference: https://drafts.csswg.org/css-fonts/#font-feature-values
+	"font-feature-values": atRuleDeclarations,
+	"annotation":          atRuleDeclarations,
+	"character-variant":   atRuleDeclarations,
+	"historical-forms":    atRuleDeclarations,
+	"ornaments":           atRuleDeclarations,
+	"styleset":            atRuleDeclarations,
+	"stylistic":           atRuleDeclarations,
+	"swash":               atRuleDeclarations,
+
+	// Container Queries
+	// Reference: https://drafts.csswg.org/css-contain-3/#container-rule
+	"container": atRuleInheritContext,
+
+	// Defining before-change style: the @starting-style rule
+	// Reference: https://drafts.csswg.org/css-transitions-2/#defining-before-change-style-the-starting-style-rule
+	"starting-style": atRuleInheritContext,
+
+	// Anchor Positioning
+	// Reference: https://drafts.csswg.org/css-anchor-position-1/#at-ruledef-position-try
+	"position-try": atRuleDeclarations,
+}
+
+// atKnownRuleCanBeRemovedIfEmpty lists known at-rules whose empty "{}" form
+// can be deleted during minification without changing behavior. Notably
+// absent: "@keyframes" (still dispatches animation events when empty) and
+// "@layer" (declaration order affects the cascade) — see mangleRules.
+var atKnownRuleCanBeRemovedIfEmpty = map[string]bool{
+	"media":     true,
+	"supports":  true,
+	"font-face": true,
+	"page":      true,
+
+	// https://www.w3.org/TR/css-page-3/#syntax-page-selector
+	"bottom-center":       true,
+	"bottom-left-corner":  true,
+	"bottom-left":         true,
+	"bottom-right-corner": true,
+	"bottom-right":        true,
+	"left-bottom":         true,
+	"left-middle":         true,
+	"left-top":            true,
+	"right-bottom":        true,
+	"right-middle":        true,
+	"right-top":           true,
+	"top-center":          true,
+	"top-left-corner":     true,
+	"top-left":            true,
+	"top-right-corner":    true,
+	"top-right":           true,
+
+	// https://drafts.csswg.org/css-cascade-6/#scoped-styles
+	"scope": true,
+
+	// https://drafts.csswg.org/css-fonts-4/#font-palette-values
+	"font-palette-values": true,
+
+	// https://drafts.csswg.org/css-contain-3/#container-rule
+	"container": true,
+}
+
+// atRuleValidity tracks whether a position-sensitive at-rule ("@charset",
+// "@import") is still allowed at the current point in the file.
+type atRuleValidity uint8
+
+const (
+	// Not allowed here at all (e.g. not at the top level)
+	atRuleInvalid atRuleValidity = iota
+	// Still allowed at this position
+	atRuleValid
+	// Was allowed, but a preceding rule (at afterLoc) has invalidated it
+	atRuleInvalidAfter
+)
+
+// atRuleContext carries positional state into parseAtRule.
+type atRuleContext struct {
+	// Location of the rule that made "@charset"/"@import" invalid, used in
+	// warning notes
+	afterLoc             logger.Loc
+	charsetValidity      atRuleValidity
+	importValidity       atRuleValidity
+	canInlineNoOpNesting bool
+	isDeclarationList    bool
+	isTopLevel           bool
+}
+
+func (p *parser) parseAtRule(context atRuleContext) css_ast.Rule {
+	// Parse the name
+	atToken := p.decoded()
+	atRange := p.current().Range
+	lowerAtToken := strings.ToLower(atToken)
+	kind := specialAtRules[lowerAtToken]
+	p.advance()
+
+	// Parse the prelude
+	preludeStart := p.index
+abortRuleParser:
+	switch lowerAtToken {
+	case "charset":
+		switch context.charsetValidity {
+		case atRuleInvalid:
+			p.log.AddID(logger.MsgID_CSS_InvalidAtCharset, logger.Warning, &p.tracker, atRange, "\"@charset\" must be the first rule in the file")
+
+		case atRuleInvalidAfter:
+			p.log.AddIDWithNotes(logger.MsgID_CSS_InvalidAtCharset, logger.Warning, &p.tracker, atRange,
+				"\"@charset\" must be the first rule in the file",
+				[]logger.MsgData{p.tracker.MsgData(logger.Range{Loc: context.afterLoc},
+					"This rule cannot come before a \"@charset\" rule")})
+
+		case atRuleValid:
+			kind = atRuleEmpty
+			p.expect(css_lexer.TWhitespace)
+			if p.peek(css_lexer.TString) {
+				encoding := p.decoded()
+				if !strings.EqualFold(encoding, "UTF-8") {
+					p.log.AddID(logger.MsgID_CSS_UnsupportedAtCharset, logger.Warning, &p.tracker, p.current().Range,
+						fmt.Sprintf("\"UTF-8\" will be used instead of unsupported charset %q", encoding))
+				}
+				p.advance()
+				p.expect(css_lexer.TSemicolon)
+				return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtCharset{Encoding: encoding}}
+			}
+			p.expect(css_lexer.TString)
+		}
+
+	case "import":
+		switch context.importValidity {
+		case atRuleInvalid:
+			p.log.AddID(logger.MsgID_CSS_InvalidAtImport, logger.Warning, &p.tracker, atRange, "\"@import\" is only valid at the top level")
+
+		case atRuleInvalidAfter:
+			p.log.AddIDWithNotes(logger.MsgID_CSS_InvalidAtImport, logger.Warning, &p.tracker, atRange,
+				"All \"@import\" rules must come first",
+				[]logger.MsgData{p.tracker.MsgData(logger.Range{Loc: context.afterLoc},
+					"This rule cannot come before an \"@import\" rule")})
+
+		case atRuleValid:
+			kind = atRuleEmpty
+			p.eat(css_lexer.TWhitespace)
+			if path, r, ok := p.expectURLOrString(); ok {
+				var conditions css_ast.ImportConditions
+				importConditionsStart := p.index
+				for {
+					if kind := p.current().Kind; kind == css_lexer.TSemicolon || kind == css_lexer.TOpenBrace ||
+						kind == css_lexer.TCloseBrace || kind == css_lexer.TEndOfFile {
+						break
+					}
+					p.parseComponentValue()
+				}
+				if p.current().Kind == css_lexer.TOpenBrace {
+					break // Avoid parsing an invalid "@import" rule
+				}
+				conditions.Media = p.convertTokens(p.tokens[importConditionsStart:p.index])
+
+				// Insert or remove whitespace before the first token
+				var importConditions *css_ast.ImportConditions
+				if len(conditions.Media) > 0 {
+					importConditions = &conditions
+
+					// Handle "layer()"
+					if t := conditions.Media[0]; (t.Kind == css_lexer.TIdent || t.Kind == css_lexer.TFunction) && strings.EqualFold(t.Text, "layer") {
+						conditions.Layers = conditions.Media[:1]
+						conditions.Media = conditions.Media[1:]
+					}
+
+					// Handle "supports()"
+					if len(conditions.Media) > 0 {
+						if t := conditions.Media[0]; t.Kind == css_lexer.TFunction && strings.EqualFold(t.Text, "supports") {
+							conditions.Supports = conditions.Media[:1]
+							conditions.Media = conditions.Media[1:]
+						}
+					}
+
+					// Remove leading and trailing whitespace
+					if len(conditions.Layers) > 0 {
+						conditions.Layers[0].Whitespace &= ^(css_ast.WhitespaceBefore | css_ast.WhitespaceAfter)
+					}
+					if len(conditions.Supports) > 0 {
+						conditions.Supports[0].Whitespace &= ^(css_ast.WhitespaceBefore | css_ast.WhitespaceAfter)
+					}
+					if n := len(conditions.Media); n > 0 {
+						conditions.Media[0].Whitespace &= ^css_ast.WhitespaceBefore
+						conditions.Media[n-1].Whitespace &= ^css_ast.WhitespaceAfter
+					}
+				}
+
+				p.expect(css_lexer.TSemicolon)
+				importRecordIndex := uint32(len(p.importRecords))
+				p.importRecords = append(p.importRecords, ast.ImportRecord{
+					Kind:  ast.ImportAt,
+					Path:  logger.Path{Text: path},
+					Range: r,
+				})
+
+				// Fill in the pre-import layers once we see the first "@import"
+				if !p.hasSeenAtImport {
+					p.hasSeenAtImport = true
+					p.layersPreImport = p.layersPostImport
+					p.layersPostImport = nil
+				}
+
+				return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtImport{
+					ImportRecordIndex: importRecordIndex,
+					ImportConditions:  importConditions,
+				}}
+			}
+		}
+
+	case "keyframes", "-webkit-keyframes", "-moz-keyframes", "-ms-keyframes", "-o-keyframes":
+		p.eat(css_lexer.TWhitespace)
+		nameLoc := p.current().Range.Loc
+		var name string
+
+		if p.peek(css_lexer.TIdent) {
+			name = p.decoded()
+			if isInvalidAnimationName(name) {
+				msg := logger.Msg{
+					ID:    logger.MsgID_CSS_CSSSyntaxError,
+					Kind:  logger.Warning,
+					Data:  p.tracker.MsgData(p.current().Range, fmt.Sprintf("Cannot use %q as a name for \"@keyframes\" without quotes", name)),
+					Notes: []logger.MsgData{{Text: fmt.Sprintf("You can put %q in quotes to prevent it from becoming a CSS keyword.", name)}},
+				}
+				msg.Data.Location.Suggestion = fmt.Sprintf("%q", name)
+				p.log.AddMsg(msg)
+				break
+			}
+			p.advance()
+		} else if p.peek(css_lexer.TString) {
+			// Note: Strings as names is allowed in the CSS specification and works in
+			// Firefox and Safari but Chrome has strangely decided to deliberately not
+			// support this. We always turn all string names into identifiers to avoid
+			// them silently breaking in Chrome.
+			name = p.decoded()
+			p.advance()
+			if !p.makeLocalSymbols && isInvalidAnimationName(name) {
+				break
+			}
+		} else if !p.expect(css_lexer.TIdent) {
+			break
+		}
+
+		p.eat(css_lexer.TWhitespace)
+		blockStart := p.index
+
+		matchingLoc := p.current().Range.Loc
+		if p.expect(css_lexer.TOpenBrace) {
+			var blocks []css_ast.KeyframeBlock
+
+		badSyntax:
+			for {
+				switch p.current().Kind {
+				case css_lexer.TWhitespace:
+					p.advance()
+					continue
+
+				case css_lexer.TCloseBrace:
+					closeBraceLoc := p.current().Range.Loc
+					p.advance()
+					return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtKeyframes{
+						AtToken:       atToken,
+						Name:          p.symbolForName(nameLoc, name),
+						Blocks:        blocks,
+						CloseBraceLoc: closeBraceLoc,
+					}}
+
+				case css_lexer.TEndOfFile:
+					break badSyntax
+
+				case css_lexer.TOpenBrace:
+					p.expect(css_lexer.TPercentage)
+					break badSyntax
+
+				default:
+					var selectors []string
+					var firstSelectorLoc logger.Loc
+
+				selectors:
+					for {
+						t := p.current()
+						switch t.Kind {
+						case css_lexer.TWhitespace:
+							p.advance()
+							continue
+
+						case css_lexer.TOpenBrace:
+							blockMatchingLoc := p.current().Range.Loc
+							p.advance()
+							rules := p.parseListOfDeclarations(listOfDeclarationsOpts{})
+							closeBraceLoc := p.current().Range.Loc
+							if !p.expectWithMatchingLoc(css_lexer.TCloseBrace, blockMatchingLoc) {
+								closeBraceLoc = logger.Loc{}
+							}
+
+							// "@keyframes { from {} to { color: red } }" => "@keyframes { to { color: red } }"
+							if !p.options.minifySyntax || len(rules) > 0 {
+								blocks = append(blocks, css_ast.KeyframeBlock{
+									Selectors:     selectors,
+									Rules:         rules,
+									Loc:           firstSelectorLoc,
+									CloseBraceLoc: closeBraceLoc,
+								})
+							}
+							break selectors
+
+						case css_lexer.TCloseBrace, css_lexer.TEndOfFile:
+							p.expect(css_lexer.TOpenBrace)
+							break badSyntax
+
+						case css_lexer.TIdent, css_lexer.TPercentage:
+							if firstSelectorLoc.Start == 0 {
+								firstSelectorLoc = p.current().Range.Loc
+							}
+							text := p.decoded()
+							if t.Kind == css_lexer.TIdent {
+								if strings.EqualFold(text, "from") {
+									if p.options.minifySyntax {
+										text = "0%" // "0%" is equivalent to but shorter than "from"
+									}
+								} else if !strings.EqualFold(text, "to") {
+									p.expect(css_lexer.TPercentage)
+								}
+							} else if p.options.minifySyntax && text == "100%" {
+								text = "to" // "to" is equivalent to but shorter than "100%"
+							}
+							selectors = append(selectors, text)
+							p.advance()
+
+							// Keyframe selectors are comma-separated
+							p.eat(css_lexer.TWhitespace)
+							if p.eat(css_lexer.TComma) {
+								p.eat(css_lexer.TWhitespace)
+								if k := p.current().Kind; k != css_lexer.TIdent && k != css_lexer.TPercentage {
+									p.expect(css_lexer.TPercentage)
+									break badSyntax
+								}
+							} else if k := p.current().Kind; k != css_lexer.TOpenBrace && k != css_lexer.TCloseBrace && k != css_lexer.TEndOfFile {
+								p.expect(css_lexer.TComma)
+								break badSyntax
+							}
+
+						default:
+							p.expect(css_lexer.TPercentage)
+							break badSyntax
+						}
+					}
+				}
+			}
+
+			// Otherwise, finish parsing the body and return an unknown rule
+			for !p.peek(css_lexer.TCloseBrace) && !p.peek(css_lexer.TEndOfFile) {
+				p.parseComponentValue()
+			}
+			p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc)
+			prelude := p.convertTokens(p.tokens[preludeStart:blockStart])
+			block, _ := p.convertTokensHelper(p.tokens[blockStart:p.index], css_lexer.TEndOfFile, convertTokensOpts{allowImports: true})
+			return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude, Block: block}}
+		}
+
+	case "layer":
+		// Reference: https://developer.mozilla.org/en-US/docs/Web/CSS/@layer
+
+		// Read the layer name list
+		var names [][]string
+		p.eat(css_lexer.TWhitespace)
+		if p.peek(css_lexer.TIdent) {
+			for {
+				ident, ok := p.expectValidLayerNameIdent()
+				if !ok {
+					break abortRuleParser
+				}
+				name := []string{ident}
+				for {
+					p.eat(css_lexer.TWhitespace)
+					if !p.eat(css_lexer.TDelimDot) {
+						break
+					}
+					p.eat(css_lexer.TWhitespace)
+					ident, ok := p.expectValidLayerNameIdent()
+					if !ok {
+						break abortRuleParser
+					}
+					name = append(name, ident)
+				}
+				names = append(names, name)
+				p.eat(css_lexer.TWhitespace)
+				if !p.eat(css_lexer.TComma) {
+					break
+				}
+				p.eat(css_lexer.TWhitespace)
+			}
+		}
+
+		// Read the optional block
+		matchingLoc := p.current().Range.Loc
+		if len(names) <= 1 && p.eat(css_lexer.TOpenBrace) {
+			p.recordAtLayerRule(names)
+			oldEnclosingLayer := p.enclosingLayer
+			if len(names) == 1 {
+				p.enclosingLayer = append(p.enclosingLayer, names[0]...)
+			} else {
+				p.anonLayerCount++
+			}
+			var rules []css_ast.Rule
+			if context.isDeclarationList {
+				rules = p.parseListOfDeclarations(listOfDeclarationsOpts{
+					canInlineNoOpNesting: context.canInlineNoOpNesting,
+				})
+			} else {
+				rules = p.parseListOfRules(ruleContext{
+					parseSelectors: true,
+				})
+			}
+			if len(names) != 1 {
+				p.anonLayerCount--
+			}
+			p.enclosingLayer = oldEnclosingLayer
+			closeBraceLoc := p.current().Range.Loc
+			if !p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
+				closeBraceLoc = logger.Loc{}
+			}
+			return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtLayer{Names: names, Rules: rules, CloseBraceLoc: closeBraceLoc}}
+		}
+
+		// Handle lack of a block
+		if len(names) >= 1 && p.eat(css_lexer.TSemicolon) {
+			p.recordAtLayerRule(names)
+			return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtLayer{Names: names}}
+		}
+
+		// Otherwise there's some kind of syntax error
+		switch p.current().Kind {
+		case css_lexer.TEndOfFile:
+			p.expect(css_lexer.TSemicolon)
+			p.recordAtLayerRule(names)
+			return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtLayer{Names: names}}
+
+		case css_lexer.TCloseBrace:
+			p.expect(css_lexer.TSemicolon)
+			if !context.isTopLevel {
+				p.recordAtLayerRule(names)
+				return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RAtLayer{Names: names}}
+			}
+
+		case css_lexer.TOpenBrace:
+			p.expect(css_lexer.TSemicolon)
+
+		default:
+			p.unexpected()
+		}
+
+	default:
+		if kind == atRuleUnknown && lowerAtToken == "namespace" {
+			// CSS namespaces are a weird feature that appears to only really be
+			// useful for styling XML. And the world has moved on from XHTML to
+			// HTML5 so pretty much no one uses CSS namespaces anymore. They are
+			// also complicated to support in a bundler because CSS namespaces are
+			// file-scoped, which means:
+			//
+			// * Default namespaces can be different in different files, in which
+			//   case some default namespaces would have to be converted to prefixed
+			//   namespaces to avoid collisions.
+			//
+			// * Prefixed namespaces from different files can use the same name, in
+			//   which case some prefixed namespaces would need to be renamed to
+			//   avoid collisions.
+			//
+			// Instead of implementing all of that for an extremely obscure feature,
+			// CSS namespaces are just explicitly not supported.
+			p.log.AddID(logger.MsgID_CSS_UnsupportedAtNamespace, logger.Warning, &p.tracker, atRange, "\"@namespace\" rules are not supported")
+		}
+	}
+
+	// Parse an unknown prelude
+prelude:
+	for {
+		switch p.current().Kind {
+		case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
+			break prelude
+
+		case css_lexer.TSemicolon, css_lexer.TCloseBrace:
+			prelude := p.convertTokens(p.tokens[preludeStart:p.index])
+
+			switch kind {
+			case atRuleQualifiedOrEmpty:
+				// Parse a known at rule below
+				break prelude
+
+			case atRuleEmpty, atRuleUnknown:
+				// Parse an unknown at rule
+				p.expect(css_lexer.TSemicolon)
+				return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude}}
+
+			default:
+				// Report an error for rules that should have blocks
+				p.expect(css_lexer.TOpenBrace)
+				p.eat(css_lexer.TSemicolon)
+				return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude}}
+			}
+
+		default:
+			p.parseComponentValue()
+		}
+	}
+	prelude := p.convertTokens(p.tokens[preludeStart:p.index])
+	blockStart := p.index
+
+	switch kind {
+	case atRuleEmpty:
+		// Report an error for rules that shouldn't have blocks
+		p.expect(css_lexer.TSemicolon)
+		p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
+		block := p.convertTokens(p.tokens[blockStart:p.index])
+		return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude, Block: block}}
+
+	case atRuleDeclarations:
+		// Parse known rules whose blocks always consist of declarations
+		matchingLoc := p.current().Range.Loc
+		p.expect(css_lexer.TOpenBrace)
+		rules := p.parseListOfDeclarations(listOfDeclarationsOpts{})
+		closeBraceLoc := p.current().Range.Loc
+		if !p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
+			closeBraceLoc = logger.Loc{}
+		}
+
+		// Handle local names for "@counter-style"
+		if len(prelude) == 1 && lowerAtToken == "counter-style" {
+			if t := &prelude[0]; t.Kind == css_lexer.TIdent {
+				t.Kind = css_lexer.TSymbol
+				t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
+			}
+		}
+
+		return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude, Rules: rules, CloseBraceLoc: closeBraceLoc}}
+
+	case atRuleInheritContext:
+		// Parse known rules whose blocks consist of whatever the current context is
+		matchingLoc := p.current().Range.Loc
+		p.expect(css_lexer.TOpenBrace)
+		var rules []css_ast.Rule
+
+		// Push the "@media" conditions
+		isAtMedia := lowerAtToken == "media"
+		if isAtMedia {
+			p.enclosingAtMedia = append(p.enclosingAtMedia, prelude)
+		}
+
+		// Parse the block for this rule
+		if context.isDeclarationList {
+			rules = p.parseListOfDeclarations(listOfDeclarationsOpts{
+				canInlineNoOpNesting: context.canInlineNoOpNesting,
+			})
+		} else {
+			rules = p.parseListOfRules(ruleContext{
+				parseSelectors: true,
+			})
+		}
+
+		// Pop the "@media" conditions
+		if isAtMedia {
+			p.enclosingAtMedia = p.enclosingAtMedia[:len(p.enclosingAtMedia)-1]
+		}
+
+		closeBraceLoc := p.current().Range.Loc
+		if !p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
+			closeBraceLoc = logger.Loc{}
+		}
+
+		// Handle local names for "@container"
+		if len(prelude) >= 1 && lowerAtToken == "container" {
+			if t := &prelude[0]; t.Kind == css_lexer.TIdent && strings.ToLower(t.Text) != "not" {
+				t.Kind = css_lexer.TSymbol
+				t.PayloadIndex = p.symbolForName(t.Loc, t.Text).Ref.InnerIndex
+			}
+		}
+
+		return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude, Rules: rules, CloseBraceLoc: closeBraceLoc}}
+
+	case atRuleQualifiedOrEmpty:
+		matchingLoc := p.current().Range.Loc
+		if p.eat(css_lexer.TOpenBrace) {
+			rules := p.parseListOfRules(ruleContext{
+				parseSelectors: true,
+			})
+			closeBraceLoc := p.current().Range.Loc
+			if !p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
+				closeBraceLoc = logger.Loc{}
+			}
+			return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude, Rules: rules, CloseBraceLoc: closeBraceLoc}}
+		}
+		p.expect(css_lexer.TSemicolon)
+		return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RKnownAt{AtToken: atToken, Prelude: prelude}}
+
+	default:
+		// Otherwise, parse an unknown rule
+		p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
+		block, _ := p.convertTokensHelper(p.tokens[blockStart:p.index], css_lexer.TEndOfFile, convertTokensOpts{allowImports: true})
+		return css_ast.Rule{Loc: atRange.Loc, Data: &css_ast.RUnknownAt{AtToken: atToken, Prelude: prelude, Block: block}}
+	}
+}
+
+func (p *parser) expectValidLayerNameIdent() (string, bool) {
+	r := p.current().Range
+	text := p.decoded()
+	if !p.expect(css_lexer.TIdent) {
+		return "", false
+	}
+	switch text {
+	case "initial", "inherit", "unset":
+		p.log.AddID(logger.MsgID_CSS_InvalidAtLayer, logger.Warning, &p.tracker, r, fmt.Sprintf("%q cannot be used as a layer name", text))
+		p.prevError = r.Loc
+		return "", false
+	}
+	return text, true
+}
+
+func (p *parser) convertTokens(tokens []css_lexer.Token) []css_ast.Token {
+	result, _ := p.convertTokensHelper(tokens, css_lexer.TEndOfFile, convertTokensOpts{})
+	return result
+}
+
// convertTokensOpts controls how convertTokensHelper translates lexer
// tokens into AST tokens.
type convertTokensOpts struct {
	allowImports         bool // when true, "url()" tokens create active import records instead of unused ones
	verbatimWhitespace   bool // when true, whitespace is preserved exactly (required for CSS variable usage)
	isInsideCalcFunction bool // when true, warn about "+"/"-" uses that would break the containing "calc()"
}
+
// convertTokensHelper converts raw lexer tokens into AST tokens until the
// given "close" token kind (or the end of the input slice) is reached. It
// returns the converted tokens plus the unconsumed remainder of the input,
// which is how recursive calls for nested constructs ("fn(...)", "(...)",
// "{...}", "[...]") hand control back to their caller. Whitespace is
// normally represented as before/after flags on neighboring tokens rather
// than as explicit whitespace tokens (except in verbatim whitespace mode).
func (p *parser) convertTokensHelper(tokens []css_lexer.Token, close css_lexer.T, opts convertTokensOpts) ([]css_ast.Token, []css_lexer.Token) {
	result := []css_ast.Token{}
	var nextWhitespace css_ast.WhitespaceFlags

	// Enable verbatim whitespace mode when the first two non-whitespace tokens
	// are a CSS variable name followed by a colon. This is because it could be
	// a form of CSS variable usage, and removing whitespace could potentially
	// break this usage. For example, the following CSS is ignored by Chrome if
	// the whitespace isn't preserved:
	//
	//   @supports (--foo: ) {
	//     html { background: green; }
	//   }
	//
	// Strangely whitespace removal doesn't cause the declaration to be ignored
	// in Firefox or Safari, so there's definitely a browser bug somewhere.
	if !opts.verbatimWhitespace {
		for i, t := range tokens {
			if t.Kind == css_lexer.TWhitespace {
				continue
			}
			if t.Kind == css_lexer.TIdent && strings.HasPrefix(t.DecodedText(p.source.Contents), "--") {
				for _, t := range tokens[i+1:] {
					if t.Kind == css_lexer.TWhitespace {
						continue
					}
					if t.Kind == css_lexer.TColon {
						opts.verbatimWhitespace = true
					}
					break
				}
			}
			break
		}
	}

	// Convert tokens one at a time until the closing token (if any) is found
loop:
	for len(tokens) > 0 {
		t := tokens[0]
		tokens = tokens[1:]
		if t.Kind == close {
			break loop
		}
		token := css_ast.Token{
			Loc:        t.Range.Loc,
			Kind:       t.Kind,
			Text:       t.DecodedText(p.source.Contents),
			Whitespace: nextWhitespace,
		}
		nextWhitespace = 0

		// Warn about invalid "+" and "-" operators that break the containing "calc()"
		if opts.isInsideCalcFunction && t.Kind.IsNumeric() && len(result) > 0 && result[len(result)-1].Kind.IsNumeric() &&
			(strings.HasPrefix(token.Text, "+") || strings.HasPrefix(token.Text, "-")) {
			// "calc(1+2)" and "calc(1-2)" are invalid
			p.log.AddID(logger.MsgID_CSS_InvalidCalc, logger.Warning, &p.tracker, logger.Range{Loc: t.Range.Loc, Len: 1},
				fmt.Sprintf("The %q operator only works if there is whitespace on both sides", token.Text[:1]))
		}

		switch t.Kind {
		case css_lexer.TWhitespace:
			// Record whitespace as flags on the surrounding tokens instead of
			// emitting a whitespace token
			if last := len(result) - 1; last >= 0 {
				result[last].Whitespace |= css_ast.WhitespaceAfter
			}
			nextWhitespace = css_ast.WhitespaceBefore
			continue

		case css_lexer.TDelimPlus, css_lexer.TDelimMinus:
			// Warn about invalid "+" and "-" operators that break the containing "calc()"
			if opts.isInsideCalcFunction && len(tokens) > 0 {
				if len(result) == 0 || result[len(result)-1].Kind == css_lexer.TComma {
					// "calc(-(1 + 2))" is invalid
					p.log.AddID(logger.MsgID_CSS_InvalidCalc, logger.Warning, &p.tracker, t.Range,
						fmt.Sprintf("%q can only be used as an infix operator, not a prefix operator", token.Text))
				} else if token.Whitespace != css_ast.WhitespaceBefore || tokens[0].Kind != css_lexer.TWhitespace {
					// "calc(1- 2)" and "calc(1 -(2))" are invalid
					p.log.AddID(logger.MsgID_CSS_InvalidCalc, logger.Warning, &p.tracker, t.Range,
						fmt.Sprintf("The %q operator only works if there is whitespace on both sides", token.Text))
				}
			}

		case css_lexer.TNumber:
			if p.options.minifySyntax {
				if text, ok := mangleNumber(token.Text); ok {
					token.Text = text
				}
			}

		case css_lexer.TPercentage:
			if p.options.minifySyntax {
				if text, ok := mangleNumber(token.PercentageValue()); ok {
					token.Text = text + "%"
				}
			}

		case css_lexer.TDimension:
			token.UnitOffset = t.UnitOffset

			if p.options.minifySyntax {
				if text, ok := mangleNumber(token.DimensionValue()); ok {
					token.Text = text + token.DimensionUnit()
					token.UnitOffset = uint16(len(text))
				}

				if value, unit, ok := mangleDimension(token.DimensionValue(), token.DimensionUnit()); ok {
					token.Text = value + unit
					token.UnitOffset = uint16(len(value))
				}
			}

		case css_lexer.TURL:
			// Record the URL path as an import record and reference it by index
			token.PayloadIndex = uint32(len(p.importRecords))
			var flags ast.ImportRecordFlags
			if !opts.allowImports {
				flags |= ast.IsUnused
			}
			p.importRecords = append(p.importRecords, ast.ImportRecord{
				Kind:  ast.ImportURL,
				Path:  logger.Path{Text: token.Text},
				Range: t.Range,
				Flags: flags,
			})
			token.Text = ""

		case css_lexer.TFunction:
			var nested []css_ast.Token
			original := tokens
			nestedOpts := opts
			if strings.EqualFold(token.Text, "var") {
				// CSS variables require verbatim whitespace for correctness
				nestedOpts.verbatimWhitespace = true
			}
			if strings.EqualFold(token.Text, "calc") {
				nestedOpts.isInsideCalcFunction = true
			}
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseParen, nestedOpts)
			token.Children = &nested

			// Apply "calc" simplification rules when minifying
			if p.options.minifySyntax && strings.EqualFold(token.Text, "calc") {
				token = p.tryToReduceCalcExpression(token)
			}

			// Treat a URL function call with a string just like a URL token
			if strings.EqualFold(token.Text, "url") && len(nested) == 1 && nested[0].Kind == css_lexer.TString {
				token.Kind = css_lexer.TURL
				token.Text = ""
				token.Children = nil
				token.PayloadIndex = uint32(len(p.importRecords))
				var flags ast.ImportRecordFlags
				if !opts.allowImports {
					flags |= ast.IsUnused
				}
				p.importRecords = append(p.importRecords, ast.ImportRecord{
					Kind:  ast.ImportURL,
					Path:  logger.Path{Text: nested[0].Text},
					Range: original[0].Range,
					Flags: flags,
				})
			}

		case css_lexer.TOpenParen:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseParen, opts)
			token.Children = &nested

		case css_lexer.TOpenBrace:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseBrace, opts)

			// Pretty-printing: insert leading and trailing whitespace when not minifying
			if !opts.verbatimWhitespace && !p.options.minifyWhitespace && len(nested) > 0 {
				nested[0].Whitespace |= css_ast.WhitespaceBefore
				nested[len(nested)-1].Whitespace |= css_ast.WhitespaceAfter
			}

			token.Children = &nested

		case css_lexer.TOpenBracket:
			var nested []css_ast.Token
			nested, tokens = p.convertTokensHelper(tokens, css_lexer.TCloseBracket, opts)
			token.Children = &nested
		}

		result = append(result, token)
	}

	// Normalize whitespace flags unless whitespace must be kept verbatim
	if !opts.verbatimWhitespace {
		for i := range result {
			token := &result[i]

			// Always remove leading and trailing whitespace
			if i == 0 {
				token.Whitespace &= ^css_ast.WhitespaceBefore
			}
			if i+1 == len(result) {
				token.Whitespace &= ^css_ast.WhitespaceAfter
			}

			switch token.Kind {
			case css_lexer.TComma:
				// Assume that whitespace can always be removed before a comma
				token.Whitespace &= ^css_ast.WhitespaceBefore
				if i > 0 {
					result[i-1].Whitespace &= ^css_ast.WhitespaceAfter
				}

				// Assume whitespace can always be added after a comma
				if p.options.minifyWhitespace {
					token.Whitespace &= ^css_ast.WhitespaceAfter
					if i+1 < len(result) {
						result[i+1].Whitespace &= ^css_ast.WhitespaceBefore
					}
				} else {
					token.Whitespace |= css_ast.WhitespaceAfter
					if i+1 < len(result) {
						result[i+1].Whitespace |= css_ast.WhitespaceBefore
					}
				}
			}
		}
	}

	// Insert an explicit whitespace token if we're in verbatim mode and all
	// tokens were whitespace. In this case there is no token to attach the
	// whitespace before/after flags so this is the only way to represent this.
	// This is the only case where this function generates an explicit whitespace
	// token. It represents whitespace as flags in all other cases.
	if opts.verbatimWhitespace && len(result) == 0 && nextWhitespace == css_ast.WhitespaceBefore {
		result = append(result, css_ast.Token{
			Kind: css_lexer.TWhitespace,
		})
	}

	return result, tokens
}
+
// shiftDot moves the decimal point of a numeric string by dotOffset places
// (positive shifts it right, negative shifts it left) and returns the
// rewritten number plus true. It returns false for numbers written in
// scientific notation, which this simple rewriting does not handle.
func shiftDot(text string, dotOffset int) (string, bool) {
	// Scientific notation is not supported
	if strings.ContainsAny(text, "eE") {
		return "", false
	}

	// Split off any leading sign
	sign := ""
	if text != "" && (text[0] == '-' || text[0] == '+') {
		sign, text = text[:1], text[1:]
	}

	// Drop the dot, remembering where it was (an integer is treated as if it
	// had a trailing dot)
	dot := strings.IndexByte(text, '.')
	if dot < 0 {
		dot = len(text)
	} else {
		text = text[:dot] + text[dot+1:]
	}

	// Apply the requested shift
	dot += dotOffset

	// Strip zeros that would end up as redundant leading digits
	for dot > 0 && len(text) > 0 && text[0] == '0' {
		dot--
		text = text[1:]
	}

	// Strip zeros that would end up as redundant trailing digits
	for len(text) > dot && len(text) > 0 && text[len(text)-1] == '0' {
		text = text[:len(text)-1]
	}

	// Purely integral result: pad with trailing zeros instead of adding a dot
	if dot >= len(text) {
		return sign + text + strings.Repeat("0", dot-len(text)), true
	}

	// Fractional result: pad with leading zeros if the dot moved past the front
	if dot < 0 {
		text = strings.Repeat("0", -dot) + text
		dot = 0
	}

	// Re-insert the dot at its new position
	return sign + text[:dot] + "." + text[dot:], true
}
+
+func mangleDimension(value string, unit string) (string, string, bool) {
+	const msLen = 2
+	const sLen = 1
+
+	// Mangle times: https://developer.mozilla.org/en-US/docs/Web/CSS/time
+	if strings.EqualFold(unit, "ms") {
+		if shifted, ok := shiftDot(value, -3); ok && len(shifted)+sLen < len(value)+msLen {
+			// Convert "ms" to "s" if shorter
+			return shifted, "s", true
+		}
+	}
+	if strings.EqualFold(unit, "s") {
+		if shifted, ok := shiftDot(value, 3); ok && len(shifted)+msLen < len(value)+sLen {
+			// Convert "s" to "ms" if shorter
+			return shifted, "ms", true
+		}
+	}
+
+	return "", "", false
+}
+
// mangleNumber shrinks a numeric literal without changing its value:
// trailing fractional zeros are stripped, an unnecessary decimal point is
// dropped, and the leading zero of "0.x" (or "±0.x") is removed. It returns
// the shortened text plus whether anything actually changed.
func mangleNumber(t string) (string, bool) {
	original := t

	if dot := strings.IndexByte(t, '.'); dot != -1 {
		// Strip trailing zeros ("1.50" => "1.5", "1.0" => "1.")
		t = strings.TrimRight(t, "0")

		if dot+1 == len(t) {
			// The fraction vanished entirely, so drop the dot ("1." => "1")
			t = t[:dot]
			// Keep at least one digit ("0.0" => "0", "-.0" => "-0")
			if t == "" || t == "+" || t == "-" {
				t += "0"
			}
		} else if len(t) >= 3 && t[0] == '0' && t[1] == '.' && t[2] >= '0' && t[2] <= '9' {
			// "0.5" => ".5"
			t = t[1:]
		} else if len(t) >= 4 && (t[0] == '+' || t[0] == '-') && t[1] == '0' && t[2] == '.' && t[3] >= '0' && t[3] <= '9' {
			// "-0.5" => "-.5" and "+0.5" => "+.5"
			t = t[0:1] + t[2:]
		}
	}

	return t, t != original
}
+
// parseSelectorRule parses a style rule. It first tries to parse the
// prelude as a selector list followed by a "{...}" declaration block; if
// that fails, the parser index is rewound to the start of the prelude and
// the text is re-parsed as a generic qualified rule so error recovery still
// produces a rule node.
func (p *parser) parseSelectorRule(isTopLevel bool, opts parseSelectorOpts) css_ast.Rule {
	// Save and restore the local symbol state in case there are any bare
	// ":global" or ":local" annotations. The effect of these should be scoped
	// to within the selector rule.
	local := p.makeLocalSymbols
	preludeStart := p.index

	// Try parsing the prelude as a selector list
	if list, ok := p.parseSelectorList(opts); ok {
		canInlineNoOpNesting := true
		for _, sel := range list {
			// We cannot transform the CSS "a, b::before { & { color: red } }" into
			// "a, b::before { color: red }" because it's basically equivalent to
			// ":is(a, b::before) { color: red }" which only applies to "a", not to
			// "b::before" because pseudo-elements are not valid within :is():
			// https://www.w3.org/TR/selectors-4/#matches-pseudo. This restriction
			// may be relaxed in the future, but this restriction hash shipped so
			// we're stuck with it: https://github.com/w3c/csswg-drafts/issues/7433.
			if sel.UsesPseudoElement() {
				canInlineNoOpNesting = false
				break
			}
		}
		selector := css_ast.RSelector{Selectors: list}
		matchingLoc := p.current().Range.Loc
		if p.expect(css_lexer.TOpenBrace) {
			p.inSelectorSubtree++
			declOpts := listOfDeclarationsOpts{
				canInlineNoOpNesting: canInlineNoOpNesting,
			}

			// Prepare for "composes" declarations
			if opts.composesContext != nil && len(list) == 1 && len(list[0].Selectors) == 1 && list[0].Selectors[0].IsSingleAmpersand() {
				// Support code like this:
				//
				//   .foo {
				//     :local { composes: bar }
				//     :global { composes: baz }
				//   }
				//
				declOpts.composesContext = opts.composesContext
			} else {
				// Otherwise build a fresh composes context from this selector
				// list, recording the first thing that would make "composes"
				// invalid here (combinator, type selector, nesting selector,
				// extra subclass selectors, or a compound selector)
				composesContext := composesContext{parentRange: list[0].Selectors[0].Range()}
				if opts.composesContext != nil {
					composesContext.problemRange = opts.composesContext.parentRange
				}
				for _, sel := range list {
					first := sel.Selectors[0]
					if first.Combinator.Byte != 0 {
						composesContext.problemRange = logger.Range{Loc: first.Combinator.Loc, Len: 1}
					} else if first.TypeSelector != nil {
						composesContext.problemRange = first.TypeSelector.Range()
					} else if first.NestingSelectorLoc.IsValid() {
						composesContext.problemRange = logger.Range{Loc: logger.Loc{Start: int32(first.NestingSelectorLoc.GetIndex())}, Len: 1}
					} else {
						for i, ss := range first.SubclassSelectors {
							class, ok := ss.Data.(*css_ast.SSClass)
							if i > 0 || !ok {
								composesContext.problemRange = ss.Range
							} else {
								composesContext.parentRefs = append(composesContext.parentRefs, class.Name.Ref)
							}
						}
					}
					if composesContext.problemRange.Len > 0 {
						break
					}
					if len(sel.Selectors) > 1 {
						composesContext.problemRange = sel.Selectors[1].Range()
						break
					}
				}
				declOpts.composesContext = &composesContext
			}

			selector.Rules = p.parseListOfDeclarations(declOpts)
			p.inSelectorSubtree--
			closeBraceLoc := p.current().Range.Loc
			if p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
				selector.CloseBraceLoc = closeBraceLoc
			}
			p.makeLocalSymbols = local
			return css_ast.Rule{Loc: p.tokens[preludeStart].Range.Loc, Data: &selector}
		}
	}

	// Selector parsing failed: restore the symbol state and rewind so the
	// prelude tokens can be consumed again below
	p.makeLocalSymbols = local
	p.index = preludeStart

	// Otherwise, parse a generic qualified rule
	return p.parseQualifiedRule(parseQualifiedRuleOpts{
		isAlreadyInvalid:     true,
		isTopLevel:           isTopLevel,
		isDeclarationContext: opts.isDeclarationContext,
	})
}
+
// parseQualifiedRuleOpts configures parseQualifiedRule.
type parseQualifiedRuleOpts struct {
	isAlreadyInvalid     bool // when true, suppress the "expected {" error (an error was already reported for this rule)
	isTopLevel           bool // when true, a "}" does not terminate the prelude and is consumed as a component value
	isDeclarationContext bool // when true, a ";" ends the rule early and produces an RBadDeclaration
}
+
+func (p *parser) parseQualifiedRule(opts parseQualifiedRuleOpts) css_ast.Rule {
+	preludeStart := p.index
+	preludeLoc := p.current().Range.Loc
+
+loop:
+	for {
+		switch p.current().Kind {
+		case css_lexer.TOpenBrace, css_lexer.TEndOfFile:
+			break loop
+
+		case css_lexer.TCloseBrace:
+			if !opts.isTopLevel {
+				break loop
+			}
+
+		case css_lexer.TSemicolon:
+			if opts.isDeclarationContext {
+				return css_ast.Rule{Loc: preludeLoc, Data: &css_ast.RBadDeclaration{
+					Tokens: p.convertTokens(p.tokens[preludeStart:p.index]),
+				}}
+			}
+		}
+
+		p.parseComponentValue()
+	}
+
+	qualified := css_ast.RQualified{
+		Prelude: p.convertTokens(p.tokens[preludeStart:p.index]),
+	}
+
+	matchingLoc := p.current().Range.Loc
+	if p.eat(css_lexer.TOpenBrace) {
+		qualified.Rules = p.parseListOfDeclarations(listOfDeclarationsOpts{})
+		closeBraceLoc := p.current().Range.Loc
+		if p.expectWithMatchingLoc(css_lexer.TCloseBrace, matchingLoc) {
+			qualified.CloseBraceLoc = closeBraceLoc
+		}
+	} else if !opts.isAlreadyInvalid {
+		p.expect(css_lexer.TOpenBrace)
+	}
+
+	return css_ast.Rule{Loc: preludeLoc, Data: &qualified}
+}
+
// endOfRuleScan classifies what scanForEndOfRule found while looking ahead
// for the end of the current rule.
type endOfRuleScan uint8

const (
	// endOfRuleUnknown means no terminator was found (e.g. unbalanced braces).
	endOfRuleUnknown endOfRuleScan = iota
	// endOfRuleSemicolon means the rule is terminated by a top-level ";".
	endOfRuleSemicolon
	// endOfRuleOpenBrace means the rule is terminated by a top-level "{".
	endOfRuleOpenBrace
)
+
+// Note: This was a late change to the CSS nesting syntax.
+// See also: https://github.com/w3c/csswg-drafts/issues/7961
+func (p *parser) scanForEndOfRule() (endOfRuleScan, int) {
+	var initialStack [4]css_lexer.T
+	stack := initialStack[:0]
+
+	for i, t := range p.tokens[p.index:] {
+		switch t.Kind {
+		case css_lexer.TSemicolon:
+			if len(stack) == 0 {
+				return endOfRuleSemicolon, p.index + i
+			}
+
+		case css_lexer.TFunction, css_lexer.TOpenParen:
+			stack = append(stack, css_lexer.TCloseParen)
+
+		case css_lexer.TOpenBracket:
+			stack = append(stack, css_lexer.TCloseBracket)
+
+		case css_lexer.TOpenBrace:
+			if len(stack) == 0 {
+				return endOfRuleOpenBrace, p.index + i
+			}
+			stack = append(stack, css_lexer.TCloseBrace)
+
+		case css_lexer.TCloseParen, css_lexer.TCloseBracket:
+			if n := len(stack); n > 0 && t.Kind == stack[n-1] {
+				stack = stack[:n-1]
+			}
+
+		case css_lexer.TCloseBrace:
+			if n := len(stack); n > 0 && t.Kind == stack[n-1] {
+				stack = stack[:n-1]
+			} else {
+				return endOfRuleUnknown, -1
+			}
+		}
+	}
+
+	return endOfRuleUnknown, -1
+}
+
// parseDeclaration parses a single "key: value" declaration, including any
// trailing "!important". Declarations with a malformed key or a missing ":"
// are preserved as RBadDeclaration nodes (with a warning) rather than being
// discarded.
func (p *parser) parseDeclaration() css_ast.Rule {
	// Parse the key
	keyStart := p.index
	keyRange := p.tokens[keyStart].Range
	keyIsIdent := p.expect(css_lexer.TIdent)
	ok := false
	if keyIsIdent {
		p.eat(css_lexer.TWhitespace)
		ok = p.eat(css_lexer.TColon)
	}

	// Parse the value
	valueStart := p.index
stop:
	for {
		switch p.current().Kind {
		case css_lexer.TEndOfFile, css_lexer.TSemicolon, css_lexer.TCloseBrace:
			break stop

		default:
			p.parseComponentValue()
		}
	}

	// Stop now if this is not a valid declaration
	if !ok {
		if keyIsIdent {
			// Avoid reporting a second error at (or before) a previously
			// reported error location
			if end := keyRange.End(); end > p.prevError.Start {
				p.prevError.Start = end
				data := p.tracker.MsgData(logger.Range{Loc: logger.Loc{Start: end}}, "Expected \":\"")
				data.Location.Suggestion = ":"
				p.log.AddMsgID(logger.MsgID_CSS_CSSSyntaxError, logger.Msg{
					Kind: logger.Warning,
					Data: data,
				})
			}
		}

		return css_ast.Rule{Loc: keyRange.Loc, Data: &css_ast.RBadDeclaration{
			Tokens: p.convertTokens(p.tokens[keyStart:p.index]),
		}}
	}

	keyToken := p.tokens[keyStart]
	keyText := keyToken.DecodedText(p.source.Contents)
	value := p.tokens[valueStart:p.index]

	// CSS custom properties ("--x") must keep their whitespace verbatim
	verbatimWhitespace := strings.HasPrefix(keyText, "--")

	// Remove trailing "!important" (scanning backward over optional whitespace)
	important := false
	i := len(value) - 1
	if i >= 0 && value[i].Kind == css_lexer.TWhitespace {
		i--
	}
	if i >= 0 && value[i].Kind == css_lexer.TIdent && strings.EqualFold(value[i].DecodedText(p.source.Contents), "important") {
		i--
		if i >= 0 && value[i].Kind == css_lexer.TWhitespace {
			i--
		}
		if i >= 0 && value[i].Kind == css_lexer.TDelimExclamation {
			value = value[:i]
			important = true
		}
	}

	result, _ := p.convertTokensHelper(value, css_lexer.TEndOfFile, convertTokensOpts{
		allowImports: true,

		// CSS variables require verbatim whitespace for correctness
		verbatimWhitespace: verbatimWhitespace,
	})

	// Insert or remove whitespace before the first token
	if !verbatimWhitespace && len(result) > 0 {
		if p.options.minifyWhitespace {
			result[0].Whitespace &= ^css_ast.WhitespaceBefore
		} else {
			result[0].Whitespace |= css_ast.WhitespaceBefore
		}
	}

	lowerKeyText := strings.ToLower(keyText)
	key := css_ast.KnownDeclarations[lowerKeyText]

	// Attempt to point out trivial typos
	if key == css_ast.DUnknown {
		if corrected, ok := css_ast.MaybeCorrectDeclarationTypo(lowerKeyText); ok {
			data := p.tracker.MsgData(keyToken.Range, fmt.Sprintf("%q is not a known CSS property", keyText))
			data.Location.Suggestion = corrected
			p.log.AddMsgID(logger.MsgID_CSS_UnsupportedCSSProperty, logger.Msg{Kind: logger.Warning, Data: data,
				Notes: []logger.MsgData{{Text: fmt.Sprintf("Did you mean %q instead?", corrected)}}})
		}
	}

	return css_ast.Rule{Loc: keyRange.Loc, Data: &css_ast.RDeclaration{
		Key:       key,
		KeyText:   keyText,
		KeyRange:  keyToken.Range,
		Value:     result,
		Important: important,
	}}
}
+
+func (p *parser) parseComponentValue() {
+	switch p.current().Kind {
+	case css_lexer.TFunction:
+		p.parseBlock(css_lexer.TFunction, css_lexer.TCloseParen)
+
+	case css_lexer.TOpenParen:
+		p.parseBlock(css_lexer.TOpenParen, css_lexer.TCloseParen)
+
+	case css_lexer.TOpenBrace:
+		p.parseBlock(css_lexer.TOpenBrace, css_lexer.TCloseBrace)
+
+	case css_lexer.TOpenBracket:
+		p.parseBlock(css_lexer.TOpenBracket, css_lexer.TCloseBracket)
+
+	case css_lexer.TEndOfFile:
+		p.unexpected()
+
+	default:
+		p.advance()
+	}
+}
+
+func (p *parser) parseBlock(open css_lexer.T, close css_lexer.T) {
+	current := p.current()
+	matchingStart := current.Range.End() - 1
+	if p.expect(open) {
+		for !p.eat(close) {
+			if p.peek(css_lexer.TEndOfFile) {
+				p.expectWithMatchingLoc(close, logger.Loc{Start: matchingStart})
+				return
+			}
+
+			p.parseComponentValue()
+		}
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser_selector.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser_selector.go
new file mode 100644
index 0000000..d766e8e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_parser_selector.go
@@ -0,0 +1,979 @@
+package css_parser
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// parseSelectorOpts configures how a selector list is parsed
+type parseSelectorOpts struct {
+	composesContext        *composesContext        // CSS modules "composes" context, if any (declared elsewhere)
+	pseudoClassKind        css_ast.PseudoClassKind // Enclosing pseudo-class kind, used in error messages
+	isDeclarationContext   bool                    // Affects whether a leading "&" can be removed when minifying
+	stopOnCloseParen       bool                    // Stop at ")" instead of "{" (for pseudo-class argument lists)
+	onlyOneComplexSelector bool                    // Disallow comma-separated selectors (used for ":global"/":local")
+	noLeadingCombinator    bool                    // Disallow a leading nesting combinator
+}
+
+// parseSelectorList parses a comma-separated list of complex selectors.
+// It returns ok=false if parsing failed. When minifying, duplicate
+// selectors are omitted and removable leading "&" nesting selectors are
+// stripped out.
+func (p *parser) parseSelectorList(opts parseSelectorOpts) (list []css_ast.ComplexSelector, ok bool) {
+	// Parse the first selector
+	sel, good := p.parseComplexSelector(parseComplexSelectorOpts{
+		parseSelectorOpts: opts,
+		isFirst:           true,
+	})
+	if !good {
+		return
+	}
+	list = p.flattenLocalAndGlobalSelectors(list, sel)
+
+	// Parse the remaining selectors
+	if opts.onlyOneComplexSelector {
+		// A comma is not allowed here (e.g. inside ":global(...)")
+		if t := p.current(); t.Kind == css_lexer.TComma {
+			p.prevError = t.Range.Loc
+			kind := fmt.Sprintf(":%s(...)", opts.pseudoClassKind.String())
+			p.log.AddIDWithNotes(logger.MsgID_CSS_CSSSyntaxError, logger.Warning, &p.tracker, t.Range,
+				fmt.Sprintf("Unexpected \",\" inside %q", kind),
+				[]logger.MsgData{{Text: fmt.Sprintf("Different CSS tools behave differently in this case, so esbuild doesn't allow it. "+
+					"Either remove this comma or split this selector up into multiple comma-separated %q selectors instead.", kind)}})
+			return
+		}
+	} else {
+	skip:
+		for {
+			p.eat(css_lexer.TWhitespace)
+			if !p.eat(css_lexer.TComma) {
+				break
+			}
+			p.eat(css_lexer.TWhitespace)
+			sel, good := p.parseComplexSelector(parseComplexSelectorOpts{
+				parseSelectorOpts: opts,
+			})
+			if !good {
+				return
+			}
+
+			// Omit duplicate selectors
+			if p.options.minifySyntax {
+				for _, existing := range list {
+					if sel.Equal(existing, nil) {
+						continue skip
+					}
+				}
+			}
+
+			list = p.flattenLocalAndGlobalSelectors(list, sel)
+		}
+	}
+
+	if p.options.minifySyntax {
+		// Remove a removable leading "&" from every selector after the first
+		for i := 1; i < len(list); i++ {
+			if analyzeLeadingAmpersand(list[i], opts.isDeclarationContext) != cannotRemoveLeadingAmpersand {
+				list[i].Selectors = list[i].Selectors[1:]
+			}
+		}
+
+		switch analyzeLeadingAmpersand(list[0], opts.isDeclarationContext) {
+		case canAlwaysRemoveLeadingAmpersand:
+			list[0].Selectors = list[0].Selectors[1:]
+
+		case canRemoveLeadingAmpersandIfNotFirst:
+			// Try to swap the first selector with a later one so that the
+			// leading "&" of the (previously) first selector can be removed
+			for i := 1; i < len(list); i++ {
+				if sel := list[i].Selectors[0]; !sel.HasNestingSelector() && (sel.Combinator.Byte != 0 || sel.TypeSelector == nil) {
+					list[0].Selectors = list[0].Selectors[1:]
+					list[0], list[i] = list[i], list[0]
+					break
+				}
+			}
+		}
+	}
+
+	ok = true
+	return
+}
+
+// mergeCompoundSelectors merges "source" into "target" in place. This is
+// used when flattening the inner selector of ":local(...)"/":global(...)"
+// into the compound selector that surrounds it.
+func mergeCompoundSelectors(target *css_ast.CompoundSelector, source css_ast.CompoundSelector) {
+	// ".foo:local(&)" => "&.foo"
+	if source.HasNestingSelector() && !target.HasNestingSelector() {
+		target.NestingSelectorLoc = source.NestingSelectorLoc
+	}
+
+	if source.TypeSelector != nil {
+		if target.TypeSelector == nil {
+			// ".foo:local(div)" => "div.foo"
+			target.TypeSelector = source.TypeSelector
+		} else {
+			// "div:local(span)" => "div:is(span)"
+			//
+			// Note: All other implementations of this (Lightning CSS, PostCSS, and
+			// Webpack) do something really weird here. They do this instead:
+			//
+			// "div:local(span)" => "divspan"
+			//
+			// But that just seems so obviously wrong that I'm not going to do that.
+			target.SubclassSelectors = append(target.SubclassSelectors, css_ast.SubclassSelector{
+				Range: source.TypeSelector.Range(),
+				Data: &css_ast.SSPseudoClassWithSelectorList{
+					Kind:      css_ast.PseudoClassIs,
+					Selectors: []css_ast.ComplexSelector{{Selectors: []css_ast.CompoundSelector{{TypeSelector: source.TypeSelector}}}},
+				},
+			})
+		}
+	}
+
+	// ".foo:local(.bar)" => ".foo.bar"
+	target.SubclassSelectors = append(target.SubclassSelectors, source.SubclassSelectors...)
+}
+
+// containsLocalOrGlobalSelector returns true if the given complex selector
+// uses the CSS modules ":local"/":global" syntax anywhere, either in bare
+// pseudo-class form or with a selector list argument.
+func containsLocalOrGlobalSelector(sel css_ast.ComplexSelector) bool {
+	for _, s := range sel.Selectors {
+		for _, ss := range s.SubclassSelectors {
+			switch pseudo := ss.Data.(type) {
+			case *css_ast.SSPseudoClass:
+				// Bare ":local" or ":global" without arguments
+				if pseudo.Name == "global" || pseudo.Name == "local" {
+					return true
+				}
+
+			case *css_ast.SSPseudoClassWithSelectorList:
+				// ":local(...)" or ":global(...)" with a selector list
+				if pseudo.Kind == css_ast.PseudoClassGlobal || pseudo.Kind == css_ast.PseudoClassLocal {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+// This handles the ":local()" and ":global()" annotations from CSS modules.
+// The selector "sel" is appended to "list" after any such annotations have
+// been flattened away; the updated list is returned. The flattening work is
+// only done when CSS modules symbol handling is enabled and the selector
+// actually contains a ":local" or ":global".
+func (p *parser) flattenLocalAndGlobalSelectors(list []css_ast.ComplexSelector, sel css_ast.ComplexSelector) []css_ast.ComplexSelector {
+	// Only do the work to flatten the whole list if there's a ":local" or a ":global"
+	if p.options.symbolMode != symbolModeDisabled && containsLocalOrGlobalSelector(sel) {
+		var selectors []css_ast.CompoundSelector
+
+		for _, s := range sel.Selectors {
+			oldSubclassSelectors := s.SubclassSelectors
+			s.SubclassSelectors = make([]css_ast.SubclassSelector, 0, len(oldSubclassSelectors))
+
+			for _, ss := range oldSubclassSelectors {
+				switch pseudo := ss.Data.(type) {
+				case *css_ast.SSPseudoClass:
+					if pseudo.Name == "global" || pseudo.Name == "local" {
+						// Remove bare ":global" and ":local" pseudo-classes
+						continue
+					}
+
+				case *css_ast.SSPseudoClassWithSelectorList:
+					if pseudo.Kind == css_ast.PseudoClassGlobal || pseudo.Kind == css_ast.PseudoClassLocal {
+						inner := pseudo.Selectors[0].Selectors
+
+						// Replace this pseudo-class with all inner compound selectors.
+						// The first inner compound selector is merged with the compound
+						// selector before it and the last inner compound selector is
+						// merged with the compound selector after it:
+						//
+						// "div:local(.a .b):hover" => "div.a b:hover"
+						//
+						// This behavior is really strange since this is not how anything
+						// involving pseudo-classes in real CSS works at all. However, all
+						// other implementations (Lightning CSS, PostCSS, and Webpack) are
+						// consistent with this strange behavior, so we do it too.
+						if inner[0].Combinator.Byte == 0 {
+							mergeCompoundSelectors(&s, inner[0])
+							inner = inner[1:]
+						} else {
+							// "div:local(+ .foo):hover" => "div + .foo:hover"
+						}
+						if n := len(inner); n > 0 {
+							if !s.IsInvalidBecauseEmpty() {
+								// Don't add this selector if it consisted only of a bare ":global" or ":local"
+								selectors = append(selectors, s)
+							}
+							selectors = append(selectors, inner[:n-1]...)
+							s = inner[n-1]
+						}
+						continue
+					}
+				}
+
+				s.SubclassSelectors = append(s.SubclassSelectors, ss)
+			}
+
+			if !s.IsInvalidBecauseEmpty() {
+				// Don't add this selector if it consisted only of a bare ":global" or ":local"
+				selectors = append(selectors, s)
+			}
+		}
+
+		if len(selectors) == 0 {
+			// Treat a bare ":global" or ":local" as a bare "&" nesting selector
+			selectors = append(selectors, css_ast.CompoundSelector{
+				NestingSelectorLoc:        ast.MakeIndex32(uint32(sel.Selectors[0].Range().Loc.Start)),
+				WasEmptyFromLocalOrGlobal: true,
+			})
+
+			// Make sure we report that nesting is present so that it can be lowered
+			p.nestingIsPresent = true
+		}
+
+		sel.Selectors = selectors
+	}
+
+	return append(list, sel)
+}
+
+// leadingAmpersand classifies whether a leading "&" nesting selector in a
+// complex selector can be removed when minifying (see analyzeLeadingAmpersand)
+type leadingAmpersand uint8
+
+const (
+	cannotRemoveLeadingAmpersand leadingAmpersand = iota
+	canAlwaysRemoveLeadingAmpersand
+	canRemoveLeadingAmpersandIfNotFirst
+)
+
+// analyzeLeadingAmpersand determines whether the leading "&" of the given
+// complex selector is redundant and can be dropped during minification.
+// The examples below show the transformation that removal would enable.
+func analyzeLeadingAmpersand(sel css_ast.ComplexSelector, isDeclarationContext bool) leadingAmpersand {
+	if len(sel.Selectors) > 1 {
+		if first := sel.Selectors[0]; first.IsSingleAmpersand() {
+			if second := sel.Selectors[1]; second.Combinator.Byte == 0 && second.HasNestingSelector() {
+				// ".foo { & &.bar {} }" => ".foo { & &.bar {} }"
+			} else if second.Combinator.Byte != 0 || second.TypeSelector == nil || !isDeclarationContext {
+				// "& + div {}" => "+ div {}"
+				// "& div {}" => "div {}"
+				// ".foo { & + div {} }" => ".foo { + div {} }"
+				// ".foo { & + &.bar {} }" => ".foo { + &.bar {} }"
+				// ".foo { & :hover {} }" => ".foo { :hover {} }"
+				return canAlwaysRemoveLeadingAmpersand
+			} else {
+				// ".foo { & div {} }"
+				// ".foo { .bar, & div {} }" => ".foo { .bar, div {} }"
+				return canRemoveLeadingAmpersandIfNotFirst
+			}
+		}
+	} else {
+		// "& {}" => "& {}"
+	}
+	return cannotRemoveLeadingAmpersand
+}
+
+// parseComplexSelectorOpts extends parseSelectorOpts for one complex selector
+type parseComplexSelectorOpts struct {
+	parseSelectorOpts
+	isFirst bool // True only for the first selector in a selector list
+}
+
+// parseComplexSelector parses one complex selector: a sequence of compound
+// selectors separated by optional combinators. Parsing stops at a comma,
+// end-of-file, or the terminator token ("{" normally, ")" when
+// stopOnCloseParen is set). Returns ok=false on failure.
+func (p *parser) parseComplexSelector(opts parseComplexSelectorOpts) (result css_ast.ComplexSelector, ok bool) {
+	// This is an extension: https://drafts.csswg.org/css-nesting-1/
+	var combinator css_ast.Combinator
+	if !opts.noLeadingCombinator {
+		combinator = p.parseCombinator()
+		if combinator.Byte != 0 {
+			// A leading combinator implies CSS nesting
+			p.nestingIsPresent = true
+			p.eat(css_lexer.TWhitespace)
+		}
+	}
+
+	// Parent
+	sel, good := p.parseCompoundSelector(parseComplexSelectorOpts{
+		parseSelectorOpts: opts.parseSelectorOpts,
+		isFirst:           opts.isFirst,
+	})
+	if !good {
+		return
+	}
+	sel.Combinator = combinator
+	result.Selectors = append(result.Selectors, sel)
+
+	stop := css_lexer.TOpenBrace
+	if opts.stopOnCloseParen {
+		stop = css_lexer.TCloseParen
+	}
+	for {
+		p.eat(css_lexer.TWhitespace)
+		if p.peek(css_lexer.TEndOfFile) || p.peek(css_lexer.TComma) || p.peek(stop) {
+			break
+		}
+
+		// Optional combinator
+		combinator := p.parseCombinator()
+		if combinator.Byte != 0 {
+			p.eat(css_lexer.TWhitespace)
+		}
+
+		// Child
+		sel, good := p.parseCompoundSelector(parseComplexSelectorOpts{
+			parseSelectorOpts: opts.parseSelectorOpts,
+		})
+		if !good {
+			return
+		}
+		sel.Combinator = combinator
+		result.Selectors = append(result.Selectors, sel)
+	}
+
+	ok = true
+	return
+}
+
+// nameToken captures the current token as a name token without advancing
+// the parser. The text is the decoded (unescaped) form of the token.
+func (p *parser) nameToken() css_ast.NameToken {
+	t := p.current()
+	return css_ast.NameToken{
+		Kind:  t.Kind,
+		Range: t.Range,
+		Text:  p.decoded(),
+	}
+}
+
+// parseCompoundSelector parses one compound selector: an optional leading
+// "&" nesting selector, an optional type selector (possibly namespaced),
+// and any number of subclass selectors (#id, .class, [attr], pseudo-classes
+// and pseudo-elements, and "&"). Returns ok=false on failure.
+func (p *parser) parseCompoundSelector(opts parseComplexSelectorOpts) (sel css_ast.CompoundSelector, ok bool) {
+	startLoc := p.current().Range.Loc
+
+	// This is an extension: https://drafts.csswg.org/css-nesting-1/
+	hasLeadingNestingSelector := p.peek(css_lexer.TDelimAmpersand)
+	if hasLeadingNestingSelector {
+		p.nestingIsPresent = true
+		sel.NestingSelectorLoc = ast.MakeIndex32(uint32(startLoc.Start))
+		p.advance()
+	}
+
+	// Parse the type selector
+	typeSelectorLoc := p.current().Range.Loc
+	switch p.current().Kind {
+	case css_lexer.TDelimBar, css_lexer.TIdent, css_lexer.TDelimAsterisk:
+		nsName := css_ast.NamespacedName{}
+		if !p.peek(css_lexer.TDelimBar) {
+			nsName.Name = p.nameToken()
+			p.advance()
+		} else {
+			// Hack: Create an empty "identifier" to represent this
+			nsName.Name.Kind = css_lexer.TIdent
+		}
+		if p.eat(css_lexer.TDelimBar) {
+			// "ns|element" or "*|element" (the part before "|" is a namespace)
+			if !p.peek(css_lexer.TIdent) && !p.peek(css_lexer.TDelimAsterisk) {
+				p.expect(css_lexer.TIdent)
+				return
+			}
+			prefix := nsName.Name
+			nsName.NamespacePrefix = &prefix
+			nsName.Name = p.nameToken()
+			p.advance()
+		}
+		sel.TypeSelector = &nsName
+	}
+
+	// Parse the subclass selectors
+subclassSelectors:
+	for {
+		subclassToken := p.current()
+
+		switch subclassToken.Kind {
+		case css_lexer.THash:
+			// "#id" (only hash tokens flagged as identifiers are valid here)
+			if (subclassToken.Flags & css_lexer.IsID) == 0 {
+				break subclassSelectors
+			}
+			nameLoc := logger.Loc{Start: subclassToken.Range.Loc.Start + 1}
+			name := p.decoded()
+			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
+				Range: subclassToken.Range,
+				Data: &css_ast.SSHash{
+					Name: p.symbolForName(nameLoc, name),
+				},
+			})
+			p.advance()
+
+		case css_lexer.TDelimDot:
+			// ".class"
+			p.advance()
+			nameRange := p.current().Range
+			name := p.decoded()
+			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
+				Range: logger.Range{Loc: subclassToken.Range.Loc, Len: nameRange.End() - subclassToken.Range.Loc.Start},
+				Data: &css_ast.SSClass{
+					Name: p.symbolForName(nameRange.Loc, name),
+				},
+			})
+			if !p.expect(css_lexer.TIdent) {
+				return
+			}
+
+		case css_lexer.TOpenBracket:
+			// "[attr]" and friends
+			attr, r := p.parseAttributeSelector()
+			if r.Len == 0 {
+				return
+			}
+			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
+				Range: r,
+				Data:  &attr,
+			})
+
+		case css_lexer.TColon:
+			if p.next().Kind == css_lexer.TColon {
+				// Special-case the start of the pseudo-element selector section
+				for p.current().Kind == css_lexer.TColon {
+					firstColonLoc := p.current().Range.Loc
+					isElement := p.next().Kind == css_lexer.TColon
+					if isElement {
+						p.advance()
+					}
+					pseudo, r := p.parsePseudoClassSelector(firstColonLoc, isElement)
+
+					// https://www.w3.org/TR/selectors-4/#single-colon-pseudos
+					// The four Level 2 pseudo-elements (::before, ::after, ::first-line,
+					// and ::first-letter) may, for legacy reasons, be represented using
+					// the <pseudo-class-selector> grammar, with only a single ":"
+					// character at their start.
+					if p.options.minifySyntax && isElement {
+						if pseudo, ok := pseudo.(*css_ast.SSPseudoClass); ok && len(pseudo.Args) == 0 {
+							switch pseudo.Name {
+							case "before", "after", "first-line", "first-letter":
+								pseudo.IsElement = false
+							}
+						}
+					}
+
+					sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
+						Range: r,
+						Data:  pseudo,
+					})
+				}
+				break subclassSelectors
+			}
+
+			// Single-colon pseudo-class (":hover", ":is(...)", etc.)
+			pseudo, r := p.parsePseudoClassSelector(subclassToken.Range.Loc, false)
+			sel.SubclassSelectors = append(sel.SubclassSelectors, css_ast.SubclassSelector{
+				Range: r,
+				Data:  pseudo,
+			})
+
+		case css_lexer.TDelimAmpersand:
+			// This is an extension: https://drafts.csswg.org/css-nesting-1/
+			p.nestingIsPresent = true
+			sel.NestingSelectorLoc = ast.MakeIndex32(uint32(subclassToken.Range.Loc.Start))
+			p.advance()
+
+		default:
+			break subclassSelectors
+		}
+	}
+
+	// The compound selector must be non-empty
+	if sel.IsInvalidBecauseEmpty() {
+		p.unexpected()
+		return
+	}
+
+	// Note: "&div {}" was originally valid, but is now an invalid selector:
+	// https://github.com/w3c/csswg-drafts/issues/8662#issuecomment-1514977935.
+	// This is because SASS already uses that syntax to mean something very
+	// different, so that syntax has been removed to avoid mistakes.
+	if hasLeadingNestingSelector && sel.TypeSelector != nil {
+		r := logger.Range{Loc: typeSelectorLoc, Len: p.at(p.index-1).Range.End() - typeSelectorLoc.Start}
+		text := sel.TypeSelector.Name.Text
+		if sel.TypeSelector.NamespacePrefix != nil {
+			text = fmt.Sprintf("%s|%s", sel.TypeSelector.NamespacePrefix.Text, text)
+		}
+		var howToFix string
+		suggestion := p.source.TextForRange(r)
+		if opts.isFirst {
+			suggestion = fmt.Sprintf(":is(%s)", suggestion)
+			howToFix = "You can wrap this selector in \":is(...)\" as a workaround. "
+		} else {
+			r = logger.Range{Loc: startLoc, Len: r.End() - startLoc.Start}
+			suggestion += "&"
+			howToFix = "You can move the \"&\" to the end of this selector as a workaround. "
+		}
+		msg := logger.Msg{
+			Kind: logger.Warning,
+			Data: p.tracker.MsgData(r, fmt.Sprintf("Cannot use type selector %q directly after nesting selector \"&\"", text)),
+			Notes: []logger.MsgData{{Text: "CSS nesting syntax does not allow the \"&\" selector to come before a type selector. " +
+				howToFix +
+				"This restriction exists to avoid problems with SASS nesting, where the same syntax means something very different " +
+				"that has no equivalent in real CSS (appending a suffix to the parent selector)."}},
+		}
+		msg.Data.Location.Suggestion = suggestion
+		p.log.AddMsgID(logger.MsgID_CSS_CSSSyntaxError, msg)
+		return
+	}
+
+	// The type selector must always come first
+	switch p.current().Kind {
+	case css_lexer.TDelimBar, css_lexer.TIdent, css_lexer.TDelimAsterisk:
+		p.unexpected()
+		return
+	}
+
+	ok = true
+	return
+}
+
+// parseAttributeSelector parses one "[...]" attribute selector, including
+// the optional namespaced name, matcher operator ("=", "~=", "|=", "^=",
+// "$=", "*="), matcher value, and case-sensitivity modifier ("i"/"s").
+// The returned range has a length of zero if parsing failed.
+func (p *parser) parseAttributeSelector() (attr css_ast.SSAttribute, r logger.Range) {
+	matchingLoc := p.current().Range.Loc
+	p.advance()
+
+	// Parse the namespaced name
+	switch p.current().Kind {
+	case css_lexer.TDelimBar, css_lexer.TDelimAsterisk:
+		// "[|x]"
+		// "[*|x]"
+		if p.peek(css_lexer.TDelimAsterisk) {
+			prefix := p.nameToken()
+			p.advance()
+			attr.NamespacedName.NamespacePrefix = &prefix
+		} else {
+			// "[|attr]" is equivalent to "[attr]". From the specification:
+			// "In keeping with the Namespaces in the XML recommendation, default
+			// namespaces do not apply to attributes, therefore attribute selectors
+			// without a namespace component apply only to attributes that have no
+			// namespace (equivalent to |attr)."
+		}
+		if !p.expect(css_lexer.TDelimBar) {
+			return
+		}
+		attr.NamespacedName.Name = p.nameToken()
+		if !p.expect(css_lexer.TIdent) {
+			return
+		}
+
+	default:
+		// "[x]"
+		// "[x|y]"
+		attr.NamespacedName.Name = p.nameToken()
+		if !p.expect(css_lexer.TIdent) {
+			return
+		}
+		// A "|" here is a namespace separator unless it's part of "|="
+		if p.next().Kind != css_lexer.TDelimEquals && p.eat(css_lexer.TDelimBar) {
+			prefix := attr.NamespacedName.Name
+			attr.NamespacedName.NamespacePrefix = &prefix
+			attr.NamespacedName.Name = p.nameToken()
+			if !p.expect(css_lexer.TIdent) {
+				return
+			}
+		}
+	}
+
+	// Parse the optional matcher operator
+	p.eat(css_lexer.TWhitespace)
+	if p.eat(css_lexer.TDelimEquals) {
+		attr.MatcherOp = "="
+	} else {
+		switch p.current().Kind {
+		case css_lexer.TDelimTilde:
+			attr.MatcherOp = "~="
+		case css_lexer.TDelimBar:
+			attr.MatcherOp = "|="
+		case css_lexer.TDelimCaret:
+			attr.MatcherOp = "^="
+		case css_lexer.TDelimDollar:
+			attr.MatcherOp = "$="
+		case css_lexer.TDelimAsterisk:
+			attr.MatcherOp = "*="
+		}
+		if attr.MatcherOp != "" {
+			// Two-character operators must be followed by "="
+			p.advance()
+			if !p.expect(css_lexer.TDelimEquals) {
+				return
+			}
+		}
+	}
+
+	// Parse the optional matcher value
+	if attr.MatcherOp != "" {
+		p.eat(css_lexer.TWhitespace)
+		if !p.peek(css_lexer.TString) && !p.peek(css_lexer.TIdent) {
+			p.unexpected()
+		}
+		attr.MatcherValue = p.decoded()
+		p.advance()
+		p.eat(css_lexer.TWhitespace)
+		// Optional case-sensitivity modifier: "i"/"I" or "s"/"S"
+		if p.peek(css_lexer.TIdent) {
+			if modifier := p.decoded(); len(modifier) == 1 {
+				if c := modifier[0]; c == 'i' || c == 'I' || c == 's' || c == 'S' {
+					attr.MatcherModifier = c
+					p.advance()
+				}
+			}
+		}
+	}
+
+	closeRange := p.current().Range
+	if !p.expectWithMatchingLoc(css_lexer.TCloseBracket, matchingLoc) {
+		closeRange.Len = 0
+	}
+	r = logger.Range{Loc: matchingLoc, Len: closeRange.End() - matchingLoc.Start}
+	return
+}
+
+// parsePseudoClassSelector parses the remainder of a pseudo-class
+// (":name" or ":fn(...)") or pseudo-element ("::name") selector. "loc" is
+// the location of the first colon (already consumed by the caller via the
+// initial advance below) and "isElement" is true for "::" pseudo-elements.
+// Known functional pseudo-classes with selector list arguments (":is",
+// ":not", ":has", ":where", ":nth-*", ":local", ":global") are parsed into
+// structured form; anything else falls back to a raw token list.
+func (p *parser) parsePseudoClassSelector(loc logger.Loc, isElement bool) (css_ast.SS, logger.Range) {
+	p.advance()
+
+	if p.peek(css_lexer.TFunction) {
+		text := p.decoded()
+		matchingLoc := logger.Loc{Start: p.current().Range.End() - 1}
+		p.advance()
+
+		// Potentially parse a pseudo-class with a selector list
+		if !isElement {
+			var kind css_ast.PseudoClassKind
+			local := p.makeLocalSymbols
+			ok := true
+			switch text {
+			case "global":
+				kind = css_ast.PseudoClassGlobal
+				if p.options.symbolMode != symbolModeDisabled {
+					local = false
+				}
+			case "has":
+				kind = css_ast.PseudoClassHas
+			case "is":
+				kind = css_ast.PseudoClassIs
+			case "local":
+				kind = css_ast.PseudoClassLocal
+				if p.options.symbolMode != symbolModeDisabled {
+					local = true
+				}
+			case "not":
+				kind = css_ast.PseudoClassNot
+			case "nth-child":
+				kind = css_ast.PseudoClassNthChild
+			case "nth-last-child":
+				kind = css_ast.PseudoClassNthLastChild
+			case "nth-of-type":
+				kind = css_ast.PseudoClassNthOfType
+			case "nth-last-of-type":
+				kind = css_ast.PseudoClassNthLastOfType
+			case "where":
+				kind = css_ast.PseudoClassWhere
+			default:
+				ok = false
+			}
+			if ok {
+				// Remember the parser position so we can rewind and fall back
+				// to raw tokens if structured parsing fails
+				old := p.index
+				if kind.HasNthIndex() {
+					p.eat(css_lexer.TWhitespace)
+
+					// Parse the "An+B" syntax
+					if index, ok := p.parseNthIndex(); ok {
+						var selectors []css_ast.ComplexSelector
+
+						// Parse the optional "of" clause
+						if (kind == css_ast.PseudoClassNthChild || kind == css_ast.PseudoClassNthLastChild) &&
+							p.peek(css_lexer.TIdent) && strings.EqualFold(p.decoded(), "of") {
+							p.advance()
+							p.eat(css_lexer.TWhitespace)
+
+							// Contain the effects of ":local" and ":global"
+							oldLocal := p.makeLocalSymbols
+							selectors, ok = p.parseSelectorList(parseSelectorOpts{
+								stopOnCloseParen:    true,
+								noLeadingCombinator: true,
+							})
+							p.makeLocalSymbols = oldLocal
+						}
+
+						// "2n+0" => "2n"
+						if p.options.minifySyntax {
+							index.Minify()
+						}
+
+						// Match the closing ")"
+						if ok {
+							closeRange := p.current().Range
+							if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
+								closeRange.Len = 0
+							}
+							return &css_ast.SSPseudoClassWithSelectorList{Kind: kind, Selectors: selectors, Index: index},
+								logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
+						}
+					}
+				} else {
+					p.eat(css_lexer.TWhitespace)
+
+					// ":local" forces local names and ":global" forces global names
+					oldLocal := p.makeLocalSymbols
+					p.makeLocalSymbols = local
+					selectors, ok := p.parseSelectorList(parseSelectorOpts{
+						pseudoClassKind:        kind,
+						stopOnCloseParen:       true,
+						onlyOneComplexSelector: kind == css_ast.PseudoClassGlobal || kind == css_ast.PseudoClassLocal,
+					})
+					p.makeLocalSymbols = oldLocal
+
+					// Match the closing ")"
+					if ok {
+						closeRange := p.current().Range
+						if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
+							closeRange.Len = 0
+						}
+						return &css_ast.SSPseudoClassWithSelectorList{Kind: kind, Selectors: selectors},
+							logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
+					}
+				}
+				// Structured parsing failed: rewind to the saved position
+				p.index = old
+			}
+		}
+
+		// Fallback: keep the arguments as a raw token list
+		args := p.convertTokens(p.parseAnyValue())
+		closeRange := p.current().Range
+		if !p.expectWithMatchingLoc(css_lexer.TCloseParen, matchingLoc) {
+			closeRange.Len = 0
+		}
+		return &css_ast.SSPseudoClass{IsElement: isElement, Name: text, Args: args},
+			logger.Range{Loc: loc, Len: closeRange.End() - loc.Start}
+	}
+
+	nameRange := p.current().Range
+	name := p.decoded()
+	sel := css_ast.SSPseudoClass{IsElement: isElement}
+	if p.expect(css_lexer.TIdent) {
+		sel.Name = name
+
+		// ":local .local_name :global .global_name {}"
+		// ":local { .local_name { :global { .global_name {} } }"
+		if p.options.symbolMode != symbolModeDisabled {
+			switch name {
+			case "local":
+				p.makeLocalSymbols = true
+			case "global":
+				p.makeLocalSymbols = false
+			}
+		}
+	} else {
+		nameRange.Len = 0
+	}
+	return &sel, logger.Range{Loc: loc, Len: nameRange.End() - loc.Start}
+}
+
+// parseAnyValue consumes tokens until an unbalanced close token, a
+// top-level ";" or "!", or end-of-file is reached, and returns the
+// consumed tokens. Nested parens/brackets/braces are tracked with a stack
+// so that ";" and "!" inside them do not terminate the value. An error is
+// reported if no tokens were consumed.
+func (p *parser) parseAnyValue() []css_lexer.Token {
+	// Reference: https://drafts.csswg.org/css-syntax-3/#typedef-declaration-value
+
+	p.stack = p.stack[:0] // Reuse allocated memory
+	start := p.index
+
+loop:
+	for {
+		switch p.current().Kind {
+		case css_lexer.TCloseParen, css_lexer.TCloseBracket, css_lexer.TCloseBrace:
+			// Stop at a close token that doesn't match the innermost open token
+			last := len(p.stack) - 1
+			if last < 0 || !p.peek(p.stack[last]) {
+				break loop
+			}
+			p.stack = p.stack[:last]
+
+		case css_lexer.TSemicolon, css_lexer.TDelimExclamation:
+			// Only stop at ";" or "!" when not nested inside a block
+			if len(p.stack) == 0 {
+				break loop
+			}
+
+		case css_lexer.TOpenParen, css_lexer.TFunction:
+			p.stack = append(p.stack, css_lexer.TCloseParen)
+
+		case css_lexer.TOpenBracket:
+			p.stack = append(p.stack, css_lexer.TCloseBracket)
+
+		case css_lexer.TOpenBrace:
+			p.stack = append(p.stack, css_lexer.TCloseBrace)
+
+		case css_lexer.TEndOfFile:
+			break loop
+		}
+
+		p.advance()
+	}
+
+	tokens := p.tokens[start:p.index]
+	if len(tokens) == 0 {
+		p.unexpected()
+	}
+	return tokens
+}
+
+// parseCombinator parses an optional selector combinator (">", "+", or
+// "~"), advancing past it if present. A zero-valued Combinator (Byte == 0)
+// is returned when the current token is not a combinator; descendant
+// combinators (whitespace) are handled by the caller.
+func (p *parser) parseCombinator() css_ast.Combinator {
+	t := p.current()
+
+	switch t.Kind {
+	case css_lexer.TDelimGreaterThan:
+		p.advance()
+		return css_ast.Combinator{Loc: t.Range.Loc, Byte: '>'}
+
+	case css_lexer.TDelimPlus:
+		p.advance()
+		return css_ast.Combinator{Loc: t.Range.Loc, Byte: '+'}
+
+	case css_lexer.TDelimTilde:
+		p.advance()
+		return css_ast.Combinator{Loc: t.Range.Loc, Byte: '~'}
+
+	default:
+		return css_ast.Combinator{}
+	}
+}
+
+// parseInteger validates that "text" consists only of decimal digits and
+// returns it with leading zeros trimmed ("000" becomes "0"). It returns
+// false for an empty string or any non-digit character; signs are handled
+// by the caller.
+func parseInteger(text string) (string, bool) {
+	n := len(text)
+	if n == 0 {
+		return "", false
+	}
+
+	// Trim leading zeros
+	start := 0
+	for start < n && text[start] == '0' {
+		start++
+	}
+
+	// Make sure remaining characters are digits
+	if start == n {
+		return "0", true
+	}
+	for i := start; i < n; i++ {
+		if c := text[i]; c < '0' || c > '9' {
+			return "", false
+		}
+	}
+	return text[start:], true
+}
+
+// parseNthIndex parses the "An+B" microsyntax used by ":nth-child()" and
+// friends, including the "even"/"odd" keywords and a bare integer. Note
+// that the lexer may merge signs and digits into surrounding ident/number/
+// dimension tokens, which is why the sign handling below operates on the
+// decoded token text. Returns false (after reporting an error) on failure.
+func (p *parser) parseNthIndex() (css_ast.NthIndex, bool) {
+	type sign uint8
+	const (
+		none sign = iota
+		negative
+		positive
+	)
+
+	// Reference: https://drafts.csswg.org/css-syntax-3/#anb-microsyntax
+	t0 := p.current()
+	text0 := p.decoded()
+
+	// Handle "even" and "odd"
+	if t0.Kind == css_lexer.TIdent && (text0 == "even" || text0 == "odd") {
+		p.advance()
+		p.eat(css_lexer.TWhitespace)
+		return css_ast.NthIndex{B: text0}, true
+	}
+
+	// Handle a single number
+	if t0.Kind == css_lexer.TNumber {
+		bNeg := false
+		if strings.HasPrefix(text0, "-") {
+			bNeg = true
+			text0 = text0[1:]
+		} else {
+			text0 = strings.TrimPrefix(text0, "+")
+		}
+		if b, ok := parseInteger(text0); ok {
+			if bNeg {
+				b = "-" + b
+			}
+			p.advance()
+			p.eat(css_lexer.TWhitespace)
+			return css_ast.NthIndex{B: b}, true
+		}
+		p.unexpected()
+		return css_ast.NthIndex{}, false
+	}
+
+	aSign := none
+	if p.eat(css_lexer.TDelimPlus) {
+		aSign = positive
+		t0 = p.current()
+		text0 = p.decoded()
+	}
+
+	// Everything from here must be able to contain an "n"
+	if t0.Kind != css_lexer.TIdent && t0.Kind != css_lexer.TDimension {
+		p.unexpected()
+		return css_ast.NthIndex{}, false
+	}
+
+	// Check for a leading sign
+	if aSign == none {
+		if strings.HasPrefix(text0, "-") {
+			aSign = negative
+			text0 = text0[1:]
+		} else {
+			text0 = strings.TrimPrefix(text0, "+")
+		}
+	}
+
+	// The string must contain an "n"
+	n := strings.IndexByte(text0, 'n')
+	if n < 0 {
+		p.unexpected()
+		return css_ast.NthIndex{}, false
+	}
+
+	// Parse the number before the "n"
+	var a string
+	if n == 0 {
+		// A bare "n" means a coefficient of 1 (or -1 with a leading sign)
+		if aSign == negative {
+			a = "-1"
+		} else {
+			a = "1"
+		}
+	} else if aInt, ok := parseInteger(text0[:n]); ok {
+		if aSign == negative {
+			aInt = "-" + aInt
+		}
+		a = aInt
+	} else {
+		p.unexpected()
+		return css_ast.NthIndex{}, false
+	}
+	text0 = text0[n+1:]
+
+	// Parse the stuff after the "n"
+	bSign := none
+	if strings.HasPrefix(text0, "-") {
+		// The "-B" part may be fused into the same token as "An"
+		text0 = text0[1:]
+		if b, ok := parseInteger(text0); ok {
+			p.advance()
+			p.eat(css_lexer.TWhitespace)
+			return css_ast.NthIndex{A: a, B: "-" + b}, true
+		}
+		bSign = negative
+	}
+	if text0 != "" {
+		p.unexpected()
+		return css_ast.NthIndex{}, false
+	}
+	p.advance()
+	p.eat(css_lexer.TWhitespace)
+
+	// Parse an optional sign delimiter
+	if bSign == none {
+		if p.eat(css_lexer.TDelimMinus) {
+			bSign = negative
+			p.eat(css_lexer.TWhitespace)
+		} else if p.eat(css_lexer.TDelimPlus) {
+			bSign = positive
+			p.eat(css_lexer.TWhitespace)
+		}
+	}
+
+	// Parse an optional trailing number
+	t1 := p.current()
+	text1 := p.decoded()
+	if t1.Kind == css_lexer.TNumber {
+		if bSign == none {
+			if strings.HasPrefix(text1, "-") {
+				bSign = negative
+				text1 = text1[1:]
+			} else if strings.HasPrefix(text1, "+") {
+				text1 = text1[1:]
+			}
+		}
+		if b, ok := parseInteger(text1); ok {
+			if bSign == negative {
+				b = "-" + b
+			}
+			p.advance()
+			p.eat(css_lexer.TWhitespace)
+			return css_ast.NthIndex{A: a, B: b}, true
+		}
+	}
+
+	// If there is a trailing sign, then there must also be a trailing number
+	if bSign != none {
+		p.expect(css_lexer.TNumber)
+		return css_ast.NthIndex{}, false
+	}
+
+	return css_ast.NthIndex{A: a}, true
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_reduce_calc.go b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_reduce_calc.go
new file mode 100644
index 0000000..19b4582
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_parser/css_reduce_calc.go
@@ -0,0 +1,605 @@
+package css_parser
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// tryToReduceCalcExpression attempts to constant-fold the contents of a
// "calc()" token. If the children parse into the internal calc AST, the
// tree is partially simplified per the CSS spec and, when the simplified
// tree serializes successfully, the reduced token is returned (re-labeled
// as a "calc" function when the result is a parenthesized group). On any
// failure the original token is returned unchanged.
func (p *parser) tryToReduceCalcExpression(token css_ast.Token) css_ast.Token {
	if term := tryToParseCalcTerm(*token.Children); term != nil {
		// Whitespace around the inner operators is dropped when minifying
		whitespace := css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
		if p.options.minifyWhitespace {
			whitespace = 0
		}
		term = term.partiallySimplify()
		if result, ok := term.convertToToken(whitespace); ok {
			// A top-level parenthesized group becomes the "calc" function itself
			if result.Kind == css_lexer.TOpenParen {
				result.Kind = css_lexer.TFunction
				result.Text = "calc"
			}
			result.Loc = token.Loc
			result.Whitespace = css_ast.WhitespaceBefore | css_ast.WhitespaceAfter
			return result
		}
	}
	return token
}
+
// calcTermWithOp pairs a calc AST node with the source location of the
// operator ("+", "-", "*", or "/") that preceded it inside its parent.
type calcTermWithOp struct {
	data  calcTerm
	opLoc logger.Loc
}

// calcTerm is one node of the internal representation of a math function.
// See: https://www.w3.org/TR/css-values-4/#calc-internal
type calcTerm interface {
	convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool)
	partiallySimplify() calcTerm
}

// calcSum is a Sum node: its terms are added (subtraction is represented
// by wrapping a term in calcNegate).
type calcSum struct {
	terms []calcTermWithOp
}

// calcProduct is a Product node: its terms are multiplied (division is
// represented by wrapping a term in calcInvert).
type calcProduct struct {
	terms []calcTermWithOp
}

// calcNegate represents the negation (0 - x) of its single child term.
type calcNegate struct {
	term calcTermWithOp
}

// calcInvert represents the reciprocal (1 / x) of its single child term.
type calcInvert struct {
	term calcTermWithOp
}

// calcNumeric is a leaf holding a number with an optional unit: "" for a
// plain number, "%" for a percentage, anything else for a dimension.
type calcNumeric struct {
	unit   string
	number float64
	loc    logger.Loc
}

// calcValue is a leaf wrapping an arbitrary token that could not be
// parsed into a numeric value (e.g. an identifier or an operator
// delimiter). isInvalidPlusOrMinus marks a "+" or "-" delimiter that is
// missing the whitespace the CSS syntax requires around it, so it must
// not be treated as an addition/subtraction operator.
type calcValue struct {
	token                css_ast.Token
	isInvalidPlusOrMinus bool
}
+
// floatToStringForCalc formats a float for use inside a reduced calc()
// expression. It prints with five fractional digits, strips trailing
// zeros and a dangling decimal point, and drops the leading zero of
// "0.x" / "-0.x". It reports false for NaN/±Inf or when the shortened
// text does not parse back to exactly the same value.
func floatToStringForCalc(a float64) (string, bool) {
	// NaN and ±Inf have no finite serialization
	if math.IsNaN(a) || math.IsInf(a, 0) {
		return "", false
	}

	// Format, then trim the trailing zeros; "%.05f" always emits a decimal
	// point, so the trim can never eat into the integer part. Remove the
	// point itself too when nothing follows it.
	text := strings.TrimRight(fmt.Sprintf("%.05f", a), "0")
	text = strings.TrimSuffix(text, ".")

	// Shorten "0.5" to ".5" and "-0.5" to "-.5"
	switch {
	case strings.HasPrefix(text, "0."):
		text = text[1:]
	case strings.HasPrefix(text, "-0."):
		text = "-" + text[2:]
	}

	// Only accept the result if it round-trips exactly back to "a"
	if number, err := strconv.ParseFloat(text, 64); err != nil || number != a {
		return "", false
	}
	return text, true
}
+
// convertToToken serializes a Sum node as a parenthesized token list:
// terms joined with " + ", except Negate children and negative numeric
// children, which are emitted with " - " instead.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcSum) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)

	// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
	if product, ok := c.terms[0].data.(*calcProduct); ok {
		token, ok := product.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		// Splice the product's children in directly instead of nesting parens
		tokens = append(tokens, *token.Children...)
	} else {
		token, ok := c.terms[0].data.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, token)
	}

	for _, term := range c.terms[1:] {
		// If child is a Negate node, append " - " to s, then serialize the Negate’s child and append the result to s.
		if negate, ok := term.data.(*calcNegate); ok {
			token, ok := negate.term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimMinus,
				Text:       "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}

		// If child is a negative numeric value, append " - " to s, then serialize the negation of child as normal and append the result to s.
		if numeric, ok := term.data.(*calcNumeric); ok && numeric.number < 0 {
			clone := *numeric
			clone.number = -clone.number
			token, ok := clone.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimMinus,
				Text:       "-",
				Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
			}, token)
			continue
		}

		// Otherwise, append " + " to s, then serialize child and append the result to s.
		tokens = append(tokens, css_ast.Token{
			Loc:        term.opLoc,
			Kind:       css_lexer.TDelimPlus,
			Text:       "+",
			Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter,
		})

		// ALGORITHM DEVIATION: Avoid parenthesizing product nodes inside sum nodes
		if product, ok := term.data.(*calcProduct); ok {
			token, ok := product.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, *token.Children...)
		} else {
			token, ok := term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, token)
		}
	}

	// The caller re-labels this TOpenParen token as "calc(" when it ends up
	// at the top level (see tryToReduceCalcExpression)
	return css_ast.Token{
		Loc:      tokens[0].Loc,
		Kind:     css_lexer.TOpenParen,
		Text:     "(",
		Children: &tokens,
	}, true
}
+
// convertToToken serializes a Product node as a parenthesized token list:
// terms joined with "*", except Invert children, which are emitted with
// "/" instead.
// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
func (c *calcProduct) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
	tokens := make([]css_ast.Token, 0, len(c.terms)*2)
	token, ok := c.terms[0].data.convertToToken(whitespace)
	if !ok {
		return css_ast.Token{}, false
	}
	tokens = append(tokens, token)

	for _, term := range c.terms[1:] {
		// If child is an Invert node, append " / " to s, then serialize the Invert’s child and append the result to s.
		if invert, ok := term.data.(*calcInvert); ok {
			token, ok := invert.term.data.convertToToken(whitespace)
			if !ok {
				return css_ast.Token{}, false
			}
			tokens = append(tokens, css_ast.Token{
				Loc:        term.opLoc,
				Kind:       css_lexer.TDelimSlash,
				Text:       "/",
				Whitespace: whitespace,
			}, token)
			continue
		}

		// Otherwise, append " * " to s, then serialize child and append the result to s.
		token, ok := term.data.convertToToken(whitespace)
		if !ok {
			return css_ast.Token{}, false
		}
		tokens = append(tokens, css_ast.Token{
			Loc:        term.opLoc,
			Kind:       css_lexer.TDelimAsterisk,
			Text:       "*",
			Whitespace: whitespace,
		}, token)
	}

	return css_ast.Token{
		Loc:      tokens[0].Loc,
		Kind:     css_lexer.TOpenParen,
		Text:     "(",
		Children: &tokens,
	}, true
}
+
+func (c *calcNegate) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
+	// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
+	token, ok := c.term.data.convertToToken(whitespace)
+	if !ok {
+		return css_ast.Token{}, false
+	}
+	return css_ast.Token{
+		Kind: css_lexer.TOpenParen,
+		Text: "(",
+		Children: &[]css_ast.Token{
+			{Loc: c.term.opLoc, Kind: css_lexer.TNumber, Text: "-1"},
+			{Loc: c.term.opLoc, Kind: css_lexer.TDelimSlash, Text: "*", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
+			token,
+		},
+	}, true
+}
+
+func (c *calcInvert) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
+	// Specification: https://www.w3.org/TR/css-values-4/#calc-serialize
+	token, ok := c.term.data.convertToToken(whitespace)
+	if !ok {
+		return css_ast.Token{}, false
+	}
+	return css_ast.Token{
+		Kind: css_lexer.TOpenParen,
+		Text: "(",
+		Children: &[]css_ast.Token{
+			{Loc: c.term.opLoc, Kind: css_lexer.TNumber, Text: "1"},
+			{Loc: c.term.opLoc, Kind: css_lexer.TDelimSlash, Text: "/", Whitespace: css_ast.WhitespaceBefore | css_ast.WhitespaceAfter},
+			token,
+		},
+	}, true
+}
+
+func (c *calcNumeric) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
+	text, ok := floatToStringForCalc(c.number)
+	if !ok {
+		return css_ast.Token{}, false
+	}
+	if c.unit == "" {
+		return css_ast.Token{
+			Loc:  c.loc,
+			Kind: css_lexer.TNumber,
+			Text: text,
+		}, true
+	}
+	if c.unit == "%" {
+		return css_ast.Token{
+			Loc:  c.loc,
+			Kind: css_lexer.TPercentage,
+			Text: text + "%",
+		}, true
+	}
+	return css_ast.Token{
+		Loc:        c.loc,
+		Kind:       css_lexer.TDimension,
+		Text:       text + c.unit,
+		UnitOffset: uint16(len(text)),
+	}, true
+}
+
+func (c *calcValue) convertToToken(whitespace css_ast.WhitespaceFlags) (css_ast.Token, bool) {
+	t := c.token
+	t.Whitespace = 0
+	return t, true
+}
+
// partiallySimplify flattens nested Sum nodes and merges numeric terms
// that share the same unit (compared case-insensitively). If only one
// term remains the node collapses into that term.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcSum) partiallySimplify() calcTerm {
	// For each of root’s children that are Sum nodes, replace them with their children.
	terms := make([]calcTermWithOp, 0, len(c.terms))
	for _, term := range c.terms {
		term.data = term.data.partiallySimplify()
		if sum, ok := term.data.(*calcSum); ok {
			terms = append(terms, sum.terms...)
		} else {
			terms = append(terms, term)
		}
	}

	// For each set of root’s children that are numeric values with identical units, remove
	// those children and replace them with a single numeric value containing the sum of the
	// removed nodes, and with the same unit. (E.g. combine numbers, combine percentages,
	// combine px values, etc.)
	for i := 0; i < len(terms); i++ {
		term := terms[i]
		if numeric, ok := term.data.(*calcNumeric); ok {
			// Compact the slice in place: later terms with the same unit are
			// folded into "numeric" (a pointer into terms[i]) and dropped;
			// everything else is kept in order
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.data.(*calcNumeric); ok && strings.EqualFold(numeric2.unit, numeric.unit) {
					numeric.number += numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
		}
	}

	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0].data
	}

	// Otherwise, return root.
	c.terms = terms
	return c
}
+
// partiallySimplify flattens nested Product nodes, folds all unitless
// number children into one, combines a lone unitless number with a single
// remaining numeric term, and rewrites a multiplication as a division by
// the reciprocal when that prints shorter. If only one term remains the
// node collapses into that term.
// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
func (c *calcProduct) partiallySimplify() calcTerm {
	// For each of root’s children that are Product nodes, replace them with their children.
	terms := make([]calcTermWithOp, 0, len(c.terms))
	for _, term := range c.terms {
		term.data = term.data.partiallySimplify()
		if product, ok := term.data.(*calcProduct); ok {
			terms = append(terms, product.terms...)
		} else {
			terms = append(terms, term)
		}
	}

	// If root has multiple children that are numbers (not percentages or dimensions), remove
	// them and replace them with a single number containing the product of the removed nodes.
	for i, term := range terms {
		if numeric, ok := term.data.(*calcNumeric); ok && numeric.unit == "" {
			// Compact in place: later unitless numbers are folded into this
			// first one; other terms keep their relative order
			end := i + 1
			for j := end; j < len(terms); j++ {
				term2 := terms[j]
				if numeric2, ok := term2.data.(*calcNumeric); ok && numeric2.unit == "" {
					numeric.number *= numeric2.number
				} else {
					terms[end] = term2
					end++
				}
			}
			terms = terms[:end]
			break
		}
	}

	// If root contains only numeric values and/or Invert nodes containing numeric values,
	// and multiplying the types of all the children (noting that the type of an Invert
	// node is the inverse of its child’s type) results in a type that matches any of the
	// types that a math function can resolve to, return the result of multiplying all the
	// values of the children (noting that the value of an Invert node is the reciprocal
	// of its child’s value), expressed in the result’s canonical unit.
	if len(terms) == 2 {
		// Right now, only handle the case of two numbers, one of which has no unit
		if first, ok := terms[0].data.(*calcNumeric); ok {
			if second, ok := terms[1].data.(*calcNumeric); ok {
				if first.unit == "" {
					second.number *= first.number
					return second
				}
				if second.unit == "" {
					first.number *= second.number
					return first
				}
			}
		}
	}

	// ALGORITHM DEVIATION: Divide instead of multiply if the reciprocal is shorter
	for i := 1; i < len(terms); i++ {
		if numeric, ok := terms[i].data.(*calcNumeric); ok {
			reciprocal := 1 / numeric.number
			// Only swap if both forms serialize exactly (floatToStringForCalc
			// rejects values that don't round-trip)
			if multiply, ok := floatToStringForCalc(numeric.number); ok {
				if divide, ok := floatToStringForCalc(reciprocal); ok && len(divide) < len(multiply) {
					numeric.number = reciprocal
					terms[i].data = &calcInvert{term: calcTermWithOp{
						data:  numeric,
						opLoc: terms[i].opLoc,
					}}
				}
			}
		}
	}

	// If root has only a single child at this point, return the child.
	if len(terms) == 1 {
		return terms[0].data
	}

	// Otherwise, return root.
	c.terms = terms
	return c
}
+
+func (c *calcNegate) partiallySimplify() calcTerm {
+	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
+
+	c.term.data = c.term.data.partiallySimplify()
+
+	// If root’s child is a numeric value, return an equivalent numeric value, but with the value negated (0 - value).
+	if numeric, ok := c.term.data.(*calcNumeric); ok {
+		numeric.number = -numeric.number
+		return numeric
+	}
+
+	// If root’s child is a Negate node, return the child’s child.
+	if negate, ok := c.term.data.(*calcNegate); ok {
+		return negate.term.data
+	}
+
+	return c
+}
+
+func (c *calcInvert) partiallySimplify() calcTerm {
+	// Specification: https://www.w3.org/TR/css-values-4/#calc-simplification
+
+	c.term.data = c.term.data.partiallySimplify()
+
+	// If root’s child is a number (not a percentage or dimension) return the reciprocal of the child’s value.
+	if numeric, ok := c.term.data.(*calcNumeric); ok && numeric.unit == "" {
+		numeric.number = 1 / numeric.number
+		return numeric
+	}
+
+	// If root’s child is an Invert node, return the child’s child.
+	if invert, ok := c.term.data.(*calcInvert); ok {
+		return invert.term.data
+	}
+
+	return c
+}
+
// partiallySimplify is a no-op for a numeric leaf: it is already in its
// simplest form.
func (c *calcNumeric) partiallySimplify() calcTerm {
	return c
}
+
// partiallySimplify is a no-op for an opaque token leaf: its contents are
// not understood by the simplifier.
func (c *calcValue) partiallySimplify() calcTerm {
	return c
}
+
// tryToParseCalcTerm parses the token list of a math function into the
// internal calc AST. It returns nil when the tokens do not reduce to a
// single expression, or when they contain "var()" (which could expand to
// an arbitrary number of tokens).
// Specification: https://www.w3.org/TR/css-values-4/#calc-internal
func tryToParseCalcTerm(tokens []css_ast.Token) calcTerm {
	// Step 1: turn each token into a leaf (or a recursively-parsed subtree
	// for parens and nested "calc()"). After this loop the terms slice
	// alternates operand, operator, operand, ... with operators stored as
	// calcValue leaves.
	terms := make([]calcTermWithOp, len(tokens))

	for i, token := range tokens {
		var term calcTerm
		if token.Kind == css_lexer.TFunction && strings.EqualFold(token.Text, "var") {
			// Using "var()" should bail because it can expand to any number of tokens
			return nil
		} else if token.Kind == css_lexer.TOpenParen || (token.Kind == css_lexer.TFunction && strings.EqualFold(token.Text, "calc")) {
			term = tryToParseCalcTerm(*token.Children)
			if term == nil {
				return nil
			}
		} else if token.Kind == css_lexer.TNumber {
			if number, err := strconv.ParseFloat(token.Text, 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TPercentage {
			if number, err := strconv.ParseFloat(token.PercentageValue(), 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number, unit: "%"}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TDimension {
			if number, err := strconv.ParseFloat(token.DimensionValue(), 64); err == nil {
				term = &calcNumeric{loc: token.Loc, number: number, unit: token.DimensionUnit()}
			} else {
				term = &calcValue{token: token}
			}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "Infinity") {
			term = &calcNumeric{loc: token.Loc, number: math.Inf(1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "-Infinity") {
			term = &calcNumeric{loc: token.Loc, number: math.Inf(-1)}
		} else if token.Kind == css_lexer.TIdent && strings.EqualFold(token.Text, "NaN") {
			term = &calcNumeric{loc: token.Loc, number: math.NaN()}
		} else {
			term = &calcValue{
				token: token,

				// From the specification: "In addition, whitespace is required on both sides of the
				// + and - operators. (The * and / operators can be used without white space around them.)"
				isInvalidPlusOrMinus: i > 0 && i+1 < len(tokens) &&
					(token.Kind == css_lexer.TDelimPlus || token.Kind == css_lexer.TDelimMinus) &&
					(((token.Whitespace&css_ast.WhitespaceBefore) == 0 && (tokens[i-1].Whitespace&css_ast.WhitespaceAfter) == 0) ||
						(token.Whitespace&css_ast.WhitespaceAfter) == 0 && (tokens[i+1].Whitespace&css_ast.WhitespaceBefore) == 0),
			}
		}
		terms[i].data = term
	}

	// Collect children into Product and Invert nodes
	// ("first" scans operator positions; a run "a * b / c" spans from the
	// operand before the first operator to the operand after the last one)
	first := 1
	for first+1 < len(terms) {
		// If this is a "*" or "/" operator
		if value, ok := terms[first].data.(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
			// Scan over the run
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].data.(*calcValue); ok && (value.token.Kind == css_lexer.TDelimAsterisk || value.token.Kind == css_lexer.TDelimSlash) {
					last += 2
				} else {
					break
				}
			}

			// Generate a node for the run; "/" divisors become Invert children
			product := calcProduct{terms: make([]calcTermWithOp, (last-first)/2+2)}
			for i := range product.terms {
				term := terms[first+i*2-1]
				if i > 0 {
					op := terms[first+i*2-2].data.(*calcValue).token
					term.opLoc = op.Loc
					if op.Kind == css_lexer.TDelimSlash {
						term.data = &calcInvert{term: term}
					}
				}
				product.terms[i] = term
			}

			// Replace the run with a single node
			terms[first-1].data = &product
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}

		first++
	}

	// Collect children into Sum and Negate nodes (lower precedence, so this
	// pass runs after products have been collapsed)
	first = 1
	for first+1 < len(terms) {
		// If this is a "+" or "-" operator
		if value, ok := terms[first].data.(*calcValue); ok && !value.isInvalidPlusOrMinus &&
			(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
			// Scan over the run
			last := first
			for last+3 < len(terms) {
				if value, ok := terms[last+2].data.(*calcValue); ok && !value.isInvalidPlusOrMinus &&
					(value.token.Kind == css_lexer.TDelimPlus || value.token.Kind == css_lexer.TDelimMinus) {
					last += 2
				} else {
					break
				}
			}

			// Generate a node for the run; "-" subtrahends become Negate children
			sum := calcSum{terms: make([]calcTermWithOp, (last-first)/2+2)}
			for i := range sum.terms {
				term := terms[first+i*2-1]
				if i > 0 {
					op := terms[first+i*2-2].data.(*calcValue).token
					term.opLoc = op.Loc
					if op.Kind == css_lexer.TDelimMinus {
						term.data = &calcNegate{term: term}
					}
				}
				sum.terms[i] = term
			}

			// Replace the run with a single node
			terms[first-1].data = &sum
			terms = append(terms[:first], terms[last+2:]...)
			continue
		}

		first++
	}

	// This only succeeds if everything reduces to a single term
	if len(terms) == 1 {
		return terms[0].data
	}
	return nil
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/css_printer/css_printer.go b/source/vendor/github.com/evanw/esbuild/internal/css_printer/css_printer.go
new file mode 100644
index 0000000..c010074
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/css_printer/css_printer.go
@@ -0,0 +1,1141 @@
+package css_printer
+
+import (
+	"fmt"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/sourcemap"
+)
+
// quoteForURL is a sentinel "quote character" value meaning the string
// should be printed as an unquoted URL token rather than a quoted string.
const quoteForURL byte = 0

// printer holds all mutable state used while printing a CSS AST to text.
type printer struct {
	options Options

	// Symbol table used to resolve renamed local names
	symbols       ast.SymbolMap
	importRecords []ast.ImportRecord

	// The output buffer; all print helpers append to this
	css []byte

	// Used to extract each distinct legal comment only once per file
	hasLegalComment        map[string]struct{}
	extractedLegalComments []string
	jsonMetadataImports    []string

	// Accumulates source map mappings for the printed output
	builder      sourcemap.ChunkBuilder
	oldLineStart int
	oldLineEnd   int
}

// Options controls how a CSS AST is printed.
type Options struct {
	// This will be present if the input file had a source map. In that case we
	// want to map all the way back to the original input file(s).
	InputSourceMap *sourcemap.SourceMap

	// If we're writing out a source map, this table of line start indices lets
	// us do binary search on to figure out what line a given AST node came from
	LineOffsetTables []sourcemap.LineOffsetTable

	// Local symbol renaming results go here
	LocalNames map[ast.Ref]string

	LineLimit           int
	InputSourceIndex    uint32
	UnsupportedFeatures compat.CSSFeature
	MinifyWhitespace    bool
	ASCIIOnly           bool
	SourceMap           config.SourceMap
	AddSourceMappings   bool
	LegalComments       config.LegalComments
	NeedsMetafile       bool
}

// PrintResult is everything produced by a call to Print.
type PrintResult struct {
	CSS                    []byte
	ExtractedLegalComments []string
	JSONMetadataImports    []string

	// This source map chunk just contains the VLQ-encoded offsets for the "CSS"
	// field above. It's not a full source map. The bundler will be joining many
	// source map chunks together to form the final source map.
	SourceMapChunk sourcemap.Chunk
}
+
// Print converts a CSS AST into printed CSS text, along with any extracted
// legal comments, metafile import entries, and (when a source map was
// requested in the options) a source map chunk for the output.
func Print(tree css_ast.AST, symbols ast.SymbolMap, options Options) PrintResult {
	p := printer{
		options:       options,
		symbols:       symbols,
		importRecords: tree.ImportRecords,
		builder:       sourcemap.MakeChunkBuilder(options.InputSourceMap, options.LineOffsetTables, options.ASCIIOnly),
	}
	for _, rule := range tree.Rules {
		p.printRule(rule, 0, false)
	}
	result := PrintResult{
		CSS:                    p.css,
		ExtractedLegalComments: p.extractedLegalComments,
		JSONMetadataImports:    p.jsonMetadataImports,
	}
	if options.SourceMap != config.SourceMapNone {
		// This is expensive. Only do this if it's necessary. For example, skipping
		// this if it's not needed sped up end-to-end parsing and printing of a
		// large CSS file from 66ms to 52ms (around 25% faster).
		result.SourceMapChunk = p.builder.GenerateChunk(p.css)
	}
	return result
}
+
// recordImportPathForMetafile appends a JSON fragment describing the given
// import record to the list that ends up in the build metafile. This is a
// no-op unless a metafile was requested.
func (p *printer) recordImportPathForMetafile(importRecordIndex uint32) {
	if p.options.NeedsMetafile {
		record := p.importRecords[importRecordIndex]
		external := ""
		// NOTE(review): records WITHOUT the ShouldNotBeExternalInMetafile
		// flag are the ones reported as "external": true — presumably because
		// an import that survives to the printer stays external to the
		// bundle. Confirm against the metafile writer.
		if (record.Flags & ast.ShouldNotBeExternalInMetafile) == 0 {
			external = ",\n          \"external\": true"
		}
		p.jsonMetadataImports = append(p.jsonMetadataImports, fmt.Sprintf("\n        {\n          \"path\": %s,\n          \"kind\": %s%s\n        }",
			helpers.QuoteForJSON(record.Path.Text, p.options.ASCIIOnly),
			helpers.QuoteForJSON(record.Kind.StringForMetafile(), p.options.ASCIIOnly),
			external))
	}
}
+
// printRule prints a single top-level or nested rule at the given
// indentation level, dispatching on the rule's concrete AST type. When
// omitTrailingSemicolon is true (the last declaration of a minified
// block), the ";" after a declaration is skipped. Legal comments may be
// extracted here instead of printed, depending on the legal-comments mode.
func (p *printer) printRule(rule css_ast.Rule, indent int32, omitTrailingSemicolon bool) {
	if r, ok := rule.Data.(*css_ast.RComment); ok {
		switch p.options.LegalComments {
		case config.LegalCommentsNone:
			return

		case config.LegalCommentsEndOfFile,
			config.LegalCommentsLinkedWithComment,
			config.LegalCommentsExternalWithoutComment:

			// Don't record the same legal comment more than once per file
			if p.hasLegalComment == nil {
				p.hasLegalComment = make(map[string]struct{})
			} else if _, ok := p.hasLegalComment[r.Text]; ok {
				return
			}
			p.hasLegalComment[r.Text] = struct{}{}
			p.extractedLegalComments = append(p.extractedLegalComments, r.Text)
			return
		}
	}

	if p.options.LineLimit > 0 {
		p.printNewlinePastLineLimit(indent)
	}

	if p.options.AddSourceMappings {
		shouldPrintMapping := true
		if indent == 0 || p.options.MinifyWhitespace {
			switch rule.Data.(type) {
			case *css_ast.RSelector, *css_ast.RQualified, *css_ast.RBadDeclaration:
				// These rules will begin with a potentially more accurate mapping. We
				// shouldn't print a mapping here if there's no indent in between this
				// mapping and the rule.
				shouldPrintMapping = false
			}
		}
		if shouldPrintMapping {
			p.builder.AddSourceMapping(rule.Loc, "", p.css)
		}
	}

	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}

	switch r := rule.Data.(type) {
	case *css_ast.RAtCharset:
		// It's not valid to remove the space in between these two tokens
		p.print("@charset ")

		// It's not valid to print the string with single quotes
		p.printQuotedWithQuote(r.Encoding, '"', 0)
		p.print(";")

	case *css_ast.RAtImport:
		if p.options.MinifyWhitespace {
			p.print("@import")
		} else {
			p.print("@import ")
		}
		record := p.importRecords[r.ImportRecordIndex]
		var flags printQuotedFlags
		if record.Flags.Has(ast.ContainsUniqueKey) {
			flags |= printQuotedNoWrap
		}
		p.printQuoted(record.Path.Text, flags)
		p.recordImportPathForMetafile(r.ImportRecordIndex)
		// Import conditions print in spec order: layers, supports, media
		if conditions := r.ImportConditions; conditions != nil {
			space := !p.options.MinifyWhitespace
			if len(conditions.Layers) > 0 {
				if space {
					p.print(" ")
				}
				p.printTokens(conditions.Layers, printTokensOpts{})
				space = true
			}
			if len(conditions.Supports) > 0 {
				if space {
					p.print(" ")
				}
				p.printTokens(conditions.Supports, printTokensOpts{})
				space = true
			}
			if len(conditions.Media) > 0 {
				if space {
					p.print(" ")
				}
				p.printTokens(conditions.Media, printTokensOpts{})
			}
		}
		p.print(";")

	case *css_ast.RAtKeyframes:
		p.print("@")
		p.printIdent(r.AtToken, identNormal, mayNeedWhitespaceAfter)
		p.print(" ")
		p.printSymbol(r.Name.Loc, r.Name.Ref, identNormal, canDiscardWhitespaceAfter)
		if !p.options.MinifyWhitespace {
			p.print(" ")
		}
		if p.options.MinifyWhitespace {
			p.print("{")
		} else {
			p.print("{\n")
		}
		indent++
		for _, block := range r.Blocks {
			if p.options.AddSourceMappings {
				p.builder.AddSourceMapping(block.Loc, "", p.css)
			}
			if !p.options.MinifyWhitespace {
				p.printIndent(indent)
			}
			for i, sel := range block.Selectors {
				if i > 0 {
					if p.options.MinifyWhitespace {
						p.print(",")
					} else {
						p.print(", ")
					}
				}
				p.print(sel)
			}
			if !p.options.MinifyWhitespace {
				p.print(" ")
			}
			p.printRuleBlock(block.Rules, indent, block.CloseBraceLoc)
			if !p.options.MinifyWhitespace {
				p.print("\n")
			}
		}
		indent--
		// A zero Start means there was no recorded closing brace location
		if p.options.AddSourceMappings && r.CloseBraceLoc.Start != 0 {
			p.builder.AddSourceMapping(r.CloseBraceLoc, "", p.css)
		}
		if !p.options.MinifyWhitespace {
			p.printIndent(indent)
		}
		p.print("}")

	case *css_ast.RKnownAt:
		p.print("@")
		whitespace := mayNeedWhitespaceAfter
		if len(r.Prelude) == 0 {
			whitespace = canDiscardWhitespaceAfter
		}
		p.printIdent(r.AtToken, identNormal, whitespace)
		if (!p.options.MinifyWhitespace && r.Rules != nil) || len(r.Prelude) > 0 {
			p.print(" ")
		}
		p.printTokens(r.Prelude, printTokensOpts{})
		// A nil rule list distinguishes "@rule;" from "@rule {}"
		if r.Rules == nil {
			p.print(";")
		} else {
			if !p.options.MinifyWhitespace && len(r.Prelude) > 0 {
				p.print(" ")
			}
			p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
		}

	case *css_ast.RUnknownAt:
		p.print("@")
		whitespace := mayNeedWhitespaceAfter
		if len(r.Prelude) == 0 {
			whitespace = canDiscardWhitespaceAfter
		}
		p.printIdent(r.AtToken, identNormal, whitespace)
		if (!p.options.MinifyWhitespace && len(r.Block) != 0) || len(r.Prelude) > 0 {
			p.print(" ")
		}
		p.printTokens(r.Prelude, printTokensOpts{})
		if !p.options.MinifyWhitespace && len(r.Block) != 0 && len(r.Prelude) > 0 {
			p.print(" ")
		}
		if len(r.Block) == 0 {
			p.print(";")
		} else {
			p.printTokens(r.Block, printTokensOpts{})
		}

	case *css_ast.RSelector:
		p.printComplexSelectors(r.Selectors, indent, layoutMultiLine)
		if !p.options.MinifyWhitespace {
			p.print(" ")
		}
		p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)

	case *css_ast.RQualified:
		hasWhitespaceAfter := p.printTokens(r.Prelude, printTokensOpts{})
		if !hasWhitespaceAfter && !p.options.MinifyWhitespace {
			p.print(" ")
		}
		p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)

	case *css_ast.RDeclaration:
		p.printIdent(r.KeyText, identNormal, canDiscardWhitespaceAfter)
		p.print(":")
		hasWhitespaceAfter := p.printTokens(r.Value, printTokensOpts{
			indent:        indent,
			isDeclaration: true,
		})
		if r.Important {
			if !hasWhitespaceAfter && !p.options.MinifyWhitespace && len(r.Value) > 0 {
				p.print(" ")
			}
			p.print("!important")
		}
		if !omitTrailingSemicolon {
			p.print(";")
		}

	case *css_ast.RBadDeclaration:
		p.printTokens(r.Tokens, printTokensOpts{})
		if !omitTrailingSemicolon {
			p.print(";")
		}

	case *css_ast.RComment:
		p.printIndentedComment(indent, r.Text)

	case *css_ast.RAtLayer:
		p.print("@layer")
		for i, parts := range r.Names {
			if i == 0 {
				p.print(" ")
			} else if !p.options.MinifyWhitespace {
				p.print(", ")
			} else {
				p.print(",")
			}
			// Layer names are dot-separated paths, e.g. "@layer a.b.c"
			p.print(strings.Join(parts, "."))
		}
		if r.Rules == nil {
			p.print(";")
		} else {
			if !p.options.MinifyWhitespace {
				p.print(" ")
			}
			p.printRuleBlock(r.Rules, indent, r.CloseBraceLoc)
		}

	default:
		panic("Internal error")
	}

	if !p.options.MinifyWhitespace {
		p.print("\n")
	}
}
+
// printIndentedComment prints a (possibly multi-line) comment, re-indenting
// every line after a newline to the current indentation level. The
// "</style" character sequence is escaped unless the target marks inline
// styles as unsupported.
func (p *printer) printIndentedComment(indent int32, text string) {
	// Avoid generating a comment containing the character sequence "</style"
	if !p.options.UnsupportedFeatures.Has(compat.InlineStyle) {
		text = helpers.EscapeClosingTag(text, "/style")
	}

	// Re-indent multi-line comments
	for {
		newline := strings.IndexByte(text, '\n')
		if newline == -1 {
			break
		}
		p.print(text[:newline+1])
		if !p.options.MinifyWhitespace {
			p.printIndent(indent)
		}
		text = text[newline+1:]
	}
	p.print(text)
}
+
// printRuleBlock prints "{ ... }" around a list of rules, one indentation
// level deeper than the caller. When minifying, the trailing semicolon of
// the last rule in the block is omitted.
func (p *printer) printRuleBlock(rules []css_ast.Rule, indent int32, closeBraceLoc logger.Loc) {
	if p.options.MinifyWhitespace {
		p.print("{")
	} else {
		p.print("{\n")
	}

	for i, decl := range rules {
		omitTrailingSemicolon := p.options.MinifyWhitespace && i+1 == len(rules)
		p.printRule(decl, indent+1, omitTrailingSemicolon)
	}

	// A zero Start means there was no recorded closing brace location
	if p.options.AddSourceMappings && closeBraceLoc.Start != 0 {
		p.builder.AddSourceMapping(closeBraceLoc, "", p.css)
	}
	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}
	p.print("}")
}
+
// selectorLayout controls how a comma-separated selector list is laid out
// when whitespace is not being minified.
type selectorLayout uint8

const (
	layoutMultiLine selectorLayout = iota // one selector per line
	layoutSingleLine                      // all selectors on a single line
)
+
// printComplexSelectors prints a comma-separated list of complex selectors.
// When not minifying, "layout" chooses between one-selector-per-line
// (layoutMultiLine) and a single space-separated line (layoutSingleLine).
func (p *printer) printComplexSelectors(selectors []css_ast.ComplexSelector, indent int32, layout selectorLayout) {
	for i, complex := range selectors {
		if i > 0 {
			if p.options.MinifyWhitespace {
				p.print(",")
				if p.options.LineLimit > 0 {
					p.printNewlinePastLineLimit(indent)
				}
			} else if layout == layoutMultiLine {
				p.print(",\n")
				p.printIndent(indent)
			} else {
				p.print(", ")
			}
		}

		for j, compound := range complex.Selectors {
			p.printCompoundSelector(compound, j == 0, j+1 == len(complex.Selectors), indent)
		}
	}
}
+
// printCompoundSelector prints one compound selector (type selector, nesting
// selector "&", and any subclass selectors such as "#id", ".class",
// "[attr]", and ":pseudo"), preceded by its combinator or a descendant space
// when it is not the first compound in the complex selector.
func (p *printer) printCompoundSelector(sel css_ast.CompoundSelector, isFirst bool, isLast bool, indent int32) {
	if !isFirst && sel.Combinator.Byte == 0 {
		// A space is required in between compound selectors if there is no
		// combinator in the middle. It's fine to convert "a + b" into "a+b"
		// but not to convert "a b" into "ab".
		if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(indent) {
			p.print(" ")
		}
	}

	if sel.Combinator.Byte != 0 {
		// Surround the combinator with spaces unless minifying ("a > b" vs "a>b")
		if !isFirst && !p.options.MinifyWhitespace {
			p.print(" ")
		}

		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(sel.Combinator.Loc, "", p.css)
		}
		p.css = append(p.css, sel.Combinator.Byte)

		if (p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(indent)) && !p.options.MinifyWhitespace {
			p.print(" ")
		}
	}

	if sel.TypeSelector != nil {
		whitespace := mayNeedWhitespaceAfter
		if len(sel.SubclassSelectors) > 0 {
			// There is no chance of whitespace before a subclass selector or pseudo
			// class selector
			whitespace = canDiscardWhitespaceAfter
		}
		p.printNamespacedName(*sel.TypeSelector, whitespace)
	}

	if sel.HasNestingSelector() {
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(logger.Loc{Start: int32(sel.NestingSelectorLoc.GetIndex())}, "", p.css)
		}

		p.print("&")
	}

	for i, ss := range sel.SubclassSelectors {
		whitespace := mayNeedWhitespaceAfter

		// There is no chance of whitespace between subclass selectors
		if i+1 < len(sel.SubclassSelectors) {
			whitespace = canDiscardWhitespaceAfter
		}

		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(ss.Range.Loc, "", p.css)
		}

		switch s := ss.Data.(type) {
		case *css_ast.SSHash:
			p.print("#")

			// This deliberately does not use identHash. From the specification:
			// "In <id-selector>, the <hash-token>'s value must be an identifier."
			p.printSymbol(s.Name.Loc, s.Name.Ref, identNormal, whitespace)

		case *css_ast.SSClass:
			p.print(".")
			p.printSymbol(s.Name.Loc, s.Name.Ref, identNormal, whitespace)

		case *css_ast.SSAttribute:
			p.print("[")
			p.printNamespacedName(s.NamespacedName, canDiscardWhitespaceAfter)
			if s.MatcherOp != "" {
				p.print(s.MatcherOp)
				printAsIdent := false

				// Print the value as an identifier if it's possible
				if css_lexer.WouldStartIdentifierWithoutEscapes(s.MatcherValue) {
					printAsIdent = true
					for _, c := range s.MatcherValue {
						if !css_lexer.IsNameContinue(c) {
							printAsIdent = false
							break
						}
					}
				}

				if printAsIdent {
					p.printIdent(s.MatcherValue, identNormal, canDiscardWhitespaceAfter)
				} else {
					p.printQuoted(s.MatcherValue, 0)
				}
			}
			if s.MatcherModifier != 0 {
				// Case-sensitivity modifier, e.g. "[href$=png i]"
				p.print(" ")
				p.print(string(rune(s.MatcherModifier)))
			}
			p.print("]")

		case *css_ast.SSPseudoClass:
			p.printPseudoClassSelector(*s, whitespace)

		case *css_ast.SSPseudoClassWithSelectorList:
			p.print(":")
			p.print(s.Kind.String())
			p.print("(")
			if s.Index.A != "" || s.Index.B != "" {
				// e.g. ":nth-child(2n+1 of .foo)"
				p.printNthIndex(s.Index)
				if len(s.Selectors) > 0 {
					if p.options.MinifyWhitespace && s.Selectors[0].Selectors[0].TypeSelector == nil {
						p.print(" of")
					} else {
						p.print(" of ")
					}
				}
			}
			p.printComplexSelectors(s.Selectors, indent, layoutSingleLine)
			p.print(")")

		default:
			panic("Internal error")
		}
	}
}
+
+func (p *printer) printNthIndex(index css_ast.NthIndex) {
+	if index.A != "" {
+		if index.A == "-1" {
+			p.print("-")
+		} else if index.A != "1" {
+			p.print(index.A)
+		}
+		p.print("n")
+		if index.B != "" {
+			if !strings.HasPrefix(index.B, "-") {
+				p.print("+")
+			}
+			p.print(index.B)
+		}
+	} else if index.B != "" {
+		p.print(index.B)
+	}
+}
+
// printNamespacedName prints a possibly namespace-qualified name such as
// "svg|circle", "*|a", or a plain "div". Both the prefix and the name may be
// an identifier or the wildcard "*"; the name may also be "&".
func (p *printer) printNamespacedName(nsName css_ast.NamespacedName, whitespace trailingWhitespace) {
	if prefix := nsName.NamespacePrefix; prefix != nil {
		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(prefix.Range.Loc, "", p.css)
		}

		switch prefix.Kind {
		case css_lexer.TIdent:
			p.printIdent(prefix.Text, identNormal, canDiscardWhitespaceAfter)
		case css_lexer.TDelimAsterisk:
			p.print("*")
		default:
			panic("Internal error")
		}

		// The "|" separates the namespace prefix from the element name
		p.print("|")
	}

	if p.options.AddSourceMappings {
		p.builder.AddSourceMapping(nsName.Name.Range.Loc, "", p.css)
	}

	switch nsName.Name.Kind {
	case css_lexer.TIdent:
		p.printIdent(nsName.Name.Text, identNormal, whitespace)
	case css_lexer.TDelimAsterisk:
		p.print("*")
	case css_lexer.TDelimAmpersand:
		p.print("&")
	default:
		panic("Internal error")
	}
}
+
+func (p *printer) printPseudoClassSelector(pseudo css_ast.SSPseudoClass, whitespace trailingWhitespace) {
+	if pseudo.IsElement {
+		p.print("::")
+	} else {
+		p.print(":")
+	}
+
+	// This checks for "nil" so we can distinguish ":is()" from ":is"
+	if pseudo.Args != nil {
+		p.printIdent(pseudo.Name, identNormal, canDiscardWhitespaceAfter)
+		p.print("(")
+		p.printTokens(pseudo.Args, printTokensOpts{})
+		p.print(")")
+	} else {
+		p.printIdent(pseudo.Name, identNormal, whitespace)
+	}
+}
+
// print appends raw text verbatim to the output buffer.
func (p *printer) print(text string) {
	p.css = append(p.css, text...)
}
+
+func bestQuoteCharForString(text string, forURL bool) byte {
+	forURLCost := 0
+	singleCost := 2
+	doubleCost := 2
+
+	for _, c := range text {
+		switch c {
+		case '\'':
+			forURLCost++
+			singleCost++
+
+		case '"':
+			forURLCost++
+			doubleCost++
+
+		case '(', ')', ' ', '\t':
+			forURLCost++
+
+		case '\\', '\n', '\r', '\f':
+			forURLCost++
+			singleCost++
+			doubleCost++
+		}
+	}
+
+	// Quotes can sometimes be omitted for URL tokens
+	if forURL && forURLCost < singleCost && forURLCost < doubleCost {
+		return quoteForURL
+	}
+
+	// Prefer double quotes to single quotes if there is no cost difference
+	if singleCost < doubleCost {
+		return '\''
+	}
+
+	return '"'
+}
+
// printQuotedFlags is a bit set of options for printing quoted strings.
type printQuotedFlags uint8

const (
	// printQuotedNoWrap disables inserting escaped newlines to satisfy the
	// configured line length limit (used when the string will be substituted
	// later and must stay intact).
	printQuotedNoWrap printQuotedFlags = 1 << iota
)
+
// printQuoted prints "text" as a quoted CSS string, choosing whichever quote
// character requires the fewest escapes.
func (p *printer) printQuoted(text string, flags printQuotedFlags) {
	p.printQuotedWithQuote(text, bestQuoteCharForString(text, false), flags)
}
+
// escapeKind describes how a single character must be escaped when printed.
type escapeKind uint8

const (
	escapeNone      escapeKind = iota // print the character as-is
	escapeBackslash                   // print "\" followed by the character
	escapeHex                         // print "\" followed by the code point in hex
)
+
// printWithEscape prints the rune "c" using the requested escape style.
// "remainingText" is the yet-unprinted input starting at "c"; it is used to
// decide whether a hex escape must be terminated with a space so the next
// character isn't absorbed into the escape sequence. If "c" is the last
// character, "mayNeedWhitespaceAfter" forces that terminating space instead.
func (p *printer) printWithEscape(c rune, escape escapeKind, remainingText string, mayNeedWhitespaceAfter bool) {
	var temp [utf8.UTFMax]byte

	if escape == escapeBackslash && ((c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F')) {
		// Hexadecimal characters cannot use a plain backslash escape
		escape = escapeHex
	}

	switch escape {
	case escapeNone:
		width := utf8.EncodeRune(temp[:], c)
		p.css = append(p.css, temp[:width]...)

	case escapeBackslash:
		p.css = append(p.css, '\\')
		width := utf8.EncodeRune(temp[:], c)
		p.css = append(p.css, temp[:width]...)

	case escapeHex:
		text := fmt.Sprintf("\\%x", c)
		p.css = append(p.css, text...)

		// Make sure the next character is not interpreted as part of the escape sequence
		// (a hex escape of the maximum 6 digits is self-terminating)
		if len(text) < 1+6 {
			if next := utf8.RuneLen(c); next < len(remainingText) {
				c = rune(remainingText[next])
				if c == ' ' || c == '\t' || (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F') {
					p.css = append(p.css, ' ')
				}
			} else if mayNeedWhitespaceAfter {
				// If the last character is a hexadecimal escape, print a space afterwards
				// for the escape sequence to consume. That way we're sure it won't
				// accidentally consume a semantically significant space afterward.
				p.css = append(p.css, ' ')
			}
		}
	}
}
+
// printQuotedWithQuote prints "text" surrounded by the given quote character
// (or unquoted when "quote" is quoteForURL), escaping characters as needed.
// Unescaped characters are flushed in runs via "runStart" to keep appends
// cheap, and over-long lines may be wrapped with escaped newlines.
//
// Note: This function is hot in profiles
func (p *printer) printQuotedWithQuote(text string, quote byte, flags printQuotedFlags) {
	if quote != quoteForURL {
		p.css = append(p.css, quote)
	}

	n := len(text)
	i := 0
	runStart := 0

	// Only compute the line length if necessary
	var startLineLength int
	wrapLongLines := false
	if p.options.LineLimit > 0 && quote != quoteForURL && (flags&printQuotedNoWrap) == 0 {
		startLineLength = p.currentLineLength()
		if startLineLength > p.options.LineLimit {
			startLineLength = p.options.LineLimit
		}
		wrapLongLines = true
	}

	for i < n {
		// Wrap long lines that are over the limit using escaped newlines
		if wrapLongLines && startLineLength+i >= p.options.LineLimit {
			if runStart < i {
				p.css = append(p.css, text[runStart:i]...)
				runStart = i
			}
			p.css = append(p.css, "\\\n"...)
			startLineLength -= p.options.LineLimit
		}

		c, width := utf8.DecodeRuneInString(text[i:])
		escape := escapeNone

		switch c {
		case '\x00', '\r', '\n', '\f':
			// Use a hexadecimal escape for characters that would be invalid escapes
			escape = escapeHex

		case '\\', rune(quote):
			escape = escapeBackslash

		case '(', ')', ' ', '\t', '"', '\'':
			// These characters must be escaped in URL tokens
			if quote == quoteForURL {
				escape = escapeBackslash
			}

		case '/':
			// Avoid generating the sequence "</style" in CSS code
			if !p.options.UnsupportedFeatures.Has(compat.InlineStyle) && i >= 1 && text[i-1] == '<' && i+6 <= len(text) && strings.EqualFold(text[i+1:i+6], "style") {
				escape = escapeBackslash
			}

		default:
			if (p.options.ASCIIOnly && c >= 0x80) || c == '\uFEFF' {
				escape = escapeHex
			}
		}

		if escape != escapeNone {
			// Flush the pending run of unescaped characters, then escape this one
			if runStart < i {
				p.css = append(p.css, text[runStart:i]...)
			}
			p.printWithEscape(c, escape, text[i:], false)
			runStart = i + width
		}
		i += width
	}

	if runStart < n {
		p.css = append(p.css, text[runStart:]...)
	}

	if quote != quoteForURL {
		p.css = append(p.css, quote)
	}
}
+
// currentLineLength returns how many bytes have been printed on the current
// output line. The scan is incremental: "oldLineEnd" records how far the
// buffer had already been scanned on the previous call, and "oldLineStart"
// caches the start of the line found then, so each call only examines bytes
// appended since the last call.
func (p *printer) currentLineLength() int {
	css := p.css
	n := len(css)
	stop := p.oldLineEnd

	// Update "oldLineStart" to the start of the current line
	for i := n; i > stop; i-- {
		if c := css[i-1]; c == '\r' || c == '\n' {
			p.oldLineStart = i
			break
		}
	}

	p.oldLineEnd = n
	return n - p.oldLineStart
}
+
// printNewlinePastLineLimit breaks the current line if it has reached the
// configured line length limit, re-indenting unless whitespace is being
// minified. It reports whether a newline was printed so the caller can skip
// any space it would otherwise have emitted.
func (p *printer) printNewlinePastLineLimit(indent int32) bool {
	if p.currentLineLength() < p.options.LineLimit {
		return false
	}
	p.print("\n")
	if !p.options.MinifyWhitespace {
		p.printIndent(indent)
	}
	return true
}
+
// identMode selects the escaping rules applied when printing an identifier
// (see printIdent for how each mode affects the first character).
type identMode uint8

const (
	identNormal                     identMode = iota // ordinary identifier; escape if it couldn't start one
	identHash                                        // hash token value; no first-character identifier check
	identDimensionUnit                               // dimension unit; escape a leading digit or exponent-like "e"/"E"
	identDimensionUnitAfterExponent                  // unit after a number already containing "e"/"E"; leading "e" needs no escape
)
+
// trailingWhitespace tells the printer whether whatever follows the item
// being printed could begin with semantically significant whitespace, which
// affects whether a trailing hex escape must be space-terminated.
type trailingWhitespace uint8

const (
	mayNeedWhitespaceAfter trailingWhitespace = iota
	canDiscardWhitespaceAfter
)
+
// printIdent prints "text" as a CSS identifier, escaping characters as
// required by the given mode. A fast path handles the common case of a
// purely ASCII name that needs no escaping at all.
//
// Note: This function is hot in profiles
func (p *printer) printIdent(text string, mode identMode, whitespace trailingWhitespace) {
	n := len(text)

	// Special escape behavior for the first character
	initialEscape := escapeNone
	switch mode {
	case identNormal:
		if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
			initialEscape = escapeBackslash
		}
	case identDimensionUnit, identDimensionUnitAfterExponent:
		if !css_lexer.WouldStartIdentifierWithoutEscapes(text) {
			initialEscape = escapeBackslash
		} else if n > 0 {
			if c := text[0]; c >= '0' && c <= '9' {
				// Unit: "2x"
				initialEscape = escapeHex
			} else if (c == 'e' || c == 'E') && mode != identDimensionUnitAfterExponent {
				// A leading "e" could be misparsed as the number's exponent
				if n >= 2 && text[1] >= '0' && text[1] <= '9' {
					// Unit: "e2x"
					initialEscape = escapeHex
				} else if n >= 3 && text[1] == '-' && text[2] >= '0' && text[2] <= '9' {
					// Unit: "e-2x"
					initialEscape = escapeHex
				}
			}
		}
	}

	// Fast path: the identifier does not need to be escaped. This fast path is
	// important for performance. For example, doing this sped up end-to-end
	// parsing and printing of a large CSS file from 84ms to 66ms (around 25%
	// faster).
	if initialEscape == escapeNone {
		for i := 0; i < n; i++ {
			if c := text[i]; c >= 0x80 || !css_lexer.IsNameContinue(rune(c)) {
				goto slowPath
			}
		}
		p.css = append(p.css, text...)
		return
	slowPath:
	}

	// Slow path: the identifier needs to be escaped
	for i, c := range text {
		escape := escapeNone

		if p.options.ASCIIOnly && c >= 0x80 {
			escape = escapeHex
		} else if c == '\r' || c == '\n' || c == '\f' || c == '\uFEFF' {
			// Use a hexadecimal escape for characters that would be invalid escapes
			escape = escapeHex
		} else {
			// Escape non-identifier characters
			if !css_lexer.IsNameContinue(c) {
				escape = escapeBackslash
			}

			// Special escape behavior for the first character
			if i == 0 && initialEscape != escapeNone {
				escape = initialEscape
			}
		}

		// If the last character is a hexadecimal escape, print a space afterwards
		// for the escape sequence to consume. That way we're sure it won't
		// accidentally consume a semantically significant space afterward.
		mayNeedWhitespaceAfter := whitespace == mayNeedWhitespaceAfter && escape != escapeNone && i+utf8.RuneLen(c) == n
		p.printWithEscape(c, escape, text[i:], mayNeedWhitespaceAfter)
	}
}
+
// printSymbol prints the (possibly renamed) name of a symbol reference,
// recording a source mapping that carries the original name only when the
// printed name differs from it.
func (p *printer) printSymbol(loc logger.Loc, ref ast.Ref, mode identMode, whitespace trailingWhitespace) {
	// Follow link chains first so merged symbols resolve to one canonical ref
	ref = ast.FollowSymbols(p.symbols, ref)
	originalName := p.symbols.Get(ref).OriginalName
	name, ok := p.options.LocalNames[ref]
	if !ok {
		// No rename for this symbol; print it under its original name
		name = originalName
	}
	if p.options.AddSourceMappings {
		if originalName == name {
			originalName = ""
		}
		p.builder.AddSourceMapping(loc, originalName, p.css)
	}
	p.printIdent(name, mode, whitespace)
}
+
+func (p *printer) printIndent(indent int32) {
+	n := int(indent)
+	if p.options.LineLimit > 0 && n*2 >= p.options.LineLimit {
+		n = p.options.LineLimit / 2
+	}
+	for i := 0; i < n; i++ {
+		p.css = append(p.css, "  "...)
+	}
+}
+
// printTokensOpts configures a call to printTokens.
type printTokensOpts struct {
	indent               int32 // current indentation level for inserted line breaks
	multiLineCommaPeriod uint8 // if > 0, break onto a new line after this many commas
	isDeclaration        bool  // true when printing a declaration value, enabling pretty-print heuristics
}
+
+func functionMultiLineCommaPeriod(token css_ast.Token) uint8 {
+	if token.Kind == css_lexer.TFunction {
+		commaCount := 0
+		for _, t := range *token.Children {
+			if t.Kind == css_lexer.TComma {
+				commaCount++
+			}
+		}
+
+		switch strings.ToLower(token.Text) {
+		case "linear-gradient", "radial-gradient", "conic-gradient",
+			"repeating-linear-gradient", "repeating-radial-gradient", "repeating-conic-gradient":
+			if commaCount >= 2 {
+				return 1
+			}
+
+		case "matrix":
+			if commaCount == 5 {
+				return 2
+			}
+
+		case "matrix3d":
+			if commaCount == 15 {
+				return 4
+			}
+		}
+	}
+	return 0
+}
+
// printTokens prints a token stream (a declaration value or the contents of
// a function/block), managing inter-token whitespace, optional multi-line
// pretty-printing at comma boundaries, and recursion into child token lists.
// It reports whether trailing whitespace was printed.
func (p *printer) printTokens(tokens []css_ast.Token, opts printTokensOpts) bool {
	hasWhitespaceAfter := len(tokens) > 0 && (tokens[0].Whitespace&css_ast.WhitespaceBefore) != 0

	// Pretty-print long comma-separated declarations of 3 or more items
	commaPeriod := int(opts.multiLineCommaPeriod)
	if !p.options.MinifyWhitespace && opts.isDeclaration {
		commaCount := 0
		for _, t := range tokens {
			if t.Kind == css_lexer.TComma {
				commaCount++
				if commaCount >= 2 {
					commaPeriod = 1
					break
				}
			}
			if t.Kind == css_lexer.TFunction && functionMultiLineCommaPeriod(t) > 0 {
				commaPeriod = 1
				break
			}
		}
	}

	commaCount := 0
	for i, t := range tokens {
		if t.Kind == css_lexer.TComma {
			commaCount++
		}
		if t.Kind == css_lexer.TWhitespace {
			// Whitespace tokens are not printed directly; they just flag that
			// a separator is needed before the next real token
			hasWhitespaceAfter = true
			continue
		}
		if hasWhitespaceAfter {
			// Either break the line at a comma-group boundary, or print a space
			// (possibly replaced by a line-limit wrap)
			if commaPeriod > 0 && (i == 0 || (tokens[i-1].Kind == css_lexer.TComma && commaCount%commaPeriod == 0)) {
				p.print("\n")
				p.printIndent(opts.indent + 1)
			} else if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit(opts.indent+1) {
				p.print(" ")
			}
		}
		hasWhitespaceAfter = (t.Whitespace&css_ast.WhitespaceAfter) != 0 ||
			(i+1 < len(tokens) && (tokens[i+1].Whitespace&css_ast.WhitespaceBefore) != 0)

		whitespace := mayNeedWhitespaceAfter
		if !hasWhitespaceAfter {
			whitespace = canDiscardWhitespaceAfter
		}

		if p.options.AddSourceMappings {
			p.builder.AddSourceMapping(t.Loc, "", p.css)
		}

		switch t.Kind {
		case css_lexer.TIdent:
			p.printIdent(t.Text, identNormal, whitespace)

		case css_lexer.TSymbol:
			ref := ast.Ref{SourceIndex: p.options.InputSourceIndex, InnerIndex: t.PayloadIndex}
			p.printSymbol(t.Loc, ref, identNormal, whitespace)

		case css_lexer.TFunction:
			p.printIdent(t.Text, identNormal, whitespace)
			p.print("(")

		case css_lexer.TDimension:
			value := t.DimensionValue()
			p.print(value)
			// If the number contains an exponent, the unit can't be confused
			// with one, which relaxes the escaping of a leading "e"
			mode := identDimensionUnit
			if strings.ContainsAny(value, "eE") {
				mode = identDimensionUnitAfterExponent
			}
			p.printIdent(t.DimensionUnit(), mode, whitespace)

		case css_lexer.TAtKeyword:
			p.print("@")
			p.printIdent(t.Text, identNormal, whitespace)

		case css_lexer.THash:
			p.print("#")
			p.printIdent(t.Text, identHash, whitespace)

		case css_lexer.TString:
			p.printQuoted(t.Text, 0)

		case css_lexer.TURL:
			record := p.importRecords[t.PayloadIndex]
			text := record.Path.Text
			tryToAvoidQuote := true
			var flags printQuotedFlags
			if record.Flags.Has(ast.ContainsUniqueKey) {
				flags |= printQuotedNoWrap

				// If the caller will be substituting a path here later using string
				// substitution, then we can't be sure that it will form a valid URL
				// token when unquoted (e.g. it may contain spaces). So we need to
				// quote the unique key here just in case. For more info see this
				// issue: https://github.com/evanw/esbuild/issues/3410
				tryToAvoidQuote = false
			} else if p.options.LineLimit > 0 && p.currentLineLength()+len(text) >= p.options.LineLimit {
				tryToAvoidQuote = false
			}
			p.print("url(")
			p.printQuotedWithQuote(text, bestQuoteCharForString(text, tryToAvoidQuote), flags)
			p.print(")")
			p.recordImportPathForMetafile(t.PayloadIndex)

		case css_lexer.TUnterminatedString:
			// We must end this with a newline so that this string stays unterminated
			p.print(t.Text)
			p.print("\n")
			if !p.options.MinifyWhitespace {
				p.printIndent(opts.indent)
			}
			hasWhitespaceAfter = false

		default:
			p.print(t.Text)
		}

		if t.Children != nil {
			childCommaPeriod := uint8(0)

			if commaPeriod > 0 && opts.isDeclaration {
				childCommaPeriod = functionMultiLineCommaPeriod(t)
			}

			if childCommaPeriod > 0 {
				opts.indent++
				if !p.options.MinifyWhitespace {
					p.print("\n")
					p.printIndent(opts.indent + 1)
				}
			}

			// Recursively print the contents of this bracketed/function token
			p.printTokens(*t.Children, printTokensOpts{
				indent:               opts.indent,
				multiLineCommaPeriod: childCommaPeriod,
			})

			if childCommaPeriod > 0 {
				opts.indent--
			}

			// Print the matching closing delimiter for the opening token
			switch t.Kind {
			case css_lexer.TFunction:
				p.print(")")

			case css_lexer.TOpenParen:
				p.print(")")

			case css_lexer.TOpenBrace:
				p.print("}")

			case css_lexer.TOpenBracket:
				p.print("]")
			}
		}
	}
	if hasWhitespaceAfter {
		p.print(" ")
	}
	return hasWhitespaceAfter
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/error_other.go b/source/vendor/github.com/evanw/esbuild/internal/fs/error_other.go
new file mode 100644
index 0000000..bd8647d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/error_other.go
@@ -0,0 +1,9 @@
+//go:build (!js || !wasm) && !windows
+// +build !js !wasm
+// +build !windows
+
+package fs
+
// is_ERROR_INVALID_NAME reports whether "err" is the Windows
// ERROR_INVALID_NAME error. On the platforms this file is built for
// (non-Windows, non-WASM) that error cannot occur, so this always
// returns false.
func is_ERROR_INVALID_NAME(err error) bool {
	return false
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/error_wasm+windows.go b/source/vendor/github.com/evanw/esbuild/internal/fs/error_wasm+windows.go
new file mode 100644
index 0000000..1ddecff
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/error_wasm+windows.go
@@ -0,0 +1,17 @@
+//go:build (js && wasm) || windows
+// +build js,wasm windows
+
+package fs
+
+import "syscall"
+
// This check is here in a conditionally-compiled file because Go's standard
// library for Plan 9 doesn't define a type called "syscall.Errno". Plan 9 is
// not a supported operating system but someone wanted to be able to compile
// esbuild for Plan 9 anyway.
//
// is_ERROR_INVALID_NAME reports whether "err" is the Windows
// ERROR_INVALID_NAME error (returned for paths with invalid characters).
func is_ERROR_INVALID_NAME(err error) bool {
	// This has been copied from golang.org/x/sys/windows
	const ERROR_INVALID_NAME syscall.Errno = 123

	return err == ERROR_INVALID_NAME
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/filepath.go b/source/vendor/github.com/evanw/esbuild/internal/fs/filepath.go
new file mode 100644
index 0000000..edab22c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/filepath.go
@@ -0,0 +1,649 @@
+// Code in this file has been forked from the "filepath" module in the Go
+// source code to work around bugs with the WebAssembly build target. More
+// information about why here: https://github.com/golang/go/issues/43768.
+
+////////////////////////////////////////////////////////////////////////////////
+
+// Copyright (c) 2009 The Go Authors. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//    * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//    * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//    * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package fs
+
+import (
+	"errors"
+	"os"
+	"strings"
+	"syscall"
+)
+
// goFilepath is a platform-parameterized fork of the standard library's
// "path/filepath" logic. Instead of selecting behavior at compile time it
// carries the platform semantics as data, so one binary can emulate either
// Unix or Windows path handling (needed for the WebAssembly build target).
type goFilepath struct {
	cwd           string // current working directory, used by "abs"
	isWindows     bool   // true to use Windows semantics (volume names, backslashes)
	pathSeparator byte   // separator byte used when joining and cleaning paths
}
+
// isSlash reports whether c is a forward or backward slash.
func isSlash(c uint8) bool {
	switch c {
	case '/', '\\':
		return true
	}
	return false
}
+
// reservedNames lists reserved Windows names. Search for PRN in
// https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file
// for details. These device names resolve from any directory, so paths
// naming them are treated as absolute (see isAbs).
var reservedNames = []string{
	"CON", "PRN", "AUX", "NUL",
	"COM1", "COM2", "COM3", "COM4", "COM5", "COM6", "COM7", "COM8", "COM9",
	"LPT1", "LPT2", "LPT3", "LPT4", "LPT5", "LPT6", "LPT7", "LPT8", "LPT9",
}
+
+// isReservedName returns true, if path is Windows reserved name.
+// See reservedNames for the full list.
+func isReservedName(path string) bool {
+	if len(path) == 0 {
+		return false
+	}
+	for _, reserved := range reservedNames {
+		if strings.EqualFold(path, reserved) {
+			return true
+		}
+	}
+	return false
+}
+
// IsAbs reports whether the path is absolute.
func (fp goFilepath) isAbs(path string) bool {
	if !fp.isWindows {
		return strings.HasPrefix(path, "/")
	}
	// Reserved device names like "NUL" refer to the same device from any
	// directory, so they count as absolute
	if isReservedName(path) {
		return true
	}
	l := fp.volumeNameLen(path)
	if l == 0 {
		return false
	}
	// After the volume name ("C:" or "\\host\share") a separator must follow
	// for the path to be absolute; "C:foo" is drive-relative, not absolute
	path = path[l:]
	if path == "" {
		return false
	}
	return isSlash(path[0])
}
+
// Abs returns an absolute representation of path.
// If the path is not absolute it will be joined with the current
// working directory to turn it into an absolute path. The absolute
// path name for a given file is not guaranteed to be unique.
// Abs calls Clean on the result.
func (fp goFilepath) abs(path string) (string, error) {
	if fp.isAbs(path) {
		return fp.clean(path), nil
	}
	// join() cleans its result, so no separate clean() call is needed here
	return fp.join([]string{fp.cwd, path}), nil
}
+
+// IsPathSeparator reports whether c is a directory separator character.
+func (fp goFilepath) isPathSeparator(c uint8) bool {
+	return c == '/' || (fp.isWindows && c == '\\')
+}
+
// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere. A volume name is either a drive letter prefix
// ("C:") or a UNC server/share prefix ("\\server\share").
func (fp goFilepath) volumeNameLen(path string) int {
	if !fp.isWindows {
		return 0
	}
	if len(path) < 2 {
		return 0
	}
	// with drive letter
	c := path[0]
	if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
		return 2
	}
	// is it UNC? https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
	if l := len(path); l >= 5 && isSlash(path[0]) && isSlash(path[1]) &&
		!isSlash(path[2]) && path[2] != '.' {
		// first, leading `\\` and next shouldn't be `\`. its server name.
		for n := 3; n < l-1; n++ {
			// second, next '\' shouldn't be repeated.
			if isSlash(path[n]) {
				n++
				// third, following something characters. its share name.
				if !isSlash(path[n]) {
					if path[n] == '.' {
						break
					}
					// Consume the share name up to the next separator
					for ; n < l; n++ {
						if isSlash(path[n]) {
							break
						}
					}
					return n
				}
				break
			}
		}
	}
	return 0
}
+
// EvalSymlinks returns the path name after the evaluation of any symbolic
// links.
// If path is relative the result will be relative to the current directory,
// unless one of the components is an absolute symbolic link.
// EvalSymlinks calls Clean on the result.
//
// The algorithm walks the path component by component, accumulating the
// resolved result in "dest" and splicing in symlink targets as it finds
// them (which restarts traversal of the remaining suffix).
func (fp goFilepath) evalSymlinks(path string) (string, error) {
	volLen := fp.volumeNameLen(path)
	pathSeparator := string(fp.pathSeparator)

	if volLen < len(path) && fp.isPathSeparator(path[volLen]) {
		volLen++
	}
	vol := path[:volLen]
	dest := vol
	linksWalked := 0
	for start, end := volLen, volLen; start < len(path); start = end {
		// Skip separators, then find the extent of the next component
		for start < len(path) && fp.isPathSeparator(path[start]) {
			start++
		}
		end = start
		for end < len(path) && !fp.isPathSeparator(path[end]) {
			end++
		}

		// On Windows, "." can be a symlink.
		// We look it up, and use the value if it is absolute.
		// If not, we just return ".".
		isWindowsDot := fp.isWindows && path[fp.volumeNameLen(path):] == "."

		// The next path component is in path[start:end].
		if end == start {
			// No more path components.
			break
		} else if path[start:end] == "." && !isWindowsDot {
			// Ignore path component ".".
			continue
		} else if path[start:end] == ".." {
			// Back up to previous component if possible.
			// Note that volLen includes any leading slash.

			// Set r to the index of the last slash in dest,
			// after the volume.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if fp.isPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen || dest[r+1:] == ".." {
				// Either path has no slashes
				// (it's empty or just "C:")
				// or it ends in a ".." we had to keep.
				// Either way, keep this "..".
				if len(dest) > volLen {
					dest += pathSeparator
				}
				dest += ".."
			} else {
				// Discard everything since the last slash.
				dest = dest[:r]
			}
			continue
		}

		// Ordinary path component. Add it to result.

		if len(dest) > fp.volumeNameLen(dest) && !fp.isPathSeparator(dest[len(dest)-1]) {
			dest += pathSeparator
		}

		dest += path[start:end]

		// Resolve symlink.

		fi, err := os.Lstat(dest)
		if err != nil {
			return "", err
		}

		if fi.Mode()&os.ModeSymlink == 0 {
			// Not a symlink; a non-directory mid-path is an error
			if !fi.Mode().IsDir() && end < len(path) {
				return "", syscall.ENOTDIR
			}
			continue
		}

		// Found symlink.

		// Bound the number of links followed to detect cycles
		linksWalked++
		if linksWalked > 255 {
			return "", errors.New("EvalSymlinks: too many links")
		}

		link, err := os.Readlink(dest)
		if err != nil {
			return "", err
		}

		if isWindowsDot && !fp.isAbs(link) {
			// On Windows, if "." is a relative symlink,
			// just return ".".
			break
		}

		// Splice the link target in front of the unprocessed suffix and
		// continue walking from there
		path = link + path[end:]

		v := fp.volumeNameLen(link)
		if v > 0 {
			// Symlink to drive name is an absolute path.
			if v < len(link) && fp.isPathSeparator(link[v]) {
				v++
			}
			vol = link[:v]
			dest = vol
			end = len(vol)
		} else if len(link) > 0 && fp.isPathSeparator(link[0]) {
			// Symlink to absolute path.
			dest = link[:1]
			end = 1
		} else {
			// Symlink to relative path; replace last
			// path component in dest.
			var r int
			for r = len(dest) - 1; r >= volLen; r-- {
				if fp.isPathSeparator(dest[r]) {
					break
				}
			}
			if r < volLen {
				dest = vol
			} else {
				dest = dest[:r]
			}
			end = 0
		}
	}
	return fp.clean(dest), nil
}
+
// A lazybuf is a lazily constructed path buffer.
// It supports append, reading previously appended bytes,
// and retrieving the final string. It does not allocate a buffer
// to hold the output until that output diverges from s.
type lazybuf struct {
	path       string // the source path being rewritten (without its volume name)
	volAndPath string // the original path including its volume name
	buf        []byte // lazily allocated output; nil while output == path prefix
	w          int    // write position (number of output bytes so far)
	volLen     int    // length of the volume-name prefix in volAndPath
}
+
+func (b *lazybuf) index(i int) byte {
+	if b.buf != nil {
+		return b.buf[i]
+	}
+	return b.path[i]
+}
+
// append writes c at the current output position. While every byte written
// matches the source path, no buffer is allocated; on the first divergence
// the matching prefix is copied into "buf" and all writes go there.
func (b *lazybuf) append(c byte) {
	if b.buf == nil {
		if b.w < len(b.path) && b.path[b.w] == c {
			// Output still equals the source prefix: just advance
			b.w++
			return
		}
		b.buf = make([]byte, len(b.path))
		copy(b.buf, b.path[:b.w])
	}
	b.buf[b.w] = c
	b.w++
}
+
// string returns the volume name plus the accumulated output bytes.
func (b *lazybuf) string() string {
	if b.buf == nil {
		// No divergence occurred; reuse a slice of the original string
		return b.volAndPath[:b.volLen+b.w]
	}
	return b.volAndPath[:b.volLen] + string(b.buf[:b.w])
}
+
+// FromSlash returns the result of replacing each slash ('/') character
+// in path with a separator character. Multiple slashes are replaced
+// by multiple separators.
+func (fp goFilepath) fromSlash(path string) string {
+	if !fp.isWindows {
+		return path
+	}
+	return strings.ReplaceAll(path, "/", "\\")
+}
+
// Clean returns the shortest path name equivalent to path
// by purely lexical processing. It applies the following rules
// iteratively until no further processing can be done:
//
//  1. Replace multiple Separator elements with a single one.
//  2. Eliminate each . path name element (the current directory).
//  3. Eliminate each inner .. path name element (the parent directory)
//     along with the non-.. element that precedes it.
//  4. Eliminate .. elements that begin a rooted path:
//     that is, replace "/.." by "/" at the beginning of a path,
//     assuming Separator is '/'.
//
// The returned path ends in a slash only if it represents a root directory,
// such as "/" on Unix or `C:\` on Windows.
//
// Finally, any occurrences of slash are replaced by Separator.
//
// If the result of this process is an empty string, Clean
// returns the string ".".
//
// See also Rob Pike, "Lexical File Names in Plan 9 or
// Getting Dot-Dot Right,"
// https://9p.io/sys/doc/lexnames.html
func (fp goFilepath) clean(path string) string {
	originalPath := path
	volLen := fp.volumeNameLen(path)
	path = path[volLen:]
	if path == "" {
		if volLen > 1 && originalPath[1] != ':' {
			// should be UNC
			return fp.fromSlash(originalPath)
		}
		return originalPath + "."
	}
	rooted := fp.isPathSeparator(path[0])

	// Invariants:
	//	reading from path; r is index of next byte to process.
	//	writing to buf; w is index of next byte to write.
	//	dotdot is index in buf where .. must stop, either because
	//		it is the leading slash or it is a leading ../../.. prefix.
	n := len(path)
	out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen}
	r, dotdot := 0, 0
	if rooted {
		out.append(fp.pathSeparator)
		r, dotdot = 1, 1
	}

	for r < n {
		switch {
		case fp.isPathSeparator(path[r]):
			// empty path element
			r++
		case path[r] == '.' && (r+1 == n || fp.isPathSeparator(path[r+1])):
			// . element
			r++
		case path[r] == '.' && path[r+1] == '.' && (r+2 == n || fp.isPathSeparator(path[r+2])):
			// .. element: remove to last separator
			r += 2
			switch {
			case out.w > dotdot:
				// can backtrack
				out.w--
				for out.w > dotdot && !fp.isPathSeparator(out.index(out.w)) {
					out.w--
				}
			case !rooted:
				// cannot backtrack, but not rooted, so append .. element.
				if out.w > 0 {
					out.append(fp.pathSeparator)
				}
				out.append('.')
				out.append('.')
				dotdot = out.w
			}
			// (rooted "/.." with nothing to backtrack over is dropped entirely)
		default:
			// real path element.
			// add slash if needed
			if rooted && out.w != 1 || !rooted && out.w != 0 {
				out.append(fp.pathSeparator)
			}
			// copy element
			for ; r < n && !fp.isPathSeparator(path[r]); r++ {
				out.append(path[r])
			}
		}
	}

	// Turn empty string into "."
	if out.w == 0 {
		out.append('.')
	}

	return fp.fromSlash(out.string())
}
+
// VolumeName returns leading volume name.
// Given "C:\foo\bar" it returns "C:" on Windows.
// Given "\\host\share\foo" it returns "\\host\share".
// On other platforms it returns "" (volumeNameLen is 0 off Windows).
func (fp goFilepath) volumeName(path string) string {
	return path[:fp.volumeNameLen(path)]
}
+
+// Base returns the last element of path.
+// Trailing path separators are removed before extracting the last element.
+// If the path is empty, Base returns ".".
+// If the path consists entirely of separators, Base returns a single separator.
+func (fp goFilepath) base(path string) string {
+	if path == "" {
+		return "."
+	}
+	// Strip trailing slashes.
+	for len(path) > 0 && fp.isPathSeparator(path[len(path)-1]) {
+		path = path[0 : len(path)-1]
+	}
+	// Throw away volume name
+	path = path[len(fp.volumeName(path)):]
+	// Find the last element
+	i := len(path) - 1
+	for i >= 0 && !fp.isPathSeparator(path[i]) {
+		i--
+	}
+	if i >= 0 {
+		path = path[i+1:]
+	}
+	// If empty now, it had only slashes.
+	if path == "" {
+		return string(fp.pathSeparator)
+	}
+	return path
+}
+
// Dir returns all but the last element of path, typically the path's directory.
// After dropping the final element, Dir calls Clean on the path and trailing
// slashes are removed.
// If the path is empty, Dir returns ".".
// If the path consists entirely of separators, Dir returns a single separator.
// The returned path does not end in a separator unless it is the root directory.
func (fp goFilepath) dir(path string) string {
	vol := fp.volumeName(path)
	// Scan backwards (stopping before the volume prefix) to the separator
	// that precedes the final element.
	i := len(path) - 1
	for i >= len(vol) && !fp.isPathSeparator(path[i]) {
		i--
	}
	dir := fp.clean(path[len(vol) : i+1])
	if dir == "." && len(vol) > 2 {
		// must be UNC: a UNC volume such as `\\host\share` is itself the
		// directory, so don't append the "." that clean produced.
		return vol
	}
	return vol + dir
}
+
+// Ext returns the file name extension used by path.
+// The extension is the suffix beginning at the final dot
+// in the final element of path; it is empty if there is
+// no dot.
+func (fp goFilepath) ext(path string) string {
+	for i := len(path) - 1; i >= 0 && !fp.isPathSeparator(path[i]); i-- {
+		if path[i] == '.' {
+			return path[i:]
+		}
+	}
+	return ""
+}
+
+// Join joins any number of path elements into a single path,
+// separating them with an OS specific Separator. Empty elements
+// are ignored. The result is Cleaned. However, if the argument
+// list is empty or all its elements are empty, Join returns
+// an empty string.
+// On Windows, the result will only be a UNC path if the first
+// non-empty element is a UNC path.
+func (fp goFilepath) join(elem []string) string {
+	for i, e := range elem {
+		if e != "" {
+			if fp.isWindows {
+				return fp.joinNonEmpty(elem[i:])
+			}
+			return fp.clean(strings.Join(elem[i:], string(fp.pathSeparator)))
+		}
+	}
+	return ""
+}
+
// joinNonEmpty is like join, but it assumes that the first element is non-empty.
// It exists to implement the Windows-specific rules around drive-relative
// paths and UNC prefixes.
func (fp goFilepath) joinNonEmpty(elem []string) string {
	if len(elem[0]) == 2 && elem[0][1] == ':' {
		// First element is drive letter without terminating slash.
		// Keep path relative to current directory on that drive.
		// Skip empty elements.
		i := 1
		for ; i < len(elem); i++ {
			if elem[i] != "" {
				break
			}
		}
		return fp.clean(elem[0] + strings.Join(elem[i:], string(fp.pathSeparator)))
	}
	// The following logic prevents Join from inadvertently creating a
	// UNC path on Windows. Unless the first element is a UNC path, Join
	// shouldn't create a UNC path. See golang.org/issue/9167.
	p := fp.clean(strings.Join(elem, string(fp.pathSeparator)))
	if !fp.isUNC(p) {
		return p
	}
	// p == UNC only allowed when the first element is a UNC path.
	head := fp.clean(elem[0])
	if fp.isUNC(head) {
		return p
	}
	// head + tail == UNC, but joining two non-UNC paths should not result
	// in a UNC path. Undo creation of UNC path.
	tail := fp.clean(strings.Join(elem[1:], string(fp.pathSeparator)))
	if head[len(head)-1] == fp.pathSeparator {
		// head already ends in a separator, so don't insert another one.
		return head + tail
	}
	return head + string(fp.pathSeparator) + tail
}
+
+// isUNC reports whether path is a UNC path.
+func (fp goFilepath) isUNC(path string) bool {
+	return fp.volumeNameLen(path) > 2
+}
+
// Rel returns a relative path that is lexically equivalent to targpath when
// joined to basepath with an intervening separator. That is,
// Join(basepath, Rel(basepath, targpath)) is equivalent to targpath itself.
// On success, the returned path will always be relative to basepath,
// even if basepath and targpath share no elements.
// An error is returned if targpath can't be made relative to basepath or if
// knowing the current working directory would be necessary to compute it.
// Rel calls Clean on the result.
func (fp goFilepath) rel(basepath, targpath string) (string, error) {
	baseVol := fp.volumeName(basepath)
	targVol := fp.volumeName(targpath)
	base := fp.clean(basepath)
	targ := fp.clean(targpath)
	if fp.sameWord(targ, base) {
		return ".", nil
	}
	base = base[len(baseVol):]
	targ = targ[len(targVol):]
	if base == "." {
		// Treat "." as empty so the element scan below starts cleanly.
		base = ""
	}
	// Can't use IsAbs - `\a` and `a` are both relative in Windows.
	baseSlashed := len(base) > 0 && base[0] == fp.pathSeparator
	targSlashed := len(targ) > 0 && targ[0] == fp.pathSeparator
	if baseSlashed != targSlashed || !fp.sameWord(baseVol, targVol) {
		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
	}
	// Position base[b0:bi] and targ[t0:ti] at the first differing elements.
	bl := len(base)
	tl := len(targ)
	var b0, bi, t0, ti int
	for {
		for bi < bl && base[bi] != fp.pathSeparator {
			bi++
		}
		for ti < tl && targ[ti] != fp.pathSeparator {
			ti++
		}
		if !fp.sameWord(targ[t0:ti], base[b0:bi]) {
			break
		}
		if bi < bl {
			bi++
		}
		if ti < tl {
			ti++
		}
		b0 = bi
		t0 = ti
	}
	if base[b0:bi] == ".." {
		// A ".." left in the cleaned base would require knowing the current
		// working directory to resolve, which is out of scope here.
		return "", errors.New("Rel: can't make " + targpath + " relative to " + basepath)
	}
	if b0 != bl {
		// Base elements left. Must go up before going down.
		seps := strings.Count(base[b0:bl], string(fp.pathSeparator))
		// Room for a leading ".." (2 bytes) plus one separator+".." (3 bytes)
		// per remaining base element, plus the unmatched tail of targ.
		size := 2 + seps*3
		if tl != t0 {
			size += 1 + tl - t0
		}
		buf := make([]byte, size)
		n := copy(buf, "..")
		for i := 0; i < seps; i++ {
			buf[n] = fp.pathSeparator
			copy(buf[n+1:], "..")
			n += 3
		}
		if t0 != tl {
			buf[n] = fp.pathSeparator
			copy(buf[n+1:], targ[t0:])
		}
		return string(buf), nil
	}
	return targ[t0:], nil
}
+
+func (fp goFilepath) sameWord(a, b string) bool {
+	if !fp.isWindows {
+		return a == b
+	}
+	return strings.EqualFold(a, b)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/fs.go b/source/vendor/github.com/evanw/esbuild/internal/fs/fs.go
new file mode 100644
index 0000000..ccfcc6c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/fs.go
@@ -0,0 +1,287 @@
+package fs
+
+// Most of esbuild's internals use this file system abstraction instead of
+// using native file system APIs. This lets us easily mock the file system
+// for tests and also implement Yarn's virtual ".zip" file system overlay.
+
+import (
+	"errors"
+	"os"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+type EntryKind uint8
+
+const (
+	DirEntry  EntryKind = 1
+	FileEntry EntryKind = 2
+)
+
// Entry is a lazily-stat'ed directory entry. The "dir" and "base" fields
// identify the file; "kind" and "symlink" are only filled in on first use
// via "fs.kind" (see Kind and Symlink), guarded by "mutex".
type Entry struct {
	symlink  string // symlink target reported by "fs.kind", if any
	dir      string // directory containing this entry
	base     string // name of the entry within "dir", in its original case
	mutex    sync.Mutex
	kind     EntryKind // valid once "needStat" is false
	needStat bool      // true until "fs.kind" has been called for this entry
}
+
// Kind returns whether this entry is a file or a directory, calling
// "fs.kind" on first use and caching the result. Safe for concurrent use.
func (e *Entry) Kind(fs FS) EntryKind {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if e.needStat {
		// Stat lazily: many entries are listed but never inspected.
		e.needStat = false
		e.symlink, e.kind = fs.kind(e.dir, e.base)
	}
	return e.kind
}
+
// Symlink returns the symlink target reported by "fs.kind" for this entry
// (empty when it is not a symlink), calling "fs.kind" on first use and
// caching the result. Safe for concurrent use.
func (e *Entry) Symlink(fs FS) string {
	e.mutex.Lock()
	defer e.mutex.Unlock()
	if e.needStat {
		// Stat lazily, sharing the cached result with Kind.
		e.needStat = false
		e.symlink, e.kind = fs.kind(e.dir, e.base)
	}
	return e.symlink
}
+
+type accessedEntries struct {
+	wasPresent map[string]bool
+
+	// If this is nil, "SortedKeys()" was not accessed. This means we should
+	// check for whether this directory has changed or not by seeing if any of
+	// the entries in the "wasPresent" map have changed in "present or not"
+	// status, since the only access was to individual entries via "Get()".
+	//
+	// If this is non-nil, "SortedKeys()" was accessed. This means we should
+	// check for whether this directory has changed or not by checking the
+	// "allEntries" array for equality with the existing entries list, since the
+	// code asked for all entries and may have used the presence or absence of
+	// entries in that list.
+	//
+	// The goal of having these two checks is to be as narrow as possible to
+	// avoid unnecessary rebuilds. If only "Get()" is called on a few entries,
+	// then we won't invalidate the build if random unrelated entries are added
+	// or removed. But if "SortedKeys()" is called, we need to invalidate the
+	// build if anything about the set of entries in this directory is changed.
+	allEntries []string
+
+	mutex sync.Mutex
+}
+
+type DirEntries struct {
+	data            map[string]*Entry
+	accessedEntries *accessedEntries
+	dir             string
+}
+
+func MakeEmptyDirEntries(dir string) DirEntries {
+	return DirEntries{dir: dir, data: make(map[string]*Entry)}
+}
+
+type DifferentCase struct {
+	Dir    string
+	Query  string
+	Actual string
+}
+
// Get looks up a single entry by name, case-insensitively. If the entry
// exists but its stored case differs from "query", the returned
// *DifferentCase describes the mismatch so callers can surface a warning.
// Returns (nil, nil) when the entry is absent or the entry map is nil.
func (entries DirEntries) Get(query string) (*Entry, *DifferentCase) {
	if entries.data != nil {
		key := strings.ToLower(query)
		entry := entries.data[key]

		// Track whether this specific entry was present or absent for watch mode
		if accessed := entries.accessedEntries; accessed != nil {
			accessed.mutex.Lock()
			accessed.wasPresent[key] = entry != nil
			accessed.mutex.Unlock()
		}

		if entry != nil {
			if entry.base != query {
				return entry, &DifferentCase{
					Dir:    entries.dir,
					Query:  query,
					Actual: entry.base,
				}
			}
			return entry, nil
		}
	}

	return nil, nil
}
+
+// This function lets you "peek" at the number of entries without watch mode
+// considering the number of entries as having been observed. This is used when
+// generating debug log messages to log the number of entries without causing
+// watch mode to rebuild when the number of entries has been changed.
+func (entries DirEntries) PeekEntryCount() int {
+	if entries.data != nil {
+		return len(entries.data)
+	}
+	return 0
+}
+
// SortedKeys returns the names of all entries (in their original case) in
// sorted order. Calling this records the entire entry set for watch mode,
// so any later change to the set of names invalidates the build (see the
// comment on accessedEntries.allEntries).
func (entries DirEntries) SortedKeys() (keys []string) {
	if entries.data != nil {
		keys = make([]string, 0, len(entries.data))
		for _, entry := range entries.data {
			keys = append(keys, entry.base)
		}
		sort.Strings(keys)

		// Track the exact set of all entries for watch mode
		if entries.accessedEntries != nil {
			entries.accessedEntries.mutex.Lock()
			entries.accessedEntries.allEntries = keys
			entries.accessedEntries.mutex.Unlock()
		}

		return keys
	}

	return
}
+
+type OpenedFile interface {
+	Len() int
+	Read(start int, end int) ([]byte, error)
+	Close() error
+}
+
+type InMemoryOpenedFile struct {
+	Contents []byte
+}
+
+func (f *InMemoryOpenedFile) Len() int {
+	return len(f.Contents)
+}
+
+func (f *InMemoryOpenedFile) Read(start int, end int) ([]byte, error) {
+	return []byte(f.Contents[start:end]), nil
+}
+
+func (f *InMemoryOpenedFile) Close() error {
+	return nil
+}
+
+type FS interface {
+	// The returned map is immutable and is cached across invocations. Do not
+	// mutate it.
+	ReadDirectory(path string) (entries DirEntries, canonicalError error, originalError error)
+	ReadFile(path string) (contents string, canonicalError error, originalError error)
+	OpenFile(path string) (result OpenedFile, canonicalError error, originalError error)
+
+	// This is a key made from the information returned by "stat". It is intended
+	// to be different if the file has been edited, and to otherwise be equal if
+	// the file has not been edited. It should usually work, but no guarantees.
+	//
+	// See https://apenwarr.ca/log/20181113 for more information about why this
+	// can be broken. For example, writing to a file with mmap on WSL on Windows
+	// won't change this key. Hopefully this isn't too much of an issue.
+	//
+	// Additional reading:
+	// - https://github.com/npm/npm/pull/20027
+	// - https://github.com/golang/go/commit/7dea509703eb5ad66a35628b12a678110fbb1f72
+	ModKey(path string) (ModKey, error)
+
+	// This is part of the interface because the mock interface used for tests
+	// should not depend on file system behavior (i.e. different slashes for
+	// Windows) while the real interface should.
+	IsAbs(path string) bool
+	Abs(path string) (string, bool)
+	Dir(path string) string
+	Base(path string) string
+	Ext(path string) string
+	Join(parts ...string) string
+	Cwd() string
+	Rel(base string, target string) (string, bool)
+	EvalSymlinks(path string) (string, bool)
+
+	// This is used in the implementation of "Entry"
+	kind(dir string, base string) (symlink string, kind EntryKind)
+
+	// This is a set of all files used and all directories checked. The build
+	// must be invalidated if any of these watched files change.
+	WatchData() WatchData
+}
+
+type WatchData struct {
+	// These functions return a non-empty path as a string if the file system
+	// entry has been modified. For files, the returned path is the same as the
+	// file path. For directories, the returned path is either the directory
+	// itself or a file in the directory that was changed.
+	Paths map[string]func() string
+}
+
+type ModKey struct {
+	// What gets filled in here is OS-dependent
+	inode      uint64
+	size       int64
+	mtime_sec  int64
+	mtime_nsec int64
+	mode       uint32
+	uid        uint32
+}
+
+// Some file systems have a time resolution of only a few seconds. If a mtime
+// value is too new, we won't be able to tell if it has been recently modified
+// or not. So we only use mtimes for comparison if they are sufficiently old.
+// Apparently the FAT file system has a resolution of two seconds according to
+// this article: https://en.wikipedia.org/wiki/Stat_(system_call).
+const modKeySafetyGap = 3 // In seconds
+var modKeyUnusable = errors.New("The modification key is unusable")
+
+// Limit the number of files open simultaneously to avoid ulimit issues
+var fileOpenLimit = make(chan bool, 32)
+
+func BeforeFileOpen() {
+	// This will block if the number of open files is already at the limit
+	fileOpenLimit <- false
+}
+
+func AfterFileClose() {
+	<-fileOpenLimit
+}
+
// MkdirAll creates the directory "path" along with any missing parents,
// applying "perm" to each directory it creates. This is a fork of
// "os.MkdirAll" to work around bugs with the WebAssembly build target.
// More information here: https://github.com/golang/go/issues/43768.
func MkdirAll(fs FS, path string, perm os.FileMode) error {
	// Run "Join" once to run "Clean" on the path, which removes trailing slashes
	return mkdirAll(fs, fs.Join(path), perm)
}
+
// mkdirAll is the recursive worker for MkdirAll. It expects an
// already-cleaned path (no trailing separators).
func mkdirAll(fs FS, path string, perm os.FileMode) error {
	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	if dir, err := os.Stat(path); err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	// The "parent != path" check terminates the recursion at the root,
	// where Dir returns its argument unchanged.
	if parent := fs.Dir(path); parent != path {
		// Create parent.
		if err := mkdirAll(fs, parent, perm); err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	if err := os.Mkdir(path, perm); err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/fs_mock.go b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_mock.go
new file mode 100644
index 0000000..8626b59
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_mock.go
@@ -0,0 +1,294 @@
+package fs
+
+// This is a mock implementation of the "fs" module for use with tests. It does
+// not actually read from the file system. Instead, it reads from a pre-specified
+// map of file paths to files.
+
+import (
+	"errors"
+	"path"
+	"strings"
+	"syscall"
+)
+
+type MockKind uint8
+
+const (
+	MockUnix MockKind = iota
+	MockWindows
+)
+
+type mockFS struct {
+	dirs          map[string]DirEntries
+	files         map[string]string
+	absWorkingDir string
+	Kind          MockKind
+}
+
// MockFS builds an in-memory FS from a map of file path -> file contents.
// Paths in "input" are Unix-style; with MockWindows they are rewritten to
// "C:"-prefixed backslash paths. All intermediate parent directories are
// synthesized so directory listings work.
func MockFS(input map[string]string, kind MockKind, absWorkingDir string) FS {
	dirs := make(map[string]DirEntries)
	files := make(map[string]string)

	for k, v := range input {
		key := k
		if kind == MockWindows {
			key = "C:" + strings.ReplaceAll(key, "/", "\\")
		}
		files[key] = v
		original := k

		// Build the directory map by walking each file path up to the root
		for {
			kDir := path.Dir(k)
			key := kDir
			if kind == MockWindows {
				key = "C:" + strings.ReplaceAll(key, "/", "\\")
			}
			dir, ok := dirs[key]
			if !ok {
				dir = DirEntries{dir: key, data: make(map[string]*Entry)}
				dirs[key] = dir
			}
			if kDir == k {
				// Reached the root, which is its own parent
				break
			}
			base := path.Base(k)
			// Entries are keyed by lowercased name to model case-insensitive
			// lookups (see DirEntries.Get); only the innermost element of the
			// original path is a file, everything above it is a directory.
			if k == original {
				dir.data[strings.ToLower(base)] = &Entry{kind: FileEntry, base: base}
			} else {
				dir.data[strings.ToLower(base)] = &Entry{kind: DirEntry, base: base}
			}
			k = kDir
		}
	}

	return &mockFS{dirs, files, absWorkingDir, kind}
}
+
// ReadDirectory returns the synthesized entries for "path", or ENOENT if
// MockFS never created that directory. Trailing separators are trimmed
// before the lookup, but a bare root separator is kept (the "i <= firstSlash"
// guard stops the trimming at the first separator in the path).
func (fs *mockFS) ReadDirectory(path string) (DirEntries, error, error) {
	if fs.Kind == MockWindows {
		path = strings.ReplaceAll(path, "/", "\\")
	}

	var slash byte = '/'
	if fs.Kind == MockWindows {
		slash = '\\'
	}

	// Trim trailing slashes before lookup
	firstSlash := strings.IndexByte(path, slash)
	for {
		i := strings.LastIndexByte(path, slash)
		if i != len(path)-1 || i <= firstSlash {
			break
		}
		path = path[:i]
	}

	if dir, ok := fs.dirs[path]; ok {
		return dir, nil, nil
	}
	return DirEntries{}, syscall.ENOENT, syscall.ENOENT
}
+
+func (fs *mockFS) ReadFile(path string) (string, error, error) {
+	if fs.Kind == MockWindows {
+		path = strings.ReplaceAll(path, "/", "\\")
+	}
+	if contents, ok := fs.files[path]; ok {
+		return contents, nil, nil
+	}
+	return "", syscall.ENOENT, syscall.ENOENT
+}
+
+func (fs *mockFS) OpenFile(path string) (OpenedFile, error, error) {
+	if fs.Kind == MockWindows {
+		path = strings.ReplaceAll(path, "/", "\\")
+	}
+	if contents, ok := fs.files[path]; ok {
+		return &InMemoryOpenedFile{Contents: []byte(contents)}, nil, nil
+	}
+	return nil, syscall.ENOENT, syscall.ENOENT
+}
+
+func (fs *mockFS) ModKey(path string) (ModKey, error) {
+	return ModKey{}, errors.New("This is not available during tests")
+}
+
// win2unix converts a mock Windows path to its Unix form: a leading
// `C:\` drive prefix is dropped and every backslash becomes a forward
// slash. Paths without the drive prefix only get their slashes flipped.
func win2unix(p string) string {
	if strings.HasPrefix(p, `C:\`) {
		p = p[len("C:"):]
	}
	return strings.ReplaceAll(p, `\`, "/")
}
+
// unix2win converts a mock Unix path to its Windows form: forward slashes
// become backslashes, and absolute paths gain a "C:" drive prefix.
func unix2win(p string) string {
	converted := strings.ReplaceAll(p, "/", `\`)
	if strings.HasPrefix(converted, `\`) {
		converted = "C:" + converted
	}
	return converted
}
+
+func (fs *mockFS) IsAbs(p string) bool {
+	if fs.Kind == MockWindows {
+		p = win2unix(p)
+	}
+	return path.IsAbs(p)
+}
+
+func (fs *mockFS) Abs(p string) (string, bool) {
+	if fs.Kind == MockWindows {
+		p = win2unix(p)
+	}
+
+	p = path.Clean(path.Join("/", p))
+
+	if fs.Kind == MockWindows {
+		p = unix2win(p)
+	}
+
+	return p, true
+}
+
+func (fs *mockFS) Dir(p string) string {
+	if fs.Kind == MockWindows {
+		p = win2unix(p)
+	}
+
+	p = path.Dir(p)
+
+	if fs.Kind == MockWindows {
+		p = unix2win(p)
+	}
+
+	return p
+}
+
+func (fs *mockFS) Base(p string) string {
+	if fs.Kind == MockWindows {
+		p = win2unix(p)
+	}
+
+	p = path.Base(p)
+
+	if fs.Kind == MockWindows && p == "/" {
+		p = "\\"
+	}
+
+	return p
+}
+
+func (fs *mockFS) Ext(p string) string {
+	if fs.Kind == MockWindows {
+		p = win2unix(p)
+	}
+
+	return path.Ext(p)
+}
+
+func (fs *mockFS) Join(parts ...string) string {
+	if fs.Kind == MockWindows {
+		converted := make([]string, len(parts))
+		for i, part := range parts {
+			converted[i] = win2unix(part)
+		}
+		parts = converted
+	}
+
+	p := path.Clean(path.Join(parts...))
+
+	if fs.Kind == MockWindows {
+		p = unix2win(p)
+	}
+
+	return p
+}
+
+func (fs *mockFS) Cwd() string {
+	return fs.absWorkingDir
+}
+
// splitOnSlash splits path at its first "/", returning the leading element
// and the remainder. When there is no slash, it returns (path, "").
func splitOnSlash(path string) (string, string) {
	head, tail, _ := strings.Cut(path, "/")
	return head, tail
}
+
+func (fs *mockFS) Rel(base string, target string) (string, bool) {
+	if fs.Kind == MockWindows {
+		base = win2unix(base)
+		target = win2unix(target)
+	}
+
+	base = path.Clean(base)
+	target = path.Clean(target)
+
+	// Go's implementation does these checks
+	if base == target {
+		return ".", true
+	}
+	if base == "." {
+		base = ""
+	}
+
+	// Go's implementation fails when this condition is false. I believe this is
+	// because of this part of the contract, from Go's documentation: "An error
+	// is returned if targpath can't be made relative to basepath or if knowing
+	// the current working directory would be necessary to compute it."
+	if (len(base) > 0 && base[0] == '/') != (len(target) > 0 && target[0] == '/') {
+		return "", false
+	}
+
+	// Find the common parent directory
+	for {
+		bHead, bTail := splitOnSlash(base)
+		tHead, tTail := splitOnSlash(target)
+		if bHead != tHead {
+			break
+		}
+		base = bTail
+		target = tTail
+	}
+
+	// Stop now if base is a subpath of target
+	if base == "" {
+		if fs.Kind == MockWindows {
+			target = unix2win(target)
+		}
+		return target, true
+	}
+
+	// Traverse up to the common parent
+	commonParent := strings.Repeat("../", strings.Count(base, "/")+1)
+
+	// Stop now if target is a subpath of base
+	if target == "" {
+		target = commonParent[:len(commonParent)-1]
+		if fs.Kind == MockWindows {
+			target = unix2win(target)
+		}
+		return target, true
+	}
+
+	// Otherwise, down to the parent
+	target = commonParent + target
+	if fs.Kind == MockWindows {
+		target = unix2win(target)
+	}
+	return target, true
+}
+
+func (fs *mockFS) EvalSymlinks(path string) (string, bool) {
+	return "", false
+}
+
+func (fs *mockFS) kind(dir string, base string) (symlink string, kind EntryKind) {
+	panic("This should never be called")
+}
+
+func (fs *mockFS) WatchData() WatchData {
+	panic("This should never be called")
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/fs_real.go b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_real.go
new file mode 100644
index 0000000..412eb68
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_real.go
@@ -0,0 +1,543 @@
+package fs
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+type realFS struct {
+	// Stores the file entries for directories we've listed before
+	entries map[string]entriesOrErr
+
+	// This stores data that will end up being returned by "WatchData()"
+	watchData map[string]privateWatchData
+
+	// When building with WebAssembly, the Go compiler doesn't correctly handle
+	// platform-specific path behavior. Hack around these bugs by compiling
+	// support for both Unix and Windows paths into all executables and switch
+	// between them at run-time instead.
+	fp goFilepath
+
+	entriesMutex sync.Mutex
+	watchMutex   sync.Mutex
+
+	// If true, do not use the "entries" cache
+	doNotCacheEntries bool
+}
+
+type entriesOrErr struct {
+	canonicalError error
+	originalError  error
+	entries        DirEntries
+}
+
+type watchState uint8
+
+const (
+	stateNone                  watchState = iota
+	stateDirHasAccessedEntries            // Compare "accessedEntries"
+	stateDirUnreadable                    // Compare directory readability
+	stateFileHasModKey                    // Compare "modKey"
+	stateFileNeedModKey                   // Need to transition to "stateFileHasModKey" or "stateFileUnusableModKey" before "WatchData()" returns
+	stateFileMissing                      // Compare file presence
+	stateFileUnusableModKey               // Compare "fileContents"
+)
+
+type privateWatchData struct {
+	accessedEntries *accessedEntries
+	fileContents    string
+	modKey          ModKey
+	state           watchState
+}
+
+type RealFSOptions struct {
+	AbsWorkingDir string
+	WantWatchData bool
+	DoNotCache    bool
+}
+
// RealFS returns an FS backed by the real file system, configured for the
// current platform's path rules. The result is wrapped in a zipFS overlay
// so paths into ".zip" archives can be traversed. An error is returned
// only when the supplied working directory is not an absolute path.
func RealFS(options RealFSOptions) (FS, error) {
	var fp goFilepath
	if CheckIfWindows() {
		fp.isWindows = true
		fp.pathSeparator = '\\'
	} else {
		fp.isWindows = false
		fp.pathSeparator = '/'
	}

	// Come up with a default working directory if one was not specified
	fp.cwd = options.AbsWorkingDir
	if fp.cwd == "" {
		if cwd, err := os.Getwd(); err == nil {
			fp.cwd = cwd
		} else if fp.isWindows {
			// If the OS working directory is unavailable, fall back to a root
			fp.cwd = "C:\\"
		} else {
			fp.cwd = "/"
		}
	} else if !fp.isAbs(fp.cwd) {
		return nil, fmt.Errorf("The working directory %q is not an absolute path", fp.cwd)
	}

	// Resolve symlinks in the current working directory. Symlinks are resolved
	// when input file paths are converted to absolute paths because we need to
	// recognize an input file as unique even if it has multiple symlinks
	// pointing to it. The build will generate relative paths from the current
	// working directory to the absolute input file paths for error messages,
	// so the current working directory should be processed the same way. Not
	// doing this causes test failures with esbuild when run from inside a
	// symlinked directory.
	//
	// This deliberately ignores errors due to e.g. infinite loops. If there is
	// an error, we will just use the original working directory and likely
	// encounter an error later anyway. And if we don't encounter an error
	// later, then the current working directory didn't even matter and the
	// error is unimportant.
	if path, err := fp.evalSymlinks(fp.cwd); err == nil {
		fp.cwd = path
	}

	// Only allocate memory for watch data if necessary
	var watchData map[string]privateWatchData
	if options.WantWatchData {
		watchData = make(map[string]privateWatchData)
	}

	var result FS = &realFS{
		entries:           make(map[string]entriesOrErr),
		fp:                fp,
		watchData:         watchData,
		doNotCacheEntries: options.DoNotCache,
	}

	// Add a wrapper that lets us traverse into ".zip" files. This is what yarn
	// uses as a package format when yarn is in its "PnP" mode.
	result = &zipFS{
		inner:    result,
		zipFiles: make(map[string]*zipFile),
	}

	return result, nil
}
+
// ReadDirectory lists the entries of "dir". Results — including failures —
// are cached unless caching is disabled, and the access is recorded for
// watch mode. The canonical error has OS-specific wrapping stripped so
// callers can compare it against values like syscall.ENOENT; the original
// error is preserved for user-facing messages.
func (fs *realFS) ReadDirectory(dir string) (entries DirEntries, canonicalError error, originalError error) {
	if !fs.doNotCacheEntries {
		// First, check the cache
		cached, ok := func() (cached entriesOrErr, ok bool) {
			fs.entriesMutex.Lock()
			defer fs.entriesMutex.Unlock()
			cached, ok = fs.entries[dir]
			return
		}()
		if ok {
			// Cache hit: stop now
			return cached.entries, cached.canonicalError, cached.originalError
		}
	}

	// Cache miss: read the directory entries
	names, canonicalError, originalError := fs.readdir(dir)
	entries = DirEntries{dir: dir, data: make(map[string]*Entry)}

	// Unwrap to get the underlying error
	if pathErr, ok := canonicalError.(*os.PathError); ok {
		canonicalError = pathErr.Unwrap()
	}

	if canonicalError == nil {
		for _, name := range names {
			// Call "stat" lazily for performance. The "@material-ui/icons" package
			// contains a directory with over 11,000 entries in it and running "stat"
			// for each entry was a big performance issue for that package.
			entries.data[strings.ToLower(name)] = &Entry{
				dir:      dir,
				base:     name,
				needStat: true,
			}
		}
	}

	// Store data for watch mode
	if fs.watchData != nil {
		// (Deferring the unlock before taking the lock is fine: the defer
		// only runs when this function returns.)
		defer fs.watchMutex.Unlock()
		fs.watchMutex.Lock()
		state := stateDirHasAccessedEntries
		if canonicalError != nil {
			state = stateDirUnreadable
		}
		entries.accessedEntries = &accessedEntries{wasPresent: make(map[string]bool)}
		fs.watchData[dir] = privateWatchData{
			accessedEntries: entries.accessedEntries,
			state:           state,
		}
	}

	// Update the cache unconditionally. Even if the read failed, we don't want to
	// retry again later. The directory is inaccessible so trying again is wasted.
	if canonicalError != nil {
		entries.data = nil
	}
	if !fs.doNotCacheEntries {
		fs.entriesMutex.Lock()
		defer fs.entriesMutex.Unlock()
		fs.entries[dir] = entriesOrErr{
			entries:        entries,
			canonicalError: canonicalError,
			originalError:  originalError,
		}
	}
	return entries, canonicalError, originalError
}
+
// ReadFile reads the entire file at "path" into a string. In watch mode the
// contents are also recorded so later invalidation checks can compare file
// data (see WatchData).
func (fs *realFS) ReadFile(path string) (contents string, canonicalError error, originalError error) {
	BeforeFileOpen()
	defer AfterFileClose()
	buffer, originalError := ioutil.ReadFile(path)
	canonicalError = fs.canonicalizeError(originalError)

	// Allocate the string once
	fileContents := string(buffer)

	// Store data for watch mode
	if fs.watchData != nil {
		defer fs.watchMutex.Unlock()
		fs.watchMutex.Lock()
		data, ok := fs.watchData[path]
		if canonicalError != nil {
			data.state = stateFileMissing
		} else if !ok || data.state == stateDirUnreadable {
			// Note: If "ReadDirectory" is called before "ReadFile" with this same
			// path, then "data.state" will be "stateDirUnreadable". In that case
			// we want to transition to "stateFileNeedModKey" because it's a file.
			data.state = stateFileNeedModKey
		}
		data.fileContents = fileContents
		fs.watchData[path] = data
	}

	return fileContents, canonicalError, originalError
}
+
+type realOpenedFile struct {
+	handle *os.File
+	len    int
+}
+
+func (f *realOpenedFile) Len() int {
+	return f.len
+}
+
// Read returns the bytes in the half-open range [start, end) of the file.
// It seeks once and then loops because "os.File.Read" may return fewer
// bytes than requested; an error is only surfaced when the read made no
// progress (n <= 0), otherwise the remaining bytes are retried.
func (f *realOpenedFile) Read(start int, end int) ([]byte, error) {
	bytes := make([]byte, end-start)
	remaining := bytes

	_, err := f.handle.Seek(int64(start), io.SeekStart)
	if err != nil {
		return nil, err
	}

	for len(remaining) > 0 {
		n, err := f.handle.Read(remaining)
		if err != nil && n <= 0 {
			return nil, err
		}
		remaining = remaining[n:]
	}

	return bytes, nil
}
+
+func (f *realOpenedFile) Close() error {
+	return f.handle.Close()
+}
+
+func (fs *realFS) OpenFile(path string) (OpenedFile, error, error) {
+	BeforeFileOpen()
+	defer AfterFileClose()
+
+	f, err := os.Open(path)
+	if err != nil {
+		return nil, fs.canonicalizeError(err), err
+	}
+
+	info, err := f.Stat()
+	if err != nil {
+		f.Close()
+		return nil, fs.canonicalizeError(err), err
+	}
+
+	return &realOpenedFile{f, int(info.Size())}, nil, nil
+}
+
+func (fs *realFS) ModKey(path string) (ModKey, error) {
+	BeforeFileOpen()
+	defer AfterFileClose()
+	key, err := modKey(path)
+
+	// Store data for watch mode
+	if fs.watchData != nil {
+		defer fs.watchMutex.Unlock()
+		fs.watchMutex.Lock()
+		data, ok := fs.watchData[path]
+		if !ok {
+			if err == modKeyUnusable {
+				data.state = stateFileUnusableModKey
+			} else if err != nil {
+				data.state = stateFileMissing
+			} else {
+				data.state = stateFileHasModKey
+			}
+		} else if data.state == stateFileNeedModKey {
+			data.state = stateFileHasModKey
+		}
+		data.modKey = key
+		fs.watchData[path] = data
+	}
+
+	return key, err
+}
+
+func (fs *realFS) IsAbs(p string) bool {
+	return fs.fp.isAbs(p)
+}
+
+func (fs *realFS) Abs(p string) (string, bool) {
+	abs, err := fs.fp.abs(p)
+	return abs, err == nil
+}
+
+func (fs *realFS) Dir(p string) string {
+	return fs.fp.dir(p)
+}
+
+func (fs *realFS) Base(p string) string {
+	return fs.fp.base(p)
+}
+
+func (fs *realFS) Ext(p string) string {
+	return fs.fp.ext(p)
+}
+
+func (fs *realFS) Join(parts ...string) string {
+	return fs.fp.clean(fs.fp.join(parts))
+}
+
+func (fs *realFS) Cwd() string {
+	return fs.fp.cwd
+}
+
+func (fs *realFS) Rel(base string, target string) (string, bool) {
+	if rel, err := fs.fp.rel(base, target); err == nil {
+		return rel, true
+	}
+	return "", false
+}
+
+func (fs *realFS) EvalSymlinks(path string) (string, bool) {
+	if path, err := fs.fp.evalSymlinks(path); err == nil {
+		return path, true
+	}
+	return "", false
+}
+
+// readdir lists the entry names in "dirname". It returns both a canonicalized
+// error (normalized across platforms for the resolver to compare against) and
+// the original error (for display in diagnostics).
+func (fs *realFS) readdir(dirname string) (entries []string, canonicalError error, originalError error) {
+	BeforeFileOpen()
+	defer AfterFileClose()
+	f, originalError := os.Open(dirname)
+	canonicalError = fs.canonicalizeError(originalError)
+
+	// Stop now if there was an error
+	if canonicalError != nil {
+		return nil, canonicalError, originalError
+	}
+
+	defer f.Close()
+	entries, originalError = f.Readdirnames(-1)
+	canonicalError = originalError
+
+	// Unwrap to get the underlying error
+	if syscallErr, ok := canonicalError.(*os.SyscallError); ok {
+		canonicalError = syscallErr.Unwrap()
+	}
+
+	// Don't convert ENOTDIR to ENOENT here. ENOTDIR is a legitimate error
+	// condition for Readdirnames() on non-Windows platforms.
+
+	// Go's WebAssembly implementation returns EINVAL instead of ENOTDIR if we
+	// call "readdir" on a file. Canonicalize this to ENOTDIR so esbuild's path
+	// resolution code continues traversing instead of failing with an error.
+	// https://github.com/golang/go/blob/2449bbb5e614954ce9e99c8a481ea2ee73d72d61/src/syscall/fs_js.go#L144
+	if pathErr, ok := canonicalError.(*os.PathError); ok && pathErr.Unwrap() == syscall.EINVAL {
+		canonicalError = syscall.ENOTDIR
+	}
+
+	return entries, canonicalError, originalError
+}
+
+// canonicalizeError normalizes platform-specific file system errors so that
+// callers (primarily the path resolver) can reliably compare against syscall
+// constants such as ENOENT regardless of the host operating system.
+func (fs *realFS) canonicalizeError(err error) error {
+	// Unwrap to get the underlying error
+	if pathErr, ok := err.(*os.PathError); ok {
+		err = pathErr.Unwrap()
+	}
+
+	// Windows is much more restrictive than Unix about file names. If a file name
+	// is invalid, it will return ERROR_INVALID_NAME. Treat this as ENOENT (i.e.
+	// "the file does not exist") so that the resolver continues trying to resolve
+	// the path on this failure instead of aborting with an error.
+	if fs.fp.isWindows && is_ERROR_INVALID_NAME(err) {
+		err = syscall.ENOENT
+	}
+
+	// Windows returns ENOTDIR here even though nothing we've done yet has asked
+	// for a directory. This really means ENOENT on Windows. Return ENOENT here
+	// so callers that check for ENOENT will successfully detect this file as
+	// missing.
+	if err == syscall.ENOTDIR {
+		err = syscall.ENOENT
+	}
+
+	return err
+}
+
+// kind classifies the entry "base" inside "dir" as a file or directory. If the
+// entry is a symbolic link it is resolved first and the resolved target path is
+// returned in "symlink". On any stat/resolve failure both results are left as
+// their zero values (empty symlink, zero kind), which callers treat as "skip".
+func (fs *realFS) kind(dir string, base string) (symlink string, kind EntryKind) {
+	entryPath := fs.fp.join([]string{dir, base})
+
+	// Use "lstat" since we want information about symbolic links
+	BeforeFileOpen()
+	defer AfterFileClose()
+	stat, err := os.Lstat(entryPath)
+	if err != nil {
+		return
+	}
+	mode := stat.Mode()
+
+	// Follow symlinks now so the cache contains the translation
+	if (mode & os.ModeSymlink) != 0 {
+		link, err := fs.fp.evalSymlinks(entryPath)
+		if err != nil {
+			return // Skip over this entry
+		}
+
+		// Re-run "lstat" on the symlink target to see if it's a file or not
+		stat2, err2 := os.Lstat(link)
+		if err2 != nil {
+			return // Skip over this entry
+		}
+		mode = stat2.Mode()
+		if (mode & os.ModeSymlink) != 0 {
+			return // This should no longer be a symlink, so this is unexpected
+		}
+		symlink = link
+	}
+
+	// We consider the entry either a directory or a file
+	if (mode & os.ModeDir) != 0 {
+		kind = DirEntry
+	} else {
+		kind = FileEntry
+	}
+	return
+}
+
+// WatchData snapshots the accumulated watch-mode state into a set of closures,
+// one per watched path. Each closure re-checks the file system and returns a
+// non-empty path if that path has changed since it was recorded, or "" if it
+// is unchanged.
+func (fs *realFS) WatchData() WatchData {
+	paths := make(map[string]func() string)
+
+	for path, data := range fs.watchData {
+		// Each closure below needs its own copy of these loop variables
+		path := path
+		data := data
+
+		// Resolve any deferred mod key computation before building the closure
+		if data.state == stateFileNeedModKey {
+			key, err := modKey(path)
+			if err == modKeyUnusable {
+				data.state = stateFileUnusableModKey
+			} else if err != nil {
+				data.state = stateFileMissing
+			} else {
+				data.state = stateFileHasModKey
+				data.modKey = key
+			}
+		}
+
+		switch data.state {
+		case stateDirUnreadable:
+			// Changed if a previously-unreadable directory becomes readable
+			paths[path] = func() string {
+				_, err, _ := fs.readdir(path)
+				if err == nil {
+					return path
+				}
+				return ""
+			}
+
+		case stateDirHasAccessedEntries:
+			// Changed if the directory listing (or the subset of entries that
+			// were actually accessed) differs from what was recorded
+			paths[path] = func() string {
+				names, err, _ := fs.readdir(path)
+				if err != nil {
+					return path
+				}
+				data.accessedEntries.mutex.Lock()
+				defer data.accessedEntries.mutex.Unlock()
+				if allEntries := data.accessedEntries.allEntries; allEntries != nil {
+					// Check all entries
+					if len(names) != len(allEntries) {
+						return path
+					}
+					sort.Strings(names)
+					for i, s := range names {
+						if s != allEntries[i] {
+							return path
+						}
+					}
+				} else {
+					// Check individual entries
+					lookup := make(map[string]string, len(names))
+					for _, name := range names {
+						lookup[strings.ToLower(name)] = name
+					}
+					for name, wasPresent := range data.accessedEntries.wasPresent {
+						if originalName, isPresent := lookup[name]; wasPresent != isPresent {
+							return fs.Join(path, originalName)
+						}
+					}
+				}
+				return ""
+			}
+
+		case stateFileMissing:
+			// Changed if a previously-missing file now exists (as a file)
+			paths[path] = func() string {
+				if info, err := os.Stat(path); err == nil && !info.IsDir() {
+					return path
+				}
+				return ""
+			}
+
+		case stateFileHasModKey:
+			// Changed if the modification key can no longer be computed or differs
+			paths[path] = func() string {
+				if key, err := modKey(path); err != nil || key != data.modKey {
+					return path
+				}
+				return ""
+			}
+
+		case stateFileUnusableModKey:
+			// No usable mod key, so fall back to comparing the full file contents
+			paths[path] = func() string {
+				if buffer, err := ioutil.ReadFile(path); err != nil || string(buffer) != data.fileContents {
+					return path
+				}
+				return ""
+			}
+		}
+	}
+
+	return WatchData{
+		Paths: paths,
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/fs_zip.go b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_zip.go
new file mode 100644
index 0000000..58a7b8b
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/fs_zip.go
@@ -0,0 +1,405 @@
+package fs
+
+// The Yarn package manager (https://yarnpkg.com/) has a custom installation
+// strategy called "Plug'n'Play" where they install packages as zip files
+// instead of directory trees, and then modify node to treat zip files like
+// directories. This reduces package installation time because Yarn now only
+// has to copy a single file per package instead of a whole directory tree.
+// However, it introduces overhead at run-time because the virtual file system
+// is written in JavaScript.
+//
+// This file contains esbuild's implementation of the behavior that treats zip
+// files like directories. It implements the "FS" interface and wraps an inner
+// "FS" interface that treats zip files like files. That way it can run both on
+// a real file system and a mock file system.
+//
+// This file also implements another Yarn-specific behavior where certain paths
+// containing the special path segments "__virtual__" or "$$virtual" have some
+// unusual behavior. See the code below for details.
+
+import (
+	"archive/zip"
+	"io/ioutil"
+	"strconv"
+	"strings"
+	"sync"
+	"syscall"
+)
+
+// zipFS wraps an inner FS and additionally treats ".zip" files found along a
+// path as if they were directories, mirroring Yarn Plug'n'Play behavior.
+type zipFS struct {
+	inner FS
+
+	// Cache of opened zip archives, keyed by the absolute path of the zip file
+	zipFilesMutex sync.Mutex
+	zipFiles      map[string]*zipFile
+}
+
+// zipFile is a lazily-opened zip archive plus an index of its contents.
+type zipFile struct {
+	reader *zip.ReadCloser
+	err    error
+
+	// Keys in both maps are lower-cased paths within the archive
+	dirs  map[string]*compressedDir
+	files map[string]*compressedFile
+	// "wait" lets concurrent callers block until the first opener finishes
+	wait  sync.WaitGroup
+}
+
+// compressedDir is a directory inside a zip archive.
+type compressedDir struct {
+	entries map[string]EntryKind
+	path    string
+
+	// Compatible entries are decoded lazily
+	mutex      sync.Mutex
+	dirEntries DirEntries
+}
+
+// compressedFile is a file inside a zip archive.
+type compressedFile struct {
+	compressed *zip.File
+
+	// The file is decompressed lazily
+	mutex    sync.Mutex
+	contents string
+	err      error
+	wasRead  bool
+}
+
+// checkForZip checks whether "path" passes through a ".zip" file. If so it
+// returns the (cached) opened archive and the remainder of the path inside the
+// archive; otherwise it returns nil. Opening an archive is done at most once
+// per zip path; concurrent callers wait for the first opener to finish.
+func (fs *zipFS) checkForZip(path string, kind EntryKind) (*zipFile, string) {
+	var zipPath string
+	var pathTail string
+
+	// Do a quick check for a ".zip" in the path at all
+	path = strings.ReplaceAll(path, "\\", "/")
+	if i := strings.Index(path, ".zip/"); i != -1 {
+		zipPath = path[:i+len(".zip")]
+		pathTail = path[i+len(".zip/"):]
+	} else if kind == DirEntry && strings.HasSuffix(path, ".zip") {
+		zipPath = path
+	} else {
+		return nil, ""
+	}
+
+	// If there is one, then check whether it's a file on the file system or not
+	fs.zipFilesMutex.Lock()
+	archive := fs.zipFiles[zipPath]
+	if archive != nil {
+		fs.zipFilesMutex.Unlock()
+		archive.wait.Wait()
+	} else {
+		archive = &zipFile{}
+		archive.wait.Add(1)
+		fs.zipFiles[zipPath] = archive
+		fs.zipFilesMutex.Unlock()
+		defer archive.wait.Done()
+
+		// Try reading the zip archive if it's not in the cache
+		tryToReadZipArchive(zipPath, archive)
+	}
+
+	if archive.err != nil {
+		return nil, ""
+	}
+	return archive, pathTail
+}
+
+// tryToReadZipArchive opens the zip file at "zipPath" and populates "archive"
+// with an index of its directories and files (keys are lower-cased archive
+// paths). On failure only "archive.err" is set. Directory maps contain each
+// directory both with and without a trailing slash.
+func tryToReadZipArchive(zipPath string, archive *zipFile) {
+	reader, err := zip.OpenReader(zipPath)
+	if err != nil {
+		archive.err = err
+		return
+	}
+
+	dirs := make(map[string]*compressedDir)
+	files := make(map[string]*compressedFile)
+	// "seeds" collects directories whose ancestor chain must be created below
+	seeds := []string{}
+
+	// Build an index of all files in the archive
+	for _, file := range reader.File {
+		baseName := strings.TrimSuffix(file.Name, "/")
+		dirPath := ""
+		if slash := strings.LastIndexByte(baseName, '/'); slash != -1 {
+			dirPath = baseName[:slash]
+			baseName = baseName[slash+1:]
+		}
+		if file.FileInfo().IsDir() {
+			// Handle a directory
+			lowerDir := strings.ToLower(dirPath)
+			if _, ok := dirs[lowerDir]; !ok {
+				dir := &compressedDir{
+					path:    dirPath,
+					entries: make(map[string]EntryKind),
+				}
+
+				// List the same directory both with and without the slash
+				dirs[lowerDir] = dir
+				dirs[lowerDir+"/"] = dir
+				seeds = append(seeds, lowerDir)
+			}
+		} else {
+			// Handle a file
+			files[strings.ToLower(file.Name)] = &compressedFile{compressed: file}
+			lowerDir := strings.ToLower(dirPath)
+			dir, ok := dirs[lowerDir]
+			if !ok {
+				dir = &compressedDir{
+					path:    dirPath,
+					entries: make(map[string]EntryKind),
+				}
+
+				// List the same directory both with and without the slash
+				dirs[lowerDir] = dir
+				dirs[lowerDir+"/"] = dir
+				seeds = append(seeds, lowerDir)
+			}
+			dir.entries[baseName] = FileEntry
+		}
+	}
+
+	// Populate child directories: walk each seed up to the archive root,
+	// creating missing parent directories and linking each child into its parent
+	for _, baseName := range seeds {
+		for baseName != "" {
+			dirPath := ""
+			if slash := strings.LastIndexByte(baseName, '/'); slash != -1 {
+				dirPath = baseName[:slash]
+				baseName = baseName[slash+1:]
+			}
+			lowerDir := strings.ToLower(dirPath)
+			dir, ok := dirs[lowerDir]
+			if !ok {
+				dir = &compressedDir{
+					path:    dirPath,
+					entries: make(map[string]EntryKind),
+				}
+
+				// List the same directory both with and without the slash
+				dirs[lowerDir] = dir
+				dirs[lowerDir+"/"] = dir
+			}
+			dir.entries[baseName] = DirEntry
+			baseName = dirPath
+		}
+	}
+
+	archive.dirs = dirs
+	archive.files = files
+	archive.reader = reader
+}
+
+// ReadDirectory reads a directory via the inner FS first, then falls back to
+// looking inside an enclosing zip archive when the inner read fails in a way
+// consistent with "path" traversing a zip file.
+func (fs *zipFS) ReadDirectory(path string) (entries DirEntries, canonicalError error, originalError error) {
+	path = mangleYarnPnPVirtualPath(path)
+
+	entries, canonicalError, originalError = fs.inner.ReadDirectory(path)
+
+	// Only continue if reading this path as a directory caused an error that's
+	// consistent with trying to read a zip file as a directory. Note that EINVAL
+	// is produced by the file system in Go's WebAssembly implementation.
+	if canonicalError != syscall.ENOENT && canonicalError != syscall.ENOTDIR && canonicalError != syscall.EINVAL {
+		return
+	}
+
+	// If the directory doesn't exist, try reading from an enclosing zip archive
+	zip, pathTail := fs.checkForZip(path, DirEntry)
+	if zip == nil {
+		return
+	}
+
+	// Does the zip archive have this directory?
+	dir, ok := zip.dirs[strings.ToLower(pathTail)]
+	if !ok {
+		return DirEntries{}, syscall.ENOENT, syscall.ENOENT
+	}
+
+	// Check whether it has already been converted
+	dir.mutex.Lock()
+	defer dir.mutex.Unlock()
+	if dir.dirEntries.data != nil {
+		return dir.dirEntries, nil, nil
+	}
+
+	// Otherwise, fill in the entries
+	dir.dirEntries = DirEntries{dir: path, data: make(map[string]*Entry, len(dir.entries))}
+	for name, kind := range dir.entries {
+		dir.dirEntries.data[strings.ToLower(name)] = &Entry{
+			dir:  path,
+			base: name,
+			kind: kind,
+		}
+	}
+
+	return dir.dirEntries, nil, nil
+}
+
+// ReadFile reads a file via the inner FS first, then falls back to extracting
+// it from an enclosing zip archive when the inner read reports ENOENT. The
+// decompressed contents (or the error) are cached on the archive entry so each
+// file is only decompressed once.
+func (fs *zipFS) ReadFile(path string) (contents string, canonicalError error, originalError error) {
+	path = mangleYarnPnPVirtualPath(path)
+
+	contents, canonicalError, originalError = fs.inner.ReadFile(path)
+	if canonicalError != syscall.ENOENT {
+		return
+	}
+
+	// If the file doesn't exist, try reading from an enclosing zip archive
+	zip, pathTail := fs.checkForZip(path, FileEntry)
+	if zip == nil {
+		return
+	}
+
+	// Does the zip archive have this file?
+	file, ok := zip.files[strings.ToLower(pathTail)]
+	if !ok {
+		return "", syscall.ENOENT, syscall.ENOENT
+	}
+
+	// Check whether it has already been read
+	file.mutex.Lock()
+	defer file.mutex.Unlock()
+	if file.wasRead {
+		return file.contents, file.err, file.err
+	}
+	file.wasRead = true
+
+	// If not, try to open it
+	reader, err := file.compressed.Open()
+	if err != nil {
+		file.err = err
+		return "", err, err
+	}
+	defer reader.Close()
+
+	// Then try to read it
+	bytes, err := ioutil.ReadAll(reader)
+	if err != nil {
+		file.err = err
+		return "", err, err
+	}
+
+	file.contents = string(bytes)
+	return file.contents, nil, nil
+}
+
+// OpenFile delegates to the inner FS after normalizing Yarn PnP virtual paths.
+// Note: unlike ReadFile, there is no zip-archive fallback here.
+func (fs *zipFS) OpenFile(path string) (result OpenedFile, canonicalError error, originalError error) {
+	path = mangleYarnPnPVirtualPath(path)
+
+	result, canonicalError, originalError = fs.inner.OpenFile(path)
+	return
+}
+
+// ModKey delegates to the inner FS after normalizing Yarn PnP virtual paths.
+func (fs *zipFS) ModKey(path string) (modKey ModKey, err error) {
+	path = mangleYarnPnPVirtualPath(path)
+
+	modKey, err = fs.inner.ModKey(path)
+	return
+}
+
+// IsAbs delegates to the inner FS.
+func (fs *zipFS) IsAbs(path string) bool {
+	return fs.inner.IsAbs(path)
+}
+
+// Abs delegates to the inner FS.
+func (fs *zipFS) Abs(path string) (string, bool) {
+	return fs.inner.Abs(path)
+}
+
+// Dir delegates to the inner FS, except that the parent of a virtual path with
+// an empty suffix is the path's prefix.
+func (fs *zipFS) Dir(path string) string {
+	if prefix, suffix, ok := ParseYarnPnPVirtualPath(path); ok && suffix == "" {
+		return prefix
+	}
+	return fs.inner.Dir(path)
+}
+
+// Base delegates to the inner FS.
+func (fs *zipFS) Base(path string) string {
+	return fs.inner.Base(path)
+}
+
+// Ext delegates to the inner FS.
+func (fs *zipFS) Ext(path string) string {
+	return fs.inner.Ext(path)
+}
+
+// Join delegates to the inner FS.
+func (fs *zipFS) Join(parts ...string) string {
+	return fs.inner.Join(parts...)
+}
+
+// Cwd delegates to the inner FS.
+func (fs *zipFS) Cwd() string {
+	return fs.inner.Cwd()
+}
+
+// Rel delegates to the inner FS.
+func (fs *zipFS) Rel(base string, target string) (string, bool) {
+	return fs.inner.Rel(base, target)
+}
+
+// EvalSymlinks delegates to the inner FS.
+func (fs *zipFS) EvalSymlinks(path string) (string, bool) {
+	return fs.inner.EvalSymlinks(path)
+}
+
+// kind delegates to the inner FS.
+func (fs *zipFS) kind(dir string, base string) (symlink string, kind EntryKind) {
+	return fs.inner.kind(dir, base)
+}
+
+// WatchData delegates to the inner FS.
+func (fs *zipFS) WatchData() WatchData {
+	return fs.inner.WatchData()
+}
+
+// ParseYarnPnPVirtualPath detects Yarn's special "__virtual__/<segment>/<n>"
+// (or legacy "$$virtual") path pattern. When found, it returns the path split
+// into a prefix (with the virtual segment resolved by applying <n> ".."
+// operations) and the remaining suffix, plus true. Otherwise it returns
+// ("", "", false). Both "/" and "\\" are accepted as separators.
+func ParseYarnPnPVirtualPath(path string) (string, string, bool) {
+	i := 0
+
+	for {
+		start := i
+		slash := strings.IndexAny(path[i:], "/\\")
+		if slash == -1 {
+			break
+		}
+		i += slash + 1
+
+		// Replace the segments "__virtual__/<segment>/<n>" with N times the ".."
+		// operation. Note: The "__virtual__" folder name appeared with Yarn 3.0.
+		// Earlier releases used "$$virtual", but it was changed after discovering
+		// that this pattern triggered bugs in software where paths were used as
+		// either regexps or replacement. For example, "$$" found in the second
+		// parameter of "String.prototype.replace" silently turned into "$".
+		if segment := path[start : i-1]; segment == "__virtual__" || segment == "$$virtual" {
+			if slash := strings.IndexAny(path[i:], "/\\"); slash != -1 {
+				var count string
+				var suffix string
+				j := i + slash + 1
+
+				// Find the range of the count
+				if slash := strings.IndexAny(path[j:], "/\\"); slash != -1 {
+					count = path[j : j+slash]
+					suffix = path[j+slash:]
+				} else {
+					count = path[j:]
+				}
+
+				// Parse the count
+				if n, err := strconv.ParseInt(count, 10, 64); err == nil {
+					prefix := path[:start]
+
+					// Apply N times the ".." operator
+					for n > 0 && (strings.HasSuffix(prefix, "/") || strings.HasSuffix(prefix, "\\")) {
+						slash := strings.LastIndexAny(prefix[:len(prefix)-1], "/\\")
+						if slash == -1 {
+							break
+						}
+						prefix = prefix[:slash+1]
+						n--
+					}
+
+					// Make sure the prefix and suffix work well when joined together
+					if suffix == "" && strings.IndexAny(prefix, "/\\") != strings.LastIndexAny(prefix, "/\\") {
+						prefix = prefix[:len(prefix)-1]
+					} else if prefix == "" {
+						prefix = "."
+					} else if strings.HasPrefix(suffix, "/") || strings.HasPrefix(suffix, "\\") {
+						suffix = suffix[1:]
+					}
+
+					return prefix, suffix, true
+				}
+			}
+		}
+	}
+
+	return "", "", false
+}
+
+// mangleYarnPnPVirtualPath collapses a Yarn PnP virtual path into its resolved
+// form. Non-virtual paths are returned unchanged.
+func mangleYarnPnPVirtualPath(path string) string {
+	if prefix, suffix, ok := ParseYarnPnPVirtualPath(path); ok {
+		return prefix + suffix
+	}
+	return path
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_other.go b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_other.go
new file mode 100644
index 0000000..1aa2037
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_other.go
@@ -0,0 +1,9 @@
+//go:build (!js || !wasm) && !windows
+// +build !js !wasm
+// +build !windows
+
+package fs
+
+// CheckIfWindows reports whether we're running on Windows. This build (neither
+// Windows nor js/wasm) is never Windows.
+func CheckIfWindows() bool {
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_wasm.go b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_wasm.go
new file mode 100644
index 0000000..b44a60e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_wasm.go
@@ -0,0 +1,25 @@
+//go:build js && wasm
+// +build js,wasm
+
+package fs
+
+import (
+	"os"
+)
+
+// Cached result of the Windows probe below, computed at most once.
+var checkedIfWindows bool
+var cachedIfWindows bool
+
+// CheckIfWindows reports whether the WebAssembly host appears to be Windows.
+// NOTE(review): not synchronized; assumed to be safe for how esbuild calls it.
+func CheckIfWindows() bool {
+	if !checkedIfWindows {
+		checkedIfWindows = true
+
+		// Hack: Assume that we're on Windows if we're running WebAssembly and
+		// the "C:\\" directory exists. This is a workaround for a bug in Go's
+		// WebAssembly support: https://github.com/golang/go/issues/43768.
+		_, err := os.Stat("C:\\")
+		cachedIfWindows = err == nil
+	}
+
+	return cachedIfWindows
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_windows.go b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_windows.go
new file mode 100644
index 0000000..6f0128d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/iswin_windows.go
@@ -0,0 +1,8 @@
+//go:build windows
+// +build windows
+
+package fs
+
+// CheckIfWindows reports whether we're running on Windows. This build is
+// compiled only for Windows, so it's always true.
+func CheckIfWindows() bool {
+	return true
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_other.go b/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_other.go
new file mode 100644
index 0000000..4999e80
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_other.go
@@ -0,0 +1,35 @@
+//go:build !darwin && !freebsd && !linux
+// +build !darwin,!freebsd,!linux
+
+package fs
+
+import (
+	"os"
+	"time"
+)
+
+// zeroTime is the zero time.Time value, used to detect file systems that
+// don't record modification times.
+var zeroTime time.Time
+
+// modKey builds a modification key for "path" from os.Stat data (size, mtime
+// in seconds, and mode). It returns the sentinel "modKeyUnusable" when the
+// modification time is zeroed out or is too recent to be trusted (within the
+// "modKeySafetyGap" window, to avoid races with in-progress writes).
+func modKey(path string) (ModKey, error) {
+	info, err := os.Stat(path)
+	if err != nil {
+		return ModKey{}, err
+	}
+
+	// We can't detect changes if the file system zeros out the modification time
+	mtime := info.ModTime()
+	if mtime == zeroTime || mtime.Unix() == 0 {
+		return ModKey{}, modKeyUnusable
+	}
+
+	// Don't generate a modification key if the file is too new
+	if mtime.Add(modKeySafetyGap * time.Second).After(time.Now()) {
+		return ModKey{}, modKeyUnusable
+	}
+
+	return ModKey{
+		size:      info.Size(),
+		mtime_sec: mtime.Unix(),
+		mode:      uint32(info.Mode()),
+	}, nil
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_unix.go b/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_unix.go
new file mode 100644
index 0000000..c7c7f9c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/fs/modkey_unix.go
@@ -0,0 +1,41 @@
+//go:build darwin || freebsd || linux
+// +build darwin freebsd linux
+
+package fs
+
+import (
+	"time"
+
+	"golang.org/x/sys/unix"
+)
+
+// modKey builds a modification key for "path" from unix.Stat data (inode,
+// size, nanosecond mtime, mode, and uid). It returns the sentinel
+// "modKeyUnusable" when the modification time is zeroed out or is too recent
+// to be trusted (within the "modKeySafetyGap" window, to avoid races with
+// in-progress writes).
+func modKey(path string) (ModKey, error) {
+	stat := unix.Stat_t{}
+	if err := unix.Stat(path, &stat); err != nil {
+		return ModKey{}, err
+	}
+
+	// We can't detect changes if the file system zeros out the modification time
+	if stat.Mtim.Sec == 0 && stat.Mtim.Nsec == 0 {
+		return ModKey{}, modKeyUnusable
+	}
+
+	// Don't generate a modification key if the file is too new
+	now, err := unix.TimeToTimespec(time.Now())
+	if err != nil {
+		return ModKey{}, err
+	}
+	mtimeSec := stat.Mtim.Sec + modKeySafetyGap
+	if mtimeSec > now.Sec || (mtimeSec == now.Sec && stat.Mtim.Nsec > now.Nsec) {
+		return ModKey{}, modKeyUnusable
+	}
+
+	return ModKey{
+		inode:      stat.Ino,
+		size:       stat.Size,
+		mtime_sec:  int64(stat.Mtim.Sec),
+		mtime_nsec: int64(stat.Mtim.Nsec),
+		mode:       uint32(stat.Mode),
+		uid:        stat.Uid,
+	}, nil
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/graph/graph.go b/source/vendor/github.com/evanw/esbuild/internal/graph/graph.go
new file mode 100644
index 0000000..a030797
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/graph/graph.go
@@ -0,0 +1,431 @@
+package graph
+
+// This graph represents the set of files that the linker operates on. Each
+// linker has a separate one of these graphs (there is one linker when code
+// splitting is on, but one linker per entry point when code splitting is off).
+//
+// The input data to the linker constructor must be considered immutable because
+// it's shared between linker invocations and is also stored in the cache for
+// incremental builds.
+//
+// The linker constructor makes a shallow clone of the input data and is careful
+// to pre-clone ahead of time the AST fields that it may modify. The Go language
+// doesn't have any type system features for immutability so this has to be
+// manually enforced. Please be careful.
+
+import (
+	"sort"
+	"sync"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/runtime"
+)
+
+type entryPointKind uint8
+
+const (
+	entryPointNone entryPointKind = iota
+	entryPointUserSpecified
+	entryPointDynamicImport
+)
+
+// LinkerFile is one input file together with the link-time metadata that the
+// linker attaches to it (entry-point status, reachability, tree-shaking state).
+type LinkerFile struct {
+	// This holds all entry points that can reach this file. It will be used to
+	// assign the parts in this file to a chunk.
+	EntryBits helpers.BitSet
+
+	// This is lazily-allocated because it's only needed if there are warnings
+	// logged, which should be relatively rare.
+	lazyLineColumnTracker *logger.LineColumnTracker
+
+	InputFile InputFile
+
+	// The minimum number of links in the module graph to get from an entry point
+	// to this file
+	DistanceFromEntryPoint uint32
+
+	// If "entryPointKind" is not "entryPointNone", this is the index of the
+	// corresponding entry point chunk.
+	EntryPointChunkIndex uint32
+
+	// This file is an entry point if and only if this is not "entryPointNone".
+	// Note that dynamically-imported files are allowed to also be specified by
+	// the user as top-level entry points, so some dynamically-imported files
+	// may be "entryPointUserSpecified" instead of "entryPointDynamicImport".
+	entryPointKind entryPointKind
+
+	// This is true if this file has been marked as live by the tree shaking
+	// algorithm.
+	IsLive bool
+}
+
+// IsEntryPoint reports whether this file is any kind of entry point
+// (user-specified or created for a dynamic import).
+func (f *LinkerFile) IsEntryPoint() bool {
+	return f.entryPointKind != entryPointNone
+}
+
+// IsUserSpecifiedEntryPoint reports whether this file was explicitly listed
+// as an entry point by the user (as opposed to a dynamic-import entry point).
+func (f *LinkerFile) IsUserSpecifiedEntryPoint() bool {
+	return f.entryPointKind == entryPointUserSpecified
+}
+
+// LineColumnTracker lazily creates and returns the tracker used to map source
+// offsets to line/column positions for diagnostics.
+//
+// Note: This is not guarded by a mutex. Make sure this isn't called from a
+// parallel part of the code.
+func (f *LinkerFile) LineColumnTracker() *logger.LineColumnTracker {
+	if f.lazyLineColumnTracker == nil {
+		tracker := logger.MakeLineColumnTracker(&f.InputFile.Source)
+		f.lazyLineColumnTracker = &tracker
+	}
+	return f.lazyLineColumnTracker
+}
+
+// EntryPoint identifies one entry point file and where its output should go.
+type EntryPoint struct {
+	// This may be an absolute path or a relative path. If absolute, it will
+	// eventually be turned into a relative path by computing the path relative
+	// to the "outbase" directory. Then this relative path will be joined onto
+	// the "outdir" directory to form the final output path for this entry point.
+	OutputPath string
+
+	// This is the source index of the entry point. This file must have a valid
+	// entry point kind (i.e. not "none").
+	SourceIndex uint32
+
+	// Manually specified output paths are ignored when computing the default
+	// "outbase" directory, which is computed as the lowest common ancestor of
+	// all automatically generated output paths.
+	OutputPathWasAutoGenerated bool
+}
+
+// LinkerGraph is the set of files a single linker invocation operates on,
+// cloned from the immutable bundle data by CloneLinkerGraph.
+type LinkerGraph struct {
+	Files       []LinkerFile
+	entryPoints []EntryPoint
+	Symbols     ast.SymbolMap
+
+	// This is for cross-module inlining of TypeScript enum constants
+	TSEnums map[ast.Ref]map[string]js_ast.TSEnumValue
+
+	// This is for cross-module inlining of detected inlinable constants
+	ConstValues map[ast.Ref]js_ast.ConstValue
+
+	// We should avoid traversing all files in the bundle, because the linker
+	// should be able to run a linking operation on a large bundle where only
+	// a few files are needed (e.g. an incremental compilation scenario). This
+	// holds all files that could possibly be reached through the entry points.
+	// If you need to iterate over all files in the linking operation, iterate
+	// over this array. This array is also sorted in a deterministic ordering
+	// to help ensure deterministic builds (source indices are random).
+	ReachableFiles []uint32
+
+	// This maps from unstable source index to stable reachable file index. This
+	// is useful as a deterministic key for sorting if you need to sort something
+	// containing a source index (such as "ast.Ref" symbol references).
+	StableSourceIndices []uint32
+}
+
+// CloneLinkerGraph builds a LinkerGraph from the (immutable, shared) bundle
+// inputs by shallow-cloning every mutable AST field the linker may touch. The
+// clone runs in parallel, one goroutine per reachable file. When code
+// splitting is enabled, dynamically-imported files are additionally promoted
+// to entry points (in a deterministic order).
+func CloneLinkerGraph(
+	inputFiles []InputFile,
+	reachableFiles []uint32,
+	originalEntryPoints []EntryPoint,
+	codeSplitting bool,
+) LinkerGraph {
+	entryPoints := append([]EntryPoint{}, originalEntryPoints...)
+	symbols := ast.NewSymbolMap(len(inputFiles))
+	files := make([]LinkerFile, len(inputFiles))
+
+	// Mark all entry points so we don't add them again for import() expressions
+	for _, entryPoint := range entryPoints {
+		files[entryPoint.SourceIndex].entryPointKind = entryPointUserSpecified
+	}
+
+	// Clone various things since we may mutate them later. Do this in parallel
+	// for a speedup (around ~2x faster for this function in the three.js
+	// benchmark on a 6-core laptop).
+	var dynamicImportEntryPoints []uint32
+	var dynamicImportEntryPointsMutex sync.Mutex
+	waitGroup := sync.WaitGroup{}
+	waitGroup.Add(len(reachableFiles))
+	stableSourceIndices := make([]uint32, len(inputFiles))
+	for stableIndex, sourceIndex := range reachableFiles {
+		// Create a way to convert source indices to a stable ordering
+		stableSourceIndices[sourceIndex] = uint32(stableIndex)
+
+		// "sourceIndex" is passed as an argument so each goroutine gets its own copy
+		go func(sourceIndex uint32) {
+			file := &files[sourceIndex]
+			file.InputFile = inputFiles[sourceIndex]
+
+			switch repr := file.InputFile.Repr.(type) {
+			case *JSRepr:
+				// Clone the representation
+				{
+					clone := *repr
+					repr = &clone
+					file.InputFile.Repr = repr
+				}
+
+				// Clone the symbol map
+				fileSymbols := append([]ast.Symbol{}, repr.AST.Symbols...)
+				symbols.SymbolsForSource[sourceIndex] = fileSymbols
+				repr.AST.Symbols = nil
+
+				// Clone the parts
+				repr.AST.Parts = append([]js_ast.Part{}, repr.AST.Parts...)
+				for i := range repr.AST.Parts {
+					part := &repr.AST.Parts[i]
+					clone := make(map[ast.Ref]js_ast.SymbolUse, len(part.SymbolUses))
+					for ref, uses := range part.SymbolUses {
+						clone[ref] = uses
+					}
+					part.SymbolUses = clone
+				}
+
+				// Clone the import records
+				repr.AST.ImportRecords = append([]ast.ImportRecord{}, repr.AST.ImportRecords...)
+
+				// Add dynamic imports as additional entry points if code splitting is active
+				if codeSplitting {
+					for importRecordIndex := range repr.AST.ImportRecords {
+						if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() && record.Kind == ast.ImportDynamic {
+							dynamicImportEntryPointsMutex.Lock()
+							dynamicImportEntryPoints = append(dynamicImportEntryPoints, record.SourceIndex.GetIndex())
+							dynamicImportEntryPointsMutex.Unlock()
+
+							// Remove import assertions for dynamic imports of additional
+							// entry points so that they don't mess with the run-time behavior.
+							// For example, "import('./foo.json', { assert: { type: 'json' } })"
+							// will likely be converted into an import of a JavaScript file and
+							// leaving the import assertion there will prevent it from working.
+							record.AssertOrWith = nil
+						}
+					}
+				}
+
+				// Clone the import map
+				namedImports := make(map[ast.Ref]js_ast.NamedImport, len(repr.AST.NamedImports))
+				for k, v := range repr.AST.NamedImports {
+					namedImports[k] = v
+				}
+				repr.AST.NamedImports = namedImports
+
+				// Clone the export map
+				resolvedExports := make(map[string]ExportData)
+				for alias, name := range repr.AST.NamedExports {
+					resolvedExports[alias] = ExportData{
+						Ref:         name.Ref,
+						SourceIndex: sourceIndex,
+						NameLoc:     name.AliasLoc,
+					}
+				}
+
+				// Clone the top-level scope so we can generate more variables
+				{
+					// Note: "new" here shadows the builtin; it is a fresh scope copy
+					new := &js_ast.Scope{}
+					*new = *repr.AST.ModuleScope
+					new.Generated = append([]ast.Ref{}, new.Generated...)
+					repr.AST.ModuleScope = new
+				}
+
+				// Also associate some default metadata with the file
+				repr.Meta.ResolvedExports = resolvedExports
+				repr.Meta.IsProbablyTypeScriptType = make(map[ast.Ref]bool)
+				repr.Meta.ImportsToBind = make(map[ast.Ref]ImportData)
+
+			case *CSSRepr:
+				// Clone the representation
+				{
+					clone := *repr
+					repr = &clone
+					file.InputFile.Repr = repr
+				}
+
+				// Clone the symbol map
+				fileSymbols := append([]ast.Symbol{}, repr.AST.Symbols...)
+				symbols.SymbolsForSource[sourceIndex] = fileSymbols
+				repr.AST.Symbols = nil
+
+				// Clone the import records
+				repr.AST.ImportRecords = append([]ast.ImportRecord{}, repr.AST.ImportRecords...)
+			}
+
+			// All files start off as far as possible from an entry point
+			file.DistanceFromEntryPoint = ^uint32(0)
+			waitGroup.Done()
+		}(sourceIndex)
+	}
+	waitGroup.Wait()
+
+	// Process dynamic entry points after merging control flow again
+	stableEntryPoints := make([]int, 0, len(dynamicImportEntryPoints))
+	for _, sourceIndex := range dynamicImportEntryPoints {
+		if otherFile := &files[sourceIndex]; otherFile.entryPointKind == entryPointNone {
+			stableEntryPoints = append(stableEntryPoints, int(stableSourceIndices[sourceIndex]))
+			otherFile.entryPointKind = entryPointDynamicImport
+		}
+	}
+
+	// Make sure to add dynamic entry points in a deterministic order
+	sort.Ints(stableEntryPoints)
+	for _, stableIndex := range stableEntryPoints {
+		entryPoints = append(entryPoints, EntryPoint{SourceIndex: reachableFiles[stableIndex]})
+	}
+
+	// Do a final quick pass over all files
+	var tsEnums map[ast.Ref]map[string]js_ast.TSEnumValue
+	var constValues map[ast.Ref]js_ast.ConstValue
+	bitCount := uint(len(entryPoints))
+	for _, sourceIndex := range reachableFiles {
+		file := &files[sourceIndex]
+
+		// Allocate the entry bit set now that the number of entry points is known
+		file.EntryBits = helpers.NewBitSet(bitCount)
+
+		// Merge TypeScript enums together into one big map. There likely aren't
+		// too many enum definitions relative to the overall size of the code so
+		// it should be fine to just merge them together in serial.
+		if repr, ok := file.InputFile.Repr.(*JSRepr); ok && repr.AST.TSEnums != nil {
+			if tsEnums == nil {
+				tsEnums = make(map[ast.Ref]map[string]js_ast.TSEnumValue)
+			}
+			for ref, enum := range repr.AST.TSEnums {
+				tsEnums[ref] = enum
+			}
+		}
+
+		// Also merge const values into one big map as well
+		if repr, ok := file.InputFile.Repr.(*JSRepr); ok && repr.AST.ConstValues != nil {
+			if constValues == nil {
+				constValues = make(map[ast.Ref]js_ast.ConstValue)
+			}
+			for ref, value := range repr.AST.ConstValues {
+				constValues[ref] = value
+			}
+		}
+	}
+
+	return LinkerGraph{
+		Symbols:             symbols,
+		TSEnums:             tsEnums,
+		ConstValues:         constValues,
+		entryPoints:         entryPoints,
+		Files:               files,
+		ReachableFiles:      reachableFiles,
+		StableSourceIndices: stableSourceIndices,
+	}
+}
+
+// EntryPoints returns the graph's list of entry points. Exposing the field
+// only through this read accessor prevents packages that depend on us from
+// adding or removing entry points.
+func (g *LinkerGraph) EntryPoints() []EntryPoint {
+	return g.entryPoints
+}
+
+// AddPartToFile appends "part" to the JavaScript file at "sourceIndex" and
+// returns the index of the newly-added part. Any top-level symbols declared
+// by the part are also recorded in the file's symbol-to-parts overlay so a
+// later call to "TopLevelSymbolToParts" will find the new part.
+func (g *LinkerGraph) AddPartToFile(sourceIndex uint32, part js_ast.Part) uint32 {
+	// Invariant: this map is never null
+	if part.SymbolUses == nil {
+		part.SymbolUses = make(map[ast.Ref]js_ast.SymbolUse)
+	}
+
+	repr := g.Files[sourceIndex].InputFile.Repr.(*JSRepr)
+	partIndex := uint32(len(repr.AST.Parts))
+	repr.AST.Parts = append(repr.AST.Parts, part)
+
+	// Invariant: the parts for all top-level symbols can be found in the file-level map
+	for _, declaredSymbol := range part.DeclaredSymbols {
+		if declaredSymbol.IsTopLevel {
+			// Check for an existing overlay
+			partIndices, ok := repr.Meta.TopLevelSymbolToPartsOverlay[declaredSymbol.Ref]
+
+			// If missing, initialize using the original values from the parser
+			if !ok {
+				partIndices = append(partIndices, repr.AST.TopLevelSymbolToPartsFromParser[declaredSymbol.Ref]...)
+			}
+
+			// Add this part to the overlay
+			partIndices = append(partIndices, partIndex)
+			if repr.Meta.TopLevelSymbolToPartsOverlay == nil {
+				repr.Meta.TopLevelSymbolToPartsOverlay = make(map[ast.Ref][]uint32)
+			}
+			repr.Meta.TopLevelSymbolToPartsOverlay[declaredSymbol.Ref] = partIndices
+		}
+	}
+
+	return partIndex
+}
+
+// GenerateNewSymbol mints a new symbol with the given kind and original name
+// in the given source file, registers it in the module scope's "Generated"
+// list, and returns a reference to it.
+func (g *LinkerGraph) GenerateNewSymbol(sourceIndex uint32, kind ast.SymbolKind, originalName string) ast.Ref {
+	sourceSymbols := &g.Symbols.SymbolsForSource[sourceIndex]
+
+	ref := ast.Ref{
+		SourceIndex: sourceIndex,
+		InnerIndex:  uint32(len(*sourceSymbols)),
+	}
+
+	*sourceSymbols = append(*sourceSymbols, ast.Symbol{
+		Kind:         kind,
+		OriginalName: originalName,
+		Link:         ast.InvalidRef,
+	})
+
+	generated := &g.Files[sourceIndex].InputFile.Repr.(*JSRepr).AST.ModuleScope.Generated
+	*generated = append(*generated, ref)
+	return ref
+}
+
+// GenerateSymbolImportAndUse records "useCount" additional uses of "ref" by
+// the part at "partIndex" in the file at "sourceIndex". If the symbol lives
+// in a different file, the cross-file import is registered in "ImportsToBind"
+// and the part gains dependencies on every part that declares the symbol.
+// A use count of zero is a no-op.
+func (g *LinkerGraph) GenerateSymbolImportAndUse(
+	sourceIndex uint32,
+	partIndex uint32,
+	ref ast.Ref,
+	useCount uint32,
+	sourceIndexToImportFrom uint32,
+) {
+	if useCount == 0 {
+		return
+	}
+
+	repr := g.Files[sourceIndex].InputFile.Repr.(*JSRepr)
+	part := &repr.AST.Parts[partIndex]
+
+	// Mark this symbol as used by this part
+	use := part.SymbolUses[ref]
+	use.CountEstimate += useCount
+	part.SymbolUses[ref] = use
+
+	// Uphold invariants about the CommonJS "exports" and "module" symbols
+	if ref == repr.AST.ExportsRef {
+		repr.AST.UsesExportsRef = true
+	}
+	if ref == repr.AST.ModuleRef {
+		repr.AST.UsesModuleRef = true
+	}
+
+	// Track that this specific symbol was imported
+	if sourceIndexToImportFrom != sourceIndex {
+		repr.Meta.ImportsToBind[ref] = ImportData{
+			SourceIndex: sourceIndexToImportFrom,
+			Ref:         ref,
+		}
+	}
+
+	// Pull in all parts that declare this symbol
+	targetRepr := g.Files[sourceIndexToImportFrom].InputFile.Repr.(*JSRepr)
+	for _, partIndex := range targetRepr.TopLevelSymbolToParts(ref) {
+		part.Dependencies = append(part.Dependencies, js_ast.Dependency{
+			SourceIndex: sourceIndexToImportFrom,
+			PartIndex:   partIndex,
+		})
+	}
+}
+
+// GenerateRuntimeSymbolImportAndUse is a convenience wrapper around
+// GenerateSymbolImportAndUse for importing the named export "name" from the
+// runtime file. A use count of zero is a no-op.
+func (g *LinkerGraph) GenerateRuntimeSymbolImportAndUse(
+	sourceIndex uint32,
+	partIndex uint32,
+	name string,
+	useCount uint32,
+) {
+	if useCount == 0 {
+		return
+	}
+
+	runtimeRepr := g.Files[runtime.SourceIndex].InputFile.Repr.(*JSRepr)
+	ref := runtimeRepr.AST.NamedExports[name].Ref
+	g.GenerateSymbolImportAndUse(sourceIndex, partIndex, ref, useCount, runtime.SourceIndex)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/graph/input.go b/source/vendor/github.com/evanw/esbuild/internal/graph/input.go
new file mode 100644
index 0000000..7faa763
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/graph/input.go
@@ -0,0 +1,127 @@
+package graph
+
+// The code in this file mainly represents data that passes from the scan phase
+// to the compile phase of the bundler. There is currently one exception: the
+// "meta" member of the JavaScript file representation. That could have been
+// stored separately but is stored together for convenience and to avoid an
+// extra level of indirection. Instead it's kept in a separate type to keep
+// things organized.
+
+import (
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/resolver"
+	"github.com/evanw/esbuild/internal/sourcemap"
+)
+
+// InputFile is a single parsed module produced by the scan phase along with
+// the metadata needed to bundle it.
+type InputFile struct {
+	Repr           InputFileRepr
+	InputSourceMap *sourcemap.SourceMap
+
+	// If this file ends up being used in the bundle, these are additional files
+	// that must be written to the output directory. It's used by the "file" and
+	// "copy" loaders.
+	AdditionalFiles            []OutputFile
+	UniqueKeyForAdditionalFile string
+
+	SideEffects SideEffects
+	Source      logger.Source
+	Loader      config.Loader
+
+	OmitFromSourceMapsAndMetafile bool
+}
+
+// OutputFile is a single file to be written to the output directory.
+type OutputFile struct {
+	// If "AbsMetadataFile" is present, this will be filled out with information
+	// about this file in JSON format. This is a partial JSON file that will be
+	// fully assembled later.
+	JSONMetadataChunk string
+
+	AbsPath      string
+	Contents     []byte
+	IsExecutable bool
+}
+
+// SideEffects records whether importing a file may be observable, which
+// determines whether otherwise-unused imports of it can be removed.
+type SideEffects struct {
+	// This is optional additional information for use in error messages
+	Data *resolver.SideEffectsData
+
+	Kind SideEffectsKind
+}
+
+// SideEffectsKind classifies why a file is (or isn't) considered free of
+// side effects.
+type SideEffectsKind uint8
+
+const (
+	// The default value conservatively considers all files to have side effects.
+	HasSideEffects SideEffectsKind = iota
+
+	// This file was listed as not having side effects by a "package.json"
+	// file in one of our containing directories with a "sideEffects" field.
+	NoSideEffects_PackageJSON
+
+	// This file is considered to have no side effects because the AST was empty
+	// after parsing finished. This should be the case for ".d.ts" files.
+	NoSideEffects_EmptyAST
+
+	// This file was loaded using a data-oriented loader (e.g. "text") that is
+	// known to not have side effects.
+	NoSideEffects_PureData
+
+	// Same as above but it came from a plugin. We don't want to warn about
+	// unused imports to these files since running the plugin is a side effect.
+	// Removing the import would not call the plugin which is observable.
+	NoSideEffects_PureData_FromPlugin
+)
+
+// InputFileRepr is the per-language representation of an input file. It is
+// implemented by "JSRepr", "CSSRepr", and "CopyRepr" below.
+type InputFileRepr interface {
+	ImportRecords() *[]ast.ImportRecord
+}
+
+// JSRepr is the representation of a parsed JavaScript file.
+type JSRepr struct {
+	Meta JSReprMeta
+	AST  js_ast.AST
+
+	// If present, this is the CSS file that this JavaScript stub corresponds to.
+	// A JavaScript stub is automatically generated for a CSS file when it's
+	// imported from a JavaScript file.
+	CSSSourceIndex ast.Index32
+}
+
+// ImportRecords implements InputFileRepr.
+func (repr *JSRepr) ImportRecords() *[]ast.ImportRecord {
+	return &repr.AST.ImportRecords
+}
+
+// TopLevelSymbolToParts returns the indices of all parts that declare the
+// given top-level symbol, preferring the linker's mutable overlay over the
+// parser's immutable map.
+func (repr *JSRepr) TopLevelSymbolToParts(ref ast.Ref) []uint32 {
+	// Overlay the mutable map from the linker
+	if parts, ok := repr.Meta.TopLevelSymbolToPartsOverlay[ref]; ok {
+		return parts
+	}
+
+	// Fall back to the immutable map from the parser
+	return repr.AST.TopLevelSymbolToPartsFromParser[ref]
+}
+
+// CSSRepr is the representation of a parsed CSS file.
+type CSSRepr struct {
+	AST css_ast.AST
+
+	// If present, this is the JavaScript stub corresponding to this CSS file.
+	// A JavaScript stub is automatically generated for a CSS file when it's
+	// imported from a JavaScript file.
+	JSSourceIndex ast.Index32
+}
+
+// ImportRecords implements InputFileRepr.
+func (repr *CSSRepr) ImportRecords() *[]ast.ImportRecord {
+	return &repr.AST.ImportRecords
+}
+
+// CopyRepr is the representation of a file handled by the "copy" loader.
+type CopyRepr struct {
+	// The URL that replaces the contents of any import record paths for this file
+	URLForCode string
+}
+
+// ImportRecords implements InputFileRepr. Copied files have no import
+// records of their own, so this returns nil.
+func (repr *CopyRepr) ImportRecords() *[]ast.ImportRecord {
+	return nil
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/graph/meta.go b/source/vendor/github.com/evanw/esbuild/internal/graph/meta.go
new file mode 100644
index 0000000..208056c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/graph/meta.go
@@ -0,0 +1,205 @@
+package graph
+
+// The code in this file represents data that is required by the compile phase
+// of the bundler but that is not required by the scan phase.
+
+import (
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// WrapKind describes whether (and how) a module's statements are wrapped in
+// a lazily-evaluated closure when bundling.
+type WrapKind uint8
+
+const (
+	// The module's statements are not wrapped in a closure.
+	WrapNone WrapKind = iota
+
+	// The module will be bundled CommonJS-style like this:
+	//
+	//   // foo.ts
+	//   let require_foo = __commonJS((exports, module) => {
+	//     exports.foo = 123;
+	//   });
+	//
+	//   // bar.ts
+	//   let foo = flag ? require_foo() : null;
+	//
+	WrapCJS
+
+	// The module will be bundled ESM-style like this:
+	//
+	//   // foo.ts
+	//   var foo, foo_exports = {};
+	//   __export(foo_exports, {
+	//     foo: () => foo
+	//   });
+	//   let init_foo = __esm(() => {
+	//     foo = 123;
+	//   });
+	//
+	//   // bar.ts
+	//   let foo = flag ? (init_foo(), __toCommonJS(foo_exports)) : null;
+	//
+	WrapESM
+)
+
+// This contains linker-specific metadata corresponding to a "file" struct
+// from the initial scan phase of the bundler. It's separated out because it's
+// conceptually only used for a single linking operation and because multiple
+// linking operations may be happening in parallel with different metadata for
+// the same file.
+type JSReprMeta struct {
+	// This is only for TypeScript files. If an import symbol is in this map, it
+	// means the import couldn't be found and doesn't actually exist. This is not
+	// an error in TypeScript because the import is probably just a type.
+	//
+	// Normally we remove all unused imports for TypeScript files during parsing,
+	// which automatically removes type-only imports. But there are certain re-
+	// export situations where it's impossible to tell if an import is a type or
+	// not:
+	//
+	//   import {typeOrNotTypeWhoKnows} from 'path';
+	//   export {typeOrNotTypeWhoKnows};
+	//
+	// Really people should be using the TypeScript "isolatedModules" flag with
+	// bundlers like this one that compile TypeScript files independently without
+	// type checking. That causes the TypeScript type checker to emit the error
+	// "Re-exporting a type when the '--isolatedModules' flag is provided requires
+	// using 'export type'." But we try to be robust to such code anyway.
+	IsProbablyTypeScriptType map[ast.Ref]bool
+
+	// Imports are matched with exports in a separate pass from when the matched
+	// exports are actually bound to the imports. Here "binding" means adding non-
+	// local dependencies on the parts in the exporting file that declare the
+	// exported symbol to all parts in the importing file that use the imported
+	// symbol.
+	//
+	// This must be a separate pass because of the "probably TypeScript type"
+	// check above. We can't generate the part for the export namespace until
+	// we've matched imports with exports because the generated code must omit
+	// type-only imports in the export namespace code. And we can't bind exports
+	// to imports until the part for the export namespace is generated since that
+	// part needs to participate in the binding.
+	//
+	// This array holds the deferred imports to bind so the pass can be split
+	// into two separate passes.
+	ImportsToBind map[ast.Ref]ImportData
+
+	// This includes both named exports and re-exports.
+	//
+	// Named exports come from explicit export statements in the original file,
+	// and are copied from the "NamedExports" field in the AST.
+	//
+	// Re-exports come from other files and are the result of resolving export
+	// star statements (i.e. "export * from 'foo'").
+	ResolvedExports     map[string]ExportData
+	ResolvedExportStar  *ExportData
+	ResolvedExportTypos *helpers.TypoDetector
+
+	// Never iterate over "resolvedExports" directly. Instead, iterate over this
+	// array. Some exports in that map aren't meant to end up in generated code.
+	// This array excludes these exports and is also sorted, which avoids non-
+	// determinism due to random map iteration order.
+	SortedAndFilteredExportAliases []string
+
+	// This is merged on top of the corresponding map from the parser in the AST.
+	// You should call "TopLevelSymbolToParts" to access this instead of accessing
+	// it directly.
+	TopLevelSymbolToPartsOverlay map[ast.Ref][]uint32
+
+	// If this is an entry point, this array holds a reference to one free
+	// temporary symbol for each entry in "sortedAndFilteredExportAliases".
+	// These may be needed to store copies of CommonJS re-exports in ESM.
+	CJSExportCopies []ast.Ref
+
+	// The index of the automatically-generated part used to represent the
+	// CommonJS or ESM wrapper. This part is empty and is only useful for tree
+	// shaking and code splitting. The wrapper can't be inserted into the part
+	// because the wrapper contains other parts, which can't be represented by
+	// the current part system. Only wrapped files have one of these.
+	WrapperPartIndex ast.Index32
+
+	// The index of the automatically-generated part used to handle entry point
+	// specific stuff. If a certain part is needed by the entry point, it's added
+	// as a dependency of this part. This is important for parts that are marked
+	// as removable when unused and that are not used by anything else. Only
+	// entry point files have one of these.
+	EntryPointPartIndex ast.Index32
+
+	// This is true if this file is affected by top-level await, either by having
+	// a top-level await inside this file or by having an import/export statement
+	// that transitively imports such a file. It is forbidden to call "require()"
+	// on these files since they are evaluated asynchronously.
+	IsAsyncOrHasAsyncDependency bool
+
+	// Whether (and how) this file is wrapped in a closure. See "WrapKind".
+	Wrap WrapKind
+
+	// If true, we need to insert "var exports = {};". This is the case for ESM
+	// files when the import namespace is captured via "import * as" and also
+	// when they are the target of a "require()" call.
+	NeedsExportsVariable bool
+
+	// If true, the "__export(exports, { ... })" call will be force-included even
+	// if there are no parts that reference "exports". Otherwise this call will
+	// be removed due to the tree shaking pass. This is used when for entry point
+	// files when code related to the current output format needs to reference
+	// the "exports" variable.
+	ForceIncludeExportsForEntryPoint bool
+
+	// This is set when we need to pull in the "__export" symbol in to the part
+	// at "nsExportPartIndex". This can't be done in "createExportsForFile"
+	// because of concurrent map hazards. Instead, it must be done later.
+	NeedsExportSymbolFromRuntime bool
+
+	// Wrapped files must also ensure that their dependencies are wrapped. This
+	// flag is used during the traversal that enforces this invariant, and is used
+	// to detect when the fixed point has been reached.
+	DidWrapDependencies bool
+}
+
+// ImportData describes the result of resolving an import: the symbol it
+// points at and the chain of re-exports traversed to reach it.
+type ImportData struct {
+	// This is an array of intermediate statements that re-exported this symbol
+	// in a chain before getting to the final symbol. This can be done either with
+	// "export * from" or "export {} from". If this is done with "export * from"
+	// then this may not be the result of a single chain but may instead form
+	// a diamond shape if this same symbol was re-exported multiple times from
+	// different files.
+	ReExports []js_ast.Dependency
+
+	NameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
+	Ref         ast.Ref    // The resolved symbol; it lives in the file "SourceIndex"
+	SourceIndex uint32
+}
+
+// ExportData records the symbol that a named export (or re-export) of a file
+// resolves to.
+type ExportData struct {
+	// Export star resolution happens first before import resolution. That means
+	// it cannot yet determine if duplicate names from export star resolution are
+	// ambiguous (point to different symbols) or not (point to the same symbol).
+	// This issue can happen in the following scenario:
+	//
+	//   // entry.js
+	//   export * from './a'
+	//   export * from './b'
+	//
+	//   // a.js
+	//   export * from './c'
+	//
+	//   // b.js
+	//   export {x} from './c'
+	//
+	//   // c.js
+	//   export let x = 1, y = 2
+	//
+	// In this case "entry.js" should have two exports "x" and "y", neither of
+	// which are ambiguous. To handle this case, ambiguity resolution must be
+	// deferred until import resolution time. That is done using this array.
+	PotentiallyAmbiguousExportStarRefs []ImportData
+
+	// The symbol that this named export refers to.
+	Ref ast.Ref
+
+	// This is the file that the named export above came from. This will be
+	// different from the file that contains this object if this is a re-export.
+	NameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
+	SourceIndex uint32
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/bitset.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/bitset.go
new file mode 100644
index 0000000..47b0c1c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/bitset.go
@@ -0,0 +1,27 @@
+package helpers
+
+import "bytes"
+
+// BitSet is a fixed-size bit array backed by a byte slice.
+type BitSet struct {
+	entries []byte
+}
+
+// NewBitSet returns a bit set with room for "bitCount" bits, all initially
+// unset.
+func NewBitSet(bitCount uint) BitSet {
+	return BitSet{make([]byte, (bitCount+7)/8)}
+}
+
+// HasBit reports whether the bit at the given index is set.
+func (bs BitSet) HasBit(bit uint) bool {
+	return (bs.entries[bit/8] & (1 << (bit & 7))) != 0
+}
+
+// SetBit sets the bit at the given index. Although the receiver is a value,
+// the update is visible through all copies because they share the same
+// backing slice.
+func (bs BitSet) SetBit(bit uint) {
+	bs.entries[bit/8] |= 1 << (bit & 7)
+}
+
+// Equals reports whether both bit sets have identical contents.
+func (bs BitSet) Equals(other BitSet) bool {
+	return bytes.Equal(bs.entries, other.entries)
+}
+
+// String returns the raw backing bytes as a string. This is not a
+// human-readable form; it exposes the bits for cheap comparison.
+func (bs BitSet) String() string {
+	return string(bs.entries)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/comment.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/comment.go
new file mode 100644
index 0000000..95b8684
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/comment.go
@@ -0,0 +1,29 @@
+package helpers
+
+import (
+	"strings"
+)
+
+// EscapeClosingTag returns "text" with a backslash inserted between "<" and
+// every case-insensitive occurrence of "slashTag" (e.g. "/script") that
+// directly follows it. This prevents inlined text from prematurely closing
+// the surrounding HTML tag. If "slashTag" is empty, text is returned
+// unchanged.
+func EscapeClosingTag(text string, slashTag string) string {
+	if slashTag == "" {
+		return text
+	}
+	i := strings.Index(text, "</")
+	if i < 0 {
+		return text
+	}
+	var b strings.Builder
+	for {
+		b.WriteString(text[:i+1])
+		text = text[i+1:]
+		if len(text) >= len(slashTag) && strings.EqualFold(text[:len(slashTag)], slashTag) {
+			b.WriteByte('\\')
+		}
+		i = strings.Index(text, "</")
+		if i < 0 {
+			break
+		}
+	}
+	b.WriteString(text)
+	return b.String()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/dataurl.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/dataurl.go
new file mode 100644
index 0000000..2b5004c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/dataurl.go
@@ -0,0 +1,72 @@
+package helpers
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strings"
+	"unicode/utf8"
+)
+
+// EncodeStringAsShortestDataURL returns the shorter of either a
+// base64-encoded or a percent-escaped data URL for the given content,
+// falling back to base64 when percent-escaping is not possible.
+func EncodeStringAsShortestDataURL(mimeType string, text string) string {
+	encoded := base64.StdEncoding.EncodeToString([]byte(text))
+	url := fmt.Sprintf("data:%s;base64,%s", mimeType, encoded)
+	if percentURL, ok := EncodeStringAsPercentEscapedDataURL(mimeType, text); ok && len(percentURL) < len(url) {
+		return percentURL
+	}
+	return url
+}
+
+// EncodeStringAsPercentEscapedDataURL builds a data URL that percent-escapes
+// only the characters that are unsafe inside one: tab/CR/LF, "#", trailing
+// control/whitespace characters, and "%" when followed by two hex digits
+// (which would otherwise be parsed as an existing escape). It reports false
+// for invalid UTF-8 input, which cannot be represented this way.
+// See "scripts/dataurl-escapes.html" for how this was derived
+func EncodeStringAsPercentEscapedDataURL(mimeType string, text string) (string, bool) {
+	hex := "0123456789ABCDEF"
+	sb := strings.Builder{}
+	n := len(text)
+	i := 0
+	runStart := 0
+	sb.WriteString("data:")
+	sb.WriteString(mimeType)
+	sb.WriteByte(',')
+
+	// Scan for trailing characters that need to be escaped
+	trailingStart := n
+	for trailingStart > 0 {
+		if c := text[trailingStart-1]; c > 0x20 || c == '\t' || c == '\n' || c == '\r' {
+			break
+		}
+		trailingStart--
+	}
+
+	for i < n {
+		c, width := utf8.DecodeRuneInString(text[i:])
+
+		// We can't encode invalid UTF-8 data
+		if c == utf8.RuneError && width == 1 {
+			return "", false
+		}
+
+		// Escape this character if needed
+		if c == '\t' || c == '\n' || c == '\r' || c == '#' || i >= trailingStart ||
+			(c == '%' && i+2 < n && isHex(text[i+1]) && isHex(text[i+2])) {
+			if runStart < i {
+				sb.WriteString(text[runStart:i])
+			}
+			sb.WriteByte('%')
+			sb.WriteByte(hex[c>>4])
+			sb.WriteByte(hex[c&15])
+			runStart = i + width
+		}
+
+		i += width
+	}
+
+	if runStart < n {
+		sb.WriteString(text[runStart:])
+	}
+
+	return sb.String(), true
+}
+
+// isHex reports whether c is an ASCII hexadecimal digit.
+func isHex(c byte) bool {
+	return c >= '0' && c <= '9' || c >= 'a' && c <= 'f' || c >= 'A' && c <= 'F'
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/float.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/float.go
new file mode 100644
index 0000000..02b3ac9
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/float.go
@@ -0,0 +1,158 @@
+package helpers
+
+import "math"
+
+// This wraps float64 math operations. Why does this exist? The Go compiler
+// contains some optimizations to take advantage of "fused multiply and add"
+// (FMA) instructions on certain processors. These instructions lead to
+// different output on those processors, which means esbuild's output is no
+// longer deterministic across all platforms. From the Go specification itself
+// (https://go.dev/ref/spec#Floating_point_operators):
+//
+//	An implementation may combine multiple floating-point operations into a
+//	single fused operation, possibly across statements, and produce a result
+//	that differs from the value obtained by executing and rounding the
+//	instructions individually. An explicit floating-point type conversion
+//	rounds to the precision of the target type, preventing fusion that would
+//	discard that rounding.
+//
+//	For instance, some architectures provide a "fused multiply and add" (FMA)
+//	instruction that computes x*y + z without rounding the intermediate result
+//	x*y.
+//
+// Therefore we need to add explicit type conversions such as "float64(x)" to
+// prevent optimizations that break correctness. Rather than adding them on a
+// case-by-case basis as real correctness issues are discovered, we instead
+// preemptively force them to be added everywhere by using this wrapper type
+// for all floating-point math.
+type F64 struct {
+	value float64
+}
+
+// NewF64 wraps a float64. The explicit "float64(a)" conversion forces a
+// rounding boundary, which is the entire point of this type (see above).
+func NewF64(a float64) F64 {
+	return F64{value: float64(a)}
+}
+
+// Value unwraps the underlying float64.
+func (a F64) Value() float64 {
+	return a.value
+}
+
+func (a F64) IsNaN() bool {
+	return math.IsNaN(a.value)
+}
+
+func (a F64) Neg() F64 {
+	return NewF64(-a.value)
+}
+
+func (a F64) Abs() F64 {
+	return NewF64(math.Abs(a.value))
+}
+
+func (a F64) Sin() F64 {
+	return NewF64(math.Sin(a.value))
+}
+
+func (a F64) Cos() F64 {
+	return NewF64(math.Cos(a.value))
+}
+
+func (a F64) Log2() F64 {
+	return NewF64(math.Log2(a.value))
+}
+
+func (a F64) Round() F64 {
+	return NewF64(math.Round(a.value))
+}
+
+func (a F64) Floor() F64 {
+	return NewF64(math.Floor(a.value))
+}
+
+func (a F64) Ceil() F64 {
+	return NewF64(math.Ceil(a.value))
+}
+
+func (a F64) Squared() F64 {
+	return a.Mul(a)
+}
+
+func (a F64) Cubed() F64 {
+	return a.Mul(a).Mul(a)
+}
+
+func (a F64) Sqrt() F64 {
+	return NewF64(math.Sqrt(a.value))
+}
+
+func (a F64) Cbrt() F64 {
+	return NewF64(math.Cbrt(a.value))
+}
+
+func (a F64) Add(b F64) F64 {
+	return NewF64(a.value + b.value)
+}
+
+func (a F64) AddConst(b float64) F64 {
+	return NewF64(a.value + b)
+}
+
+func (a F64) Sub(b F64) F64 {
+	return NewF64(a.value - b.value)
+}
+
+func (a F64) SubConst(b float64) F64 {
+	return NewF64(a.value - b)
+}
+
+func (a F64) Mul(b F64) F64 {
+	return NewF64(a.value * b.value)
+}
+
+func (a F64) MulConst(b float64) F64 {
+	return NewF64(a.value * b)
+}
+
+func (a F64) Div(b F64) F64 {
+	return NewF64(a.value / b.value)
+}
+
+func (a F64) DivConst(b float64) F64 {
+	return NewF64(a.value / b)
+}
+
+func (a F64) Pow(b F64) F64 {
+	return NewF64(math.Pow(a.value, b.value))
+}
+
+func (a F64) PowConst(b float64) F64 {
+	return NewF64(math.Pow(a.value, b))
+}
+
+func (a F64) Atan2(b F64) F64 {
+	return NewF64(math.Atan2(a.value, b.value))
+}
+
+// WithSignFrom returns a value with the magnitude of "a" and the sign of "b".
+func (a F64) WithSignFrom(b F64) F64 {
+	return NewF64(math.Copysign(a.value, b.value))
+}
+
+func Min2(a F64, b F64) F64 {
+	return NewF64(math.Min(a.value, b.value))
+}
+
+func Max2(a F64, b F64) F64 {
+	return NewF64(math.Max(a.value, b.value))
+}
+
+func Min3(a F64, b F64, c F64) F64 {
+	return NewF64(math.Min(math.Min(a.value, b.value), c.value))
+}
+
+func Max3(a F64, b F64, c F64) F64 {
+	return NewF64(math.Max(math.Max(a.value, b.value), c.value))
+}
+
+// Lerp linearly interpolates between "a" and "b", computed as (b-a)*t + a.
+func Lerp(a F64, b F64, t F64) F64 {
+	return b.Sub(a).Mul(t).Add(a)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/glob.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/glob.go
new file mode 100644
index 0000000..c8ffa8b
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/glob.go
@@ -0,0 +1,58 @@
+package helpers
+
+import "strings"
+
+// GlobWildcard is the kind of wildcard (if any) that follows a literal
+// prefix in a parsed glob pattern.
+type GlobWildcard uint8
+
+const (
+	GlobNone GlobWildcard = iota
+	GlobAllExceptSlash    // "*"
+	GlobAllIncludingSlash // "**" (a "globstar")
+)
+
+// GlobPart is a literal prefix followed by an optional wildcard.
+type GlobPart struct {
+	Prefix   string
+	Wildcard GlobWildcard
+}
+
+// ParseGlobPattern splits "text" into alternating literal prefixes and
+// wildcards. The returned array will always be at least one element. If
+// there are no wildcards then it will be exactly one element, and if there
+// are wildcards then it will be more than one element.
+func ParseGlobPattern(text string) (pattern []GlobPart) {
+	for {
+		star := strings.IndexByte(text, '*')
+		if star < 0 {
+			pattern = append(pattern, GlobPart{Prefix: text})
+			break
+		}
+		count := 1
+		for star+count < len(text) && text[star+count] == '*' {
+			count++
+		}
+		wildcard := GlobAllExceptSlash
+
+		// Allow both "/" and "\" as slashes
+		if count > 1 && (star == 0 || text[star-1] == '/' || text[star-1] == '\\') &&
+			(star+count == len(text) || text[star+count] == '/' || text[star+count] == '\\') {
+			wildcard = GlobAllIncludingSlash // A "globstar" path segment
+		}
+
+		pattern = append(pattern, GlobPart{Prefix: text[:star], Wildcard: wildcard})
+		text = text[star+count:]
+	}
+	return
+}
+
+// GlobPatternToString renders a parsed glob pattern back into its textual
+// form ("*" for GlobAllExceptSlash, "**" for GlobAllIncludingSlash).
+func GlobPatternToString(pattern []GlobPart) string {
+	sb := strings.Builder{}
+	for _, part := range pattern {
+		sb.WriteString(part.Prefix)
+		switch part.Wildcard {
+		case GlobAllExceptSlash:
+			sb.WriteByte('*')
+		case GlobAllIncludingSlash:
+			sb.WriteString("**")
+		}
+	}
+	return sb.String()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/hash.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/hash.go
new file mode 100644
index 0000000..d702886
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/hash.go
@@ -0,0 +1,14 @@
+package helpers
+
+// HashCombine mixes "hash" into "seed" using the Boost hash_combine scheme.
+// From: http://boost.sourceforge.net/doc/html/boost/hash_combine.html
+func HashCombine(seed uint32, hash uint32) uint32 {
+	return seed ^ (hash + 0x9e3779b9 + (seed << 6) + (seed >> 2))
+}
+
+// HashCombineString mixes a string into the seed: first its length, then
+// each rune in order.
+func HashCombineString(seed uint32, text string) uint32 {
+	seed = HashCombine(seed, uint32(len(text)))
+	for _, c := range text {
+		seed = HashCombine(seed, uint32(c))
+	}
+	return seed
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/joiner.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/joiner.go
new file mode 100644
index 0000000..649f80a
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/joiner.go
@@ -0,0 +1,86 @@
+package helpers
+
+import (
+	"bytes"
+	"strings"
+)
+
+// This provides an efficient way to join lots of big string and byte slices
+// together. It avoids the cost of repeatedly reallocating as the buffer grows
+// by measuring exactly how big the buffer should be and then allocating once.
+// This is a measurable speedup.
+type Joiner struct {
+	strings  []joinerString
+	bytes    []joinerBytes
+	length   uint32
+	lastByte byte
+}
+
+// joinerString is a queued string chunk plus its offset in the final output.
+type joinerString struct {
+	data   string
+	offset uint32
+}
+
+// joinerBytes is a queued byte chunk plus its offset in the final output.
+type joinerBytes struct {
+	data   []byte
+	offset uint32
+}
+
+// AddString queues a string chunk for the final output.
+func (j *Joiner) AddString(data string) {
+	if len(data) > 0 {
+		j.lastByte = data[len(data)-1]
+	}
+	j.strings = append(j.strings, joinerString{data, j.length})
+	j.length += uint32(len(data))
+}
+
+// AddBytes queues a byte chunk for the final output.
+func (j *Joiner) AddBytes(data []byte) {
+	if len(data) > 0 {
+		j.lastByte = data[len(data)-1]
+	}
+	j.bytes = append(j.bytes, joinerBytes{data, j.length})
+	j.length += uint32(len(data))
+}
+
+// LastByte returns the final byte of the most recent non-empty chunk, or the
+// zero byte if nothing non-empty has been added yet.
+func (j *Joiner) LastByte() byte {
+	return j.lastByte
+}
+
+// Length returns the total number of bytes queued so far.
+func (j *Joiner) Length() uint32 {
+	return j.length
+}
+
+// EnsureNewlineAtEnd appends "\n" unless the output is empty or already ends
+// with a newline.
+func (j *Joiner) EnsureNewlineAtEnd() {
+	if j.length > 0 && j.lastByte != '\n' {
+		j.AddString("\n")
+	}
+}
+
+// Done concatenates all queued chunks into a single byte slice with exactly
+// one allocation (or zero when a single byte chunk was the only input).
+func (j *Joiner) Done() []byte {
+	if len(j.strings) == 0 && len(j.bytes) == 1 && j.bytes[0].offset == 0 {
+		// No need to allocate if there was only a single byte array written
+		return j.bytes[0].data
+	}
+	buffer := make([]byte, j.length)
+	for _, item := range j.strings {
+		copy(buffer[item.offset:], item.data)
+	}
+	for _, item := range j.bytes {
+		copy(buffer[item.offset:], item.data)
+	}
+	return buffer
+}
+
+// Contains reports whether any individual queued chunk contains the needle
+// ("s" for string chunks, "b" for byte chunks). Note that a match spanning
+// the boundary between two chunks is not detected.
+func (j *Joiner) Contains(s string, b []byte) bool {
+	for _, item := range j.strings {
+		if strings.Contains(item.data, s) {
+			return true
+		}
+	}
+	for _, item := range j.bytes {
+		if bytes.Contains(item.data, b) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/mime.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/mime.go
new file mode 100644
index 0000000..f928a84
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/mime.go
@@ -0,0 +1,49 @@
+package helpers
+
+import "strings"
+
+// builtinTypesLower maps lowercase file extensions (with leading dot) to
+// their MIME types.
+var builtinTypesLower = map[string]string{
+	// Text
+	".css":      "text/css; charset=utf-8",
+	".htm":      "text/html; charset=utf-8",
+	".html":     "text/html; charset=utf-8",
+	".js":       "text/javascript; charset=utf-8",
+	".json":     "application/json; charset=utf-8",
+	".markdown": "text/markdown; charset=utf-8",
+	".md":       "text/markdown; charset=utf-8",
+	".mjs":      "text/javascript; charset=utf-8",
+	".xhtml":    "application/xhtml+xml; charset=utf-8",
+	".xml":      "text/xml; charset=utf-8",
+
+	// Images
+	".avif": "image/avif",
+	".gif":  "image/gif",
+	".jpeg": "image/jpeg",
+	".jpg":  "image/jpeg",
+	".png":  "image/png",
+	".svg":  "image/svg+xml",
+	".webp": "image/webp",
+
+	// Fonts
+	".eot":   "application/vnd.ms-fontobject",
+	".otf":   "font/otf",
+	".sfnt":  "font/sfnt",
+	".ttf":   "font/ttf",
+	".woff":  "font/woff",
+	".woff2": "font/woff2",
+
+	// Other
+	".pdf":         "application/pdf",
+	".wasm":        "application/wasm",
+	".webmanifest": "application/manifest+json",
+}
+
+// MimeTypeByExtension returns the MIME type for the given file extension
+// (including the leading dot), or the empty string if the extension is
+// unknown. Lookup is tried verbatim first, then lowercased.
+// This is used instead of Go's built-in "mime.TypeByExtension" function because
+// that function is broken on Windows: https://github.com/golang/go/issues/32350.
+func MimeTypeByExtension(ext string) string {
+	contentType := builtinTypesLower[ext]
+	if contentType == "" {
+		contentType = builtinTypesLower[strings.ToLower(ext)]
+	}
+	return contentType
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/path.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/path.go
new file mode 100644
index 0000000..87e90b8
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/path.go
@@ -0,0 +1,22 @@
+package helpers
+
+import "strings"
+
+// IsInsideNodeModules reports whether any slash-delimited segment of "path"
+// is exactly "node_modules". Both "/" and "\" count as separators.
+//
+// NOTE(review): a leading segment with no separator before it is never
+// checked (e.g. the relative path "node_modules/foo" returns false) — this
+// matches the loop's termination condition; confirm this is intentional.
+func IsInsideNodeModules(path string) bool {
+	for {
+		// This is written in a platform-independent manner because it's run on
+		// user-specified paths which can be arbitrary non-file-system things. So
+		// for example Windows paths may end up being used on Unix or URLs may end
+		// up being used on Windows. Be consistently agnostic to which kind of
+		// slash is used on all platforms.
+		slash := strings.LastIndexAny(path, "/\\")
+		if slash == -1 {
+			return false
+		}
+		dir, base := path[:slash], path[slash+1:]
+		if base == "node_modules" {
+			return true
+		}
+		path = dir
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/quote.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/quote.go
new file mode 100644
index 0000000..a505ad4
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/quote.go
@@ -0,0 +1,142 @@
+package helpers
+
+import "unicode/utf8"
+
+const hexChars = "0123456789ABCDEF"
+const firstASCII = 0x20
+const lastASCII = 0x7E
+const firstHighSurrogate = 0xD800
+const firstLowSurrogate = 0xDC00
+const lastLowSurrogate = 0xDFFF
+
+func canPrintWithoutEscape(c rune, asciiOnly bool) bool {
+	if c <= lastASCII {
+		return c >= firstASCII && c != '\\' && c != '"'
+	} else {
+		return !asciiOnly && c != '\uFEFF' && (c < firstHighSurrogate || c > lastLowSurrogate)
+	}
+}
+
+func QuoteSingle(text string, asciiOnly bool) []byte {
+	return internalQuote(text, asciiOnly, '\'')
+}
+
+func QuoteForJSON(text string, asciiOnly bool) []byte {
+	return internalQuote(text, asciiOnly, '"')
+}
+
+func internalQuote(text string, asciiOnly bool, quoteChar byte) []byte {
+	// Estimate the required length
+	lenEstimate := 2
+	for _, c := range text {
+		if canPrintWithoutEscape(c, asciiOnly) {
+			lenEstimate += utf8.RuneLen(c)
+		} else {
+			switch c {
+			case '\b', '\f', '\n', '\r', '\t', '\\':
+				lenEstimate += 2
+			case '"':
+				if quoteChar == '"' {
+					lenEstimate += 2
+				}
+			case '\'':
+				if quoteChar == '\'' {
+					lenEstimate += 2
+				}
+			default:
+				if c <= 0xFFFF {
+					lenEstimate += 6
+				} else {
+					lenEstimate += 12
+				}
+			}
+		}
+	}
+
+	// Preallocate the array
+	bytes := make([]byte, 0, lenEstimate)
+	i := 0
+	n := len(text)
+	bytes = append(bytes, quoteChar)
+
+	for i < n {
+		c, width := DecodeWTF8Rune(text[i:])
+
+		// Fast path: a run of characters that don't need escaping
+		if canPrintWithoutEscape(c, asciiOnly) {
+			start := i
+			i += width
+			for i < n {
+				c, width = DecodeWTF8Rune(text[i:])
+				if !canPrintWithoutEscape(c, asciiOnly) {
+					break
+				}
+				i += width
+			}
+			bytes = append(bytes, text[start:i]...)
+			continue
+		}
+
+		switch c {
+		case '\b':
+			bytes = append(bytes, "\\b"...)
+			i++
+
+		case '\f':
+			bytes = append(bytes, "\\f"...)
+			i++
+
+		case '\n':
+			bytes = append(bytes, "\\n"...)
+			i++
+
+		case '\r':
+			bytes = append(bytes, "\\r"...)
+			i++
+
+		case '\t':
+			bytes = append(bytes, "\\t"...)
+			i++
+
+		case '\\':
+			bytes = append(bytes, "\\\\"...)
+			i++
+
+		case '"':
+			if quoteChar == '"' {
+				bytes = append(bytes, "\\\""...)
+			} else {
+				bytes = append(bytes, '"')
+			}
+			i++
+
+		case '\'':
+			if quoteChar == '\'' {
+				bytes = append(bytes, "\\'"...)
+			} else {
+				bytes = append(bytes, '\'')
+			}
+			i++
+
+		default:
+			i += width
+			if c <= 0xFFFF {
+				bytes = append(
+					bytes,
+					'\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15],
+				)
+			} else {
+				c -= 0x10000
+				lo := firstHighSurrogate + ((c >> 10) & 0x3FF)
+				hi := firstLowSurrogate + (c & 0x3FF)
+				bytes = append(
+					bytes,
+					'\\', 'u', hexChars[lo>>12], hexChars[(lo>>8)&15], hexChars[(lo>>4)&15], hexChars[lo&15],
+					'\\', 'u', hexChars[hi>>12], hexChars[(hi>>8)&15], hexChars[(hi>>4)&15], hexChars[hi&15],
+				)
+			}
+		}
+	}
+
+	return append(bytes, quoteChar)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/serializer.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/serializer.go
new file mode 100644
index 0000000..9b3ae0b
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/serializer.go
@@ -0,0 +1,26 @@
+package helpers
+
+import "sync"
+
+// Each call to "Enter(i)" doesn't start until "Leave(i-1)" is called
+type Serializer struct {
+	flags []sync.WaitGroup
+}
+
+func MakeSerializer(count int) Serializer {
+	flags := make([]sync.WaitGroup, count)
+	for i := 0; i < count; i++ {
+		flags[i].Add(1)
+	}
+	return Serializer{flags: flags}
+}
+
+func (s *Serializer) Enter(i int) {
+	if i > 0 {
+		s.flags[i-1].Wait()
+	}
+}
+
+func (s *Serializer) Leave(i int) {
+	s.flags[i].Done()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/stack.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/stack.go
new file mode 100644
index 0000000..0c2e91c
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/stack.go
@@ -0,0 +1,50 @@
+package helpers
+
+import (
+	"runtime/debug"
+	"strings"
+)
+
+func PrettyPrintedStack() string {
+	lines := strings.Split(strings.TrimSpace(string(debug.Stack())), "\n")
+
+	// Strip the first "goroutine" line
+	if len(lines) > 0 {
+		if first := lines[0]; strings.HasPrefix(first, "goroutine ") && strings.HasSuffix(first, ":") {
+			lines = lines[1:]
+		}
+	}
+
+	sb := strings.Builder{}
+
+	for _, line := range lines {
+		// Indented lines are source locations
+		if strings.HasPrefix(line, "\t") {
+			line = line[1:]
+			line = strings.TrimPrefix(line, "github.com/evanw/esbuild/")
+			if offset := strings.LastIndex(line, " +0x"); offset != -1 {
+				line = line[:offset]
+			}
+			sb.WriteString(" (")
+			sb.WriteString(line)
+			sb.WriteString(")")
+			continue
+		}
+
+		// Other lines are function calls
+		if sb.Len() > 0 {
+			sb.WriteByte('\n')
+		}
+		if strings.HasSuffix(line, ")") {
+			if paren := strings.LastIndexByte(line, '('); paren != -1 {
+				line = line[:paren]
+			}
+		}
+		if slash := strings.LastIndexByte(line, '/'); slash != -1 {
+			line = line[slash+1:]
+		}
+		sb.WriteString(line)
+	}
+
+	return sb.String()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/strings.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/strings.go
new file mode 100644
index 0000000..e077892
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/strings.go
@@ -0,0 +1,41 @@
+package helpers
+
+import (
+	"fmt"
+	"strings"
+)
+
+func StringArraysEqual(a []string, b []string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, x := range a {
+		if x != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func StringArrayArraysEqual(a [][]string, b [][]string) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i, x := range a {
+		if !StringArraysEqual(x, b[i]) {
+			return false
+		}
+	}
+	return true
+}
+
+func StringArrayToQuotedCommaSeparatedString(a []string) string {
+	sb := strings.Builder{}
+	for i, str := range a {
+		if i > 0 {
+			sb.WriteString(", ")
+		}
+		sb.WriteString(fmt.Sprintf("%q", str))
+	}
+	return sb.String()
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/timer.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/timer.go
new file mode 100644
index 0000000..4502b23
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/timer.go
@@ -0,0 +1,94 @@
+package helpers
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type Timer struct {
+	data  []timerData
+	mutex sync.Mutex
+}
+
+type timerData struct {
+	time  time.Time
+	name  string
+	isEnd bool
+}
+
+func (t *Timer) Begin(name string) {
+	if t != nil {
+		t.data = append(t.data, timerData{
+			name: name,
+			time: time.Now(),
+		})
+	}
+}
+
+func (t *Timer) End(name string) {
+	if t != nil {
+		t.data = append(t.data, timerData{
+			name:  name,
+			time:  time.Now(),
+			isEnd: true,
+		})
+	}
+}
+
+func (t *Timer) Fork() *Timer {
+	if t != nil {
+		return &Timer{}
+	}
+	return nil
+}
+
+func (t *Timer) Join(other *Timer) {
+	if t != nil && other != nil {
+		t.mutex.Lock()
+		defer t.mutex.Unlock()
+		t.data = append(t.data, other.data...)
+	}
+}
+
+func (t *Timer) Log(log logger.Log) {
+	if t == nil {
+		return
+	}
+
+	type pair struct {
+		timerData
+		index uint32
+	}
+
+	var notes []logger.MsgData
+	var stack []pair
+	indent := 0
+
+	for _, item := range t.data {
+		if !item.isEnd {
+			top := pair{timerData: item, index: uint32(len(notes))}
+			notes = append(notes, logger.MsgData{DisableMaximumWidth: true})
+			stack = append(stack, top)
+			indent++
+		} else {
+			indent--
+			last := len(stack) - 1
+			top := stack[last]
+			stack = stack[:last]
+			if item.name != top.name {
+				panic("Internal error")
+			}
+			notes[top.index].Text = fmt.Sprintf("%s%s: %dms",
+				strings.Repeat("  ", indent),
+				top.name,
+				item.time.Sub(top.time).Milliseconds())
+		}
+	}
+
+	log.AddIDWithNotes(logger.MsgID_None, logger.Info, nil, logger.Range{},
+		"Timing information (times may not nest hierarchically due to parallelism)", notes)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/typos.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/typos.go
new file mode 100644
index 0000000..deef87e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/typos.go
@@ -0,0 +1,38 @@
+package helpers
+
+import "unicode/utf8"
+
+type TypoDetector struct {
+	oneCharTypos map[string]string
+}
+
+func MakeTypoDetector(valid []string) TypoDetector {
+	detector := TypoDetector{oneCharTypos: make(map[string]string)}
+
+	// Add all combinations of each valid word with one character missing
+	for _, correct := range valid {
+		if len(correct) > 3 {
+			for i, ch := range correct {
+				detector.oneCharTypos[correct[:i]+correct[i+utf8.RuneLen(ch):]] = correct
+			}
+		}
+	}
+
+	return detector
+}
+
+func (detector TypoDetector) MaybeCorrectTypo(typo string) (string, bool) {
+	// Check for a single deleted character
+	if corrected, ok := detector.oneCharTypos[typo]; ok {
+		return corrected, true
+	}
+
+	// Check for a single misplaced character
+	for i, ch := range typo {
+		if corrected, ok := detector.oneCharTypos[typo[:i]+typo[i+utf8.RuneLen(ch):]]; ok {
+			return corrected, true
+		}
+	}
+
+	return "", false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/utf.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/utf.go
new file mode 100644
index 0000000..59b24bb
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/utf.go
@@ -0,0 +1,230 @@
+package helpers
+
+import (
+	"strings"
+	"unicode/utf8"
+)
+
+func ContainsNonBMPCodePoint(text string) bool {
+	for _, c := range text {
+		if c > 0xFFFF {
+			return true
+		}
+	}
+	return false
+}
+
+// This does "ContainsNonBMPCodePoint(UTF16ToString(text))" without any allocations
+func ContainsNonBMPCodePointUTF16(text []uint16) bool {
+	if n := len(text); n > 0 {
+		for i, c := range text[:n-1] {
+			// Check for a high surrogate
+			if c >= 0xD800 && c <= 0xDBFF {
+				// Check for a low surrogate
+				if c2 := text[i+1]; c2 >= 0xDC00 && c2 <= 0xDFFF {
+					return true
+				}
+			}
+		}
+	}
+	return false
+}
+
+func StringToUTF16(text string) []uint16 {
+	decoded := make([]uint16, 0, len(text))
+	for _, c := range text {
+		if c <= 0xFFFF {
+			decoded = append(decoded, uint16(c))
+		} else {
+			c -= 0x10000
+			decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF)))
+		}
+	}
+	return decoded
+}
+
+func UTF16ToString(text []uint16) string {
+	var temp [utf8.UTFMax]byte
+	b := strings.Builder{}
+	n := len(text)
+	for i := 0; i < n; i++ {
+		r1 := rune(text[i])
+		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
+			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
+				r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000
+				i++
+			}
+		}
+		width := encodeWTF8Rune(temp[:], r1)
+		b.Write(temp[:width])
+	}
+	return b.String()
+}
+
+func UTF16ToStringWithValidation(text []uint16) (string, uint16, bool) {
+	var temp [utf8.UTFMax]byte
+	b := strings.Builder{}
+	n := len(text)
+	for i := 0; i < n; i++ {
+		r1 := rune(text[i])
+		if r1 >= 0xD800 && r1 <= 0xDBFF {
+			if i+1 < n {
+				if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
+					r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000
+					i++
+				} else {
+					return "", uint16(r1), false
+				}
+			} else {
+				return "", uint16(r1), false
+			}
+		} else if r1 >= 0xDC00 && r1 <= 0xDFFF {
+			return "", uint16(r1), false
+		}
+		width := encodeWTF8Rune(temp[:], r1)
+		b.Write(temp[:width])
+	}
+	return b.String(), 0, true
+}
+
+// Does "UTF16ToString(text) == str" without a temporary allocation
+func UTF16EqualsString(text []uint16, str string) bool {
+	if len(text) > len(str) {
+		// Strings can't be equal if UTF-16 encoding is longer than UTF-8 encoding
+		return false
+	}
+	var temp [utf8.UTFMax]byte
+	n := len(text)
+	j := 0
+	for i := 0; i < n; i++ {
+		r1 := rune(text[i])
+		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
+			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
+				r1 = (r1-0xD800)<<10 | (r2 - 0xDC00) + 0x10000
+				i++
+			}
+		}
+		width := encodeWTF8Rune(temp[:], r1)
+		if j+width > len(str) {
+			return false
+		}
+		for k := 0; k < width; k++ {
+			if temp[k] != str[j] {
+				return false
+			}
+			j++
+		}
+	}
+	return j == len(str)
+}
+
+func UTF16EqualsUTF16(a []uint16, b []uint16) bool {
+	if len(a) == len(b) {
+		for i, c := range a {
+			if c != b[i] {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+// This is a clone of "utf8.EncodeRune" that has been modified to encode using
+// WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for more info.
+func encodeWTF8Rune(p []byte, r rune) int {
+	// Negative values are erroneous. Making it unsigned addresses the problem.
+	switch i := uint32(r); {
+	case i <= 0x7F:
+		p[0] = byte(r)
+		return 1
+	case i <= 0x7FF:
+		_ = p[1] // eliminate bounds checks
+		p[0] = 0xC0 | byte(r>>6)
+		p[1] = 0x80 | byte(r)&0x3F
+		return 2
+	case i > utf8.MaxRune:
+		r = utf8.RuneError
+		fallthrough
+	case i <= 0xFFFF:
+		_ = p[2] // eliminate bounds checks
+		p[0] = 0xE0 | byte(r>>12)
+		p[1] = 0x80 | byte(r>>6)&0x3F
+		p[2] = 0x80 | byte(r)&0x3F
+		return 3
+	default:
+		_ = p[3] // eliminate bounds checks
+		p[0] = 0xF0 | byte(r>>18)
+		p[1] = 0x80 | byte(r>>12)&0x3F
+		p[2] = 0x80 | byte(r>>6)&0x3F
+		p[3] = 0x80 | byte(r)&0x3F
+		return 4
+	}
+}
+
+// This is a clone of "utf8.DecodeRuneInString" that has been modified to
+// decode using WTF-8 instead. See https://simonsapin.github.io/wtf-8/ for
+// more info.
+func DecodeWTF8Rune(s string) (rune, int) {
+	n := len(s)
+	if n < 1 {
+		return utf8.RuneError, 0
+	}
+
+	s0 := s[0]
+	if s0 < 0x80 {
+		return rune(s0), 1
+	}
+
+	var sz int
+	if (s0 & 0xE0) == 0xC0 {
+		sz = 2
+	} else if (s0 & 0xF0) == 0xE0 {
+		sz = 3
+	} else if (s0 & 0xF8) == 0xF0 {
+		sz = 4
+	} else {
+		return utf8.RuneError, 1
+	}
+
+	if n < sz {
+		return utf8.RuneError, 0
+	}
+
+	s1 := s[1]
+	if (s1 & 0xC0) != 0x80 {
+		return utf8.RuneError, 1
+	}
+
+	if sz == 2 {
+		cp := rune(s0&0x1F)<<6 | rune(s1&0x3F)
+		if cp < 0x80 {
+			return utf8.RuneError, 1
+		}
+		return cp, 2
+	}
+	s2 := s[2]
+
+	if (s2 & 0xC0) != 0x80 {
+		return utf8.RuneError, 1
+	}
+
+	if sz == 3 {
+		cp := rune(s0&0x0F)<<12 | rune(s1&0x3F)<<6 | rune(s2&0x3F)
+		if cp < 0x0800 {
+			return utf8.RuneError, 1
+		}
+		return cp, 3
+	}
+	s3 := s[3]
+
+	if (s3 & 0xC0) != 0x80 {
+		return utf8.RuneError, 1
+	}
+
+	cp := rune(s0&0x07)<<18 | rune(s1&0x3F)<<12 | rune(s2&0x3F)<<6 | rune(s3&0x3F)
+	if cp < 0x010000 || cp > 0x10FFFF {
+		return utf8.RuneError, 1
+	}
+	return cp, 4
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/helpers/waitgroup.go b/source/vendor/github.com/evanw/esbuild/internal/helpers/waitgroup.go
new file mode 100644
index 0000000..850e088
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/helpers/waitgroup.go
@@ -0,0 +1,37 @@
+package helpers
+
+import "sync/atomic"
+
+// Go's "sync.WaitGroup" is not thread-safe. Specifically it's not safe to call
+// "Add" concurrently with "Wait", which is problematic because we have a case
+// where we would like to do that.
+//
+// This is a simple alternative implementation of "sync.WaitGroup" that is
+// thread-safe and that works for our purposes. We don't need to worry about
+// multiple waiters so the implementation can be very simple.
+type ThreadSafeWaitGroup struct {
+	counter int32
+	channel chan struct{}
+}
+
+func MakeThreadSafeWaitGroup() *ThreadSafeWaitGroup {
+	return &ThreadSafeWaitGroup{
+		channel: make(chan struct{}, 1),
+	}
+}
+
+func (wg *ThreadSafeWaitGroup) Add(delta int32) {
+	if counter := atomic.AddInt32(&wg.counter, delta); counter == 0 {
+		wg.channel <- struct{}{}
+	} else if counter < 0 {
+		panic("sync: negative WaitGroup counter")
+	}
+}
+
+func (wg *ThreadSafeWaitGroup) Done() {
+	wg.Add(-1)
+}
+
+func (wg *ThreadSafeWaitGroup) Wait() {
+	<-wg.channel
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go
new file mode 100644
index 0000000..f8d3fe3
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast.go
@@ -0,0 +1,1841 @@
+package js_ast
+
+import (
+	"strconv"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// Every module (i.e. file) is parsed into a separate AST data structure. For
+// efficiency, the parser also resolves all scopes and binds all symbols in the
+// tree.
+//
+// Identifiers in the tree are referenced by a Ref, which is a pointer into the
+// symbol table for the file. The symbol table is stored as a top-level field
+// in the AST so it can be accessed without traversing the tree. For example,
+// a renaming pass can iterate over the symbol table without touching the tree.
+//
+// Parse trees are intended to be immutable. That makes it easy to build an
+// incremental compiler with a "watch" mode that can avoid re-parsing files
+// that have already been parsed. Any passes that operate on an AST after it
+// has been parsed should create a copy of the mutated parts of the tree
+// instead of mutating the original tree.
+
+type L uint8
+
+// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Operators/Operator_Precedence
+const (
+	LLowest L = iota
+	LComma
+	LSpread
+	LYield
+	LAssign
+	LConditional
+	LNullishCoalescing
+	LLogicalOr
+	LLogicalAnd
+	LBitwiseOr
+	LBitwiseXor
+	LBitwiseAnd
+	LEquals
+	LCompare
+	LShift
+	LAdd
+	LMultiply
+	LExponentiation
+	LPrefix
+	LPostfix
+	LNew
+	LCall
+	LMember
+)
+
+type OpCode uint8
+
+func (op OpCode) IsPrefix() bool {
+	return op < UnOpPostDec
+}
+
+func (op OpCode) UnaryAssignTarget() AssignTarget {
+	if op >= UnOpPreDec && op <= UnOpPostInc {
+		return AssignTargetUpdate
+	}
+	return AssignTargetNone
+}
+
+func (op OpCode) IsLeftAssociative() bool {
+	return op >= BinOpAdd && op < BinOpComma && op != BinOpPow
+}
+
+func (op OpCode) IsRightAssociative() bool {
+	return op >= BinOpAssign || op == BinOpPow
+}
+
+func (op OpCode) BinaryAssignTarget() AssignTarget {
+	if op == BinOpAssign {
+		return AssignTargetReplace
+	}
+	if op > BinOpAssign {
+		return AssignTargetUpdate
+	}
+	return AssignTargetNone
+}
+
+func (op OpCode) IsShortCircuit() bool {
+	switch op {
+	case BinOpLogicalOr, BinOpLogicalOrAssign,
+		BinOpLogicalAnd, BinOpLogicalAndAssign,
+		BinOpNullishCoalescing, BinOpNullishCoalescingAssign:
+		return true
+	}
+	return false
+}
+
+type AssignTarget uint8
+
+const (
+	AssignTargetNone    AssignTarget = iota
+	AssignTargetReplace              // "a = b"
+	AssignTargetUpdate               // "a += b"
+)
+
+// If you add a new token, remember to add it to "OpTable" too
+const (
+	// Prefix
+	UnOpPos OpCode = iota
+	UnOpNeg
+	UnOpCpl
+	UnOpNot
+	UnOpVoid
+	UnOpTypeof
+	UnOpDelete
+
+	// Prefix update
+	UnOpPreDec
+	UnOpPreInc
+
+	// Postfix update
+	UnOpPostDec
+	UnOpPostInc
+
+	// Left-associative
+	BinOpAdd
+	BinOpSub
+	BinOpMul
+	BinOpDiv
+	BinOpRem
+	BinOpPow
+	BinOpLt
+	BinOpLe
+	BinOpGt
+	BinOpGe
+	BinOpIn
+	BinOpInstanceof
+	BinOpShl
+	BinOpShr
+	BinOpUShr
+	BinOpLooseEq
+	BinOpLooseNe
+	BinOpStrictEq
+	BinOpStrictNe
+	BinOpNullishCoalescing
+	BinOpLogicalOr
+	BinOpLogicalAnd
+	BinOpBitwiseOr
+	BinOpBitwiseAnd
+	BinOpBitwiseXor
+
+	// Non-associative
+	BinOpComma
+
+	// Right-associative
+	BinOpAssign
+	BinOpAddAssign
+	BinOpSubAssign
+	BinOpMulAssign
+	BinOpDivAssign
+	BinOpRemAssign
+	BinOpPowAssign
+	BinOpShlAssign
+	BinOpShrAssign
+	BinOpUShrAssign
+	BinOpBitwiseOrAssign
+	BinOpBitwiseAndAssign
+	BinOpBitwiseXorAssign
+	BinOpNullishCoalescingAssign
+	BinOpLogicalOrAssign
+	BinOpLogicalAndAssign
+)
+
+type OpTableEntry struct {
+	Text      string
+	Level     L
+	IsKeyword bool
+}
+
+var OpTable = []OpTableEntry{
+	// Prefix
+	{"+", LPrefix, false},
+	{"-", LPrefix, false},
+	{"~", LPrefix, false},
+	{"!", LPrefix, false},
+	{"void", LPrefix, true},
+	{"typeof", LPrefix, true},
+	{"delete", LPrefix, true},
+
+	// Prefix update
+	{"--", LPrefix, false},
+	{"++", LPrefix, false},
+
+	// Postfix update
+	{"--", LPostfix, false},
+	{"++", LPostfix, false},
+
+	// Left-associative
+	{"+", LAdd, false},
+	{"-", LAdd, false},
+	{"*", LMultiply, false},
+	{"/", LMultiply, false},
+	{"%", LMultiply, false},
+	{"**", LExponentiation, false}, // Right-associative
+	{"<", LCompare, false},
+	{"<=", LCompare, false},
+	{">", LCompare, false},
+	{">=", LCompare, false},
+	{"in", LCompare, true},
+	{"instanceof", LCompare, true},
+	{"<<", LShift, false},
+	{">>", LShift, false},
+	{">>>", LShift, false},
+	{"==", LEquals, false},
+	{"!=", LEquals, false},
+	{"===", LEquals, false},
+	{"!==", LEquals, false},
+	{"??", LNullishCoalescing, false},
+	{"||", LLogicalOr, false},
+	{"&&", LLogicalAnd, false},
+	{"|", LBitwiseOr, false},
+	{"&", LBitwiseAnd, false},
+	{"^", LBitwiseXor, false},
+
+	// Non-associative
+	{",", LComma, false},
+
+	// Right-associative
+	{"=", LAssign, false},
+	{"+=", LAssign, false},
+	{"-=", LAssign, false},
+	{"*=", LAssign, false},
+	{"/=", LAssign, false},
+	{"%=", LAssign, false},
+	{"**=", LAssign, false},
+	{"<<=", LAssign, false},
+	{">>=", LAssign, false},
+	{">>>=", LAssign, false},
+	{"|=", LAssign, false},
+	{"&=", LAssign, false},
+	{"^=", LAssign, false},
+	{"??=", LAssign, false},
+	{"||=", LAssign, false},
+	{"&&=", LAssign, false},
+}
+
+type Decorator struct {
+	Value            Expr
+	AtLoc            logger.Loc
+	OmitNewlineAfter bool
+}
+
+type PropertyKind uint8
+
+const (
+	PropertyField PropertyKind = iota
+	PropertyMethod
+	PropertyGetter
+	PropertySetter
+	PropertyAutoAccessor
+	PropertySpread
+	PropertyDeclareOrAbstract
+	PropertyClassStaticBlock
+)
+
+// This returns true if and only if this property matches the "MethodDefinition"
+// grammar from the specification. That means it's one of the following forms:
+//
+//	foo() {}
+//	*foo() {}
+//	async foo() {}
+//	async *foo() {}
+//	get foo() {}
+//	set foo(_) {}
+//
+// If this returns true, the "ValueOrNil" field of the property is always an
+// "EFunction" expression and it is always printed as a method.
+func (kind PropertyKind) IsMethodDefinition() bool {
+	return kind == PropertyMethod || kind == PropertyGetter || kind == PropertySetter
+}
+
+type ClassStaticBlock struct {
+	Block SBlock
+	Loc   logger.Loc
+}
+
+type PropertyFlags uint8
+
+const (
+	PropertyIsComputed PropertyFlags = 1 << iota
+	PropertyIsStatic
+	PropertyWasShorthand
+	PropertyPreferQuotedKey
+)
+
+func (flags PropertyFlags) Has(flag PropertyFlags) bool {
+	return (flags & flag) != 0
+}
+
+type Property struct {
+	ClassStaticBlock *ClassStaticBlock
+
+	Key Expr
+
+	// This is omitted for class fields
+	ValueOrNil Expr
+
+	// This is used when parsing a pattern that uses default values:
+	//
+	//   [a = 1] = [];
+	//   ({a = 1} = {});
+	//
+	// It's also used for class fields:
+	//
+	//   class Foo { a = 1 }
+	//
+	InitializerOrNil Expr
+
+	Decorators []Decorator
+
+	Loc             logger.Loc
+	CloseBracketLoc logger.Loc
+	Kind            PropertyKind
+	Flags           PropertyFlags
+}
+
+type PropertyBinding struct {
+	Key               Expr
+	Value             Binding
+	DefaultValueOrNil Expr
+	Loc               logger.Loc
+	CloseBracketLoc   logger.Loc
+	IsComputed        bool
+	IsSpread          bool
+	PreferQuotedKey   bool
+}
+
+type Arg struct {
+	Binding      Binding
+	DefaultOrNil Expr
+	Decorators   []Decorator
+
+	// "constructor(public x: boolean) {}"
+	IsTypeScriptCtorField bool
+}
+
+type Fn struct {
+	Name         *ast.LocRef
+	Args         []Arg
+	Body         FnBody
+	ArgumentsRef ast.Ref
+	OpenParenLoc logger.Loc
+
+	IsAsync     bool
+	IsGenerator bool
+	HasRestArg  bool
+	HasIfScope  bool
+
+	// See: https://github.com/rollup/rollup/pull/5024
+	HasNoSideEffectsComment bool
+
+	// This is true if the function is a method
+	IsUniqueFormalParameters bool
+}
+
+type FnBody struct {
+	Block SBlock
+	Loc   logger.Loc
+}
+
+type Class struct {
+	Decorators    []Decorator
+	Name          *ast.LocRef
+	ExtendsOrNil  Expr
+	Properties    []Property
+	ClassKeyword  logger.Range
+	BodyLoc       logger.Loc
+	CloseBraceLoc logger.Loc
+
+	// If true, JavaScript decorators (i.e. not TypeScript experimental
+	// decorators) should be lowered. This is the case either if JavaScript
+	// decorators are not supported in the configured target environment, or
+	// if "useDefineForClassFields" is set to false and this class has
+	// decorators on it. Note that this flag is not necessarily set to true if
+	// "useDefineForClassFields" is false and a class has an "accessor" even
+	// though the accessor feature comes from the decorator specification.
+	ShouldLowerStandardDecorators bool
+
+	// If true, property field initializers cannot be assumed to have no side
+	// effects. For example:
+	//
+	//   class Foo {
+	//     static set foo(x) { importantSideEffect(x) }
+	//   }
+	//   class Bar extends Foo {
+	//     foo = 1
+	//   }
+	//
+	// This happens in TypeScript when "useDefineForClassFields" is disabled
+	// because TypeScript (and esbuild) transforms the above class into this:
+	//
+	//   class Foo {
+	//     static set foo(x) { importantSideEffect(x); }
+	//   }
+	//   class Bar extends Foo {
+	//   }
+	//   Bar.foo = 1;
+	//
+	UseDefineForClassFields bool
+}
+
+type ArrayBinding struct {
+	Binding           Binding
+	DefaultValueOrNil Expr
+	Loc               logger.Loc
+}
+
+type Binding struct {
+	Data B
+	Loc  logger.Loc
+}
+
+// This interface is never called. Its purpose is to encode a variant type in
+// Go's type system.
+type B interface{ isBinding() }
+
+func (*BMissing) isBinding()    {}
+func (*BIdentifier) isBinding() {}
+func (*BArray) isBinding()      {}
+func (*BObject) isBinding()     {}
+
+type BMissing struct{}
+
+type BIdentifier struct{ Ref ast.Ref }
+
+type BArray struct {
+	Items           []ArrayBinding
+	CloseBracketLoc logger.Loc
+	HasSpread       bool
+	IsSingleLine    bool
+}
+
+type BObject struct {
+	Properties    []PropertyBinding
+	CloseBraceLoc logger.Loc
+	IsSingleLine  bool
+}
+
+type Expr struct {
+	Data E
+	Loc  logger.Loc
+}
+
+// This interface is never called. Its purpose is to encode a variant type in
+// Go's type system.
+type E interface{ isExpr() }
+
+func (*EArray) isExpr()                {}
+func (*EUnary) isExpr()                {}
+func (*EBinary) isExpr()               {}
+func (*EBoolean) isExpr()              {}
+func (*ESuper) isExpr()                {}
+func (*ENull) isExpr()                 {}
+func (*EUndefined) isExpr()            {}
+func (*EThis) isExpr()                 {}
+func (*ENew) isExpr()                  {}
+func (*ENewTarget) isExpr()            {}
+func (*EImportMeta) isExpr()           {}
+func (*ECall) isExpr()                 {}
+func (*EDot) isExpr()                  {}
+func (*EIndex) isExpr()                {}
+func (*EArrow) isExpr()                {}
+func (*EFunction) isExpr()             {}
+func (*EClass) isExpr()                {}
+func (*EIdentifier) isExpr()           {}
+func (*EImportIdentifier) isExpr()     {}
+func (*EPrivateIdentifier) isExpr()    {}
+func (*ENameOfSymbol) isExpr()         {}
+func (*EJSXElement) isExpr()           {}
+func (*EJSXText) isExpr()              {}
+func (*EMissing) isExpr()              {}
+func (*ENumber) isExpr()               {}
+func (*EBigInt) isExpr()               {}
+func (*EObject) isExpr()               {}
+func (*ESpread) isExpr()               {}
+func (*EString) isExpr()               {}
+func (*ETemplate) isExpr()             {}
+func (*ERegExp) isExpr()               {}
+func (*EInlinedEnum) isExpr()          {}
+func (*EAnnotation) isExpr()           {}
+func (*EAwait) isExpr()                {}
+func (*EYield) isExpr()                {}
+func (*EIf) isExpr()                   {}
+func (*ERequireString) isExpr()        {}
+func (*ERequireResolveString) isExpr() {}
+func (*EImportString) isExpr()         {}
+func (*EImportCall) isExpr()           {}
+
+type EArray struct {
+	Items            []Expr
+	CommaAfterSpread logger.Loc
+	CloseBracketLoc  logger.Loc
+	IsSingleLine     bool
+	IsParenthesized  bool
+}
+
+type EUnary struct {
+	Value Expr
+	Op    OpCode
+
+	// The expression "typeof (0, x)" must not become "typeof x" if "x"
+	// is unbound because that could suppress a ReferenceError from "x".
+	//
+	// Also if we know a typeof operator was originally an identifier, then
+	// we know that this typeof operator always has no side effects (even if
+	// we consider the identifier by itself to have a side effect).
+	//
+	// Note that there *is* actually a case where "typeof x" can throw an error:
+	// when "x" is being referenced inside of its TDZ (temporal dead zone). TDZ
+	// checks are not yet handled correctly by esbuild, so this possibility is
+	// currently ignored.
+	WasOriginallyTypeofIdentifier bool
+
+	// Similarly the expression "delete (0, x)" must not become "delete x"
+	// because that syntax is invalid in strict mode. We also need to make sure
+	// we don't accidentally change the return value:
+	//
+	//   Returns false:
+	//     "var a; delete (a)"
+	//     "var a = Object.freeze({b: 1}); delete (a.b)"
+	//     "var a = Object.freeze({b: 1}); delete (a?.b)"
+	//     "var a = Object.freeze({b: 1}); delete (a['b'])"
+	//     "var a = Object.freeze({b: 1}); delete (a?.['b'])"
+	//
+	//   Returns true:
+	//     "var a; delete (0, a)"
+	//     "var a = Object.freeze({b: 1}); delete (true && a.b)"
+	//     "var a = Object.freeze({b: 1}); delete (false || a?.b)"
+	//     "var a = Object.freeze({b: 1}); delete (null ?? a?.['b'])"
+	//     "var a = Object.freeze({b: 1}); delete (true ? a['b'] : a['b'])"
+	//
+	WasOriginallyDeleteOfIdentifierOrPropertyAccess bool
+}
+
+type EBinary struct {
+	Left  Expr
+	Right Expr
+	Op    OpCode
+}
+
+type EBoolean struct{ Value bool }
+
+type EMissing struct{}
+
+type ESuper struct{}
+
+type ENull struct{}
+
+type EUndefined struct{}
+
+type EThis struct{}
+
+type ENewTarget struct {
+	Range logger.Range
+}
+
+type EImportMeta struct {
+	RangeLen int32
+}
+
+// These help reduce unnecessary memory allocations
+var BMissingShared = &BMissing{}
+var EMissingShared = &EMissing{}
+var ENullShared = &ENull{}
+var ESuperShared = &ESuper{}
+var EThisShared = &EThis{}
+var EUndefinedShared = &EUndefined{}
+var SDebuggerShared = &SDebugger{}
+var SEmptyShared = &SEmpty{}
+var STypeScriptShared = &STypeScript{}
+var STypeScriptSharedWasDeclareClass = &STypeScript{WasDeclareClass: true}
+
+type ENew struct {
+	Target Expr
+	Args   []Expr
+
+	CloseParenLoc logger.Loc
+	IsMultiLine   bool
+
+	// True if there is a comment containing "@__PURE__" or "#__PURE__" preceding
+	// this call expression. See the comment inside ECall for more details.
+	CanBeUnwrappedIfUnused bool
+}
+
+type CallKind uint8
+
+const (
+	NormalCall CallKind = iota
+	DirectEval
+	TargetWasOriginallyPropertyAccess
+)
+
+type OptionalChain uint8
+
+const (
+	// "a.b"
+	OptionalChainNone OptionalChain = iota
+
+	// "a?.b"
+	OptionalChainStart
+
+	// "a?.b.c" => ".c" is OptionalChainContinue
+	// "(a?.b).c" => ".c" is OptionalChainNone
+	OptionalChainContinue
+)
+
+// A function call expression: "Target(Args)".
+type ECall struct {
+	Target        Expr
+	Args          []Expr
+	CloseParenLoc logger.Loc
+	OptionalChain OptionalChain
+	Kind          CallKind
+	IsMultiLine   bool
+
+	// True if there is a comment containing "@__PURE__" or "#__PURE__" preceding
+	// this call expression. This is an annotation used for tree shaking, and
+	// means that the call can be removed if it's unused. It does not mean the
+	// call is pure (e.g. it may still return something different if called twice).
+	//
+	// Note that the arguments are not considered to be part of the call. If the
+	// call itself is removed due to this annotation, the arguments must remain
+	// if they have side effects.
+	CanBeUnwrappedIfUnused bool
+}
+
+// HasSameFlagsAs reports whether "a" and "b" agree on all metadata flags
+// (everything other than the target, arguments, and source locations).
+func (a *ECall) HasSameFlagsAs(b *ECall) bool {
+	return a.OptionalChain == b.OptionalChain &&
+		a.Kind == b.Kind &&
+		a.CanBeUnwrappedIfUnused == b.CanBeUnwrappedIfUnused
+}
+
+// A property access with a static name: "Target.Name".
+type EDot struct {
+	Target        Expr
+	Name          string
+	NameLoc       logger.Loc
+	OptionalChain OptionalChain
+
+	// If true, this property access is known to be free of side-effects. That
+	// means it can be removed if the resulting value isn't used.
+	CanBeRemovedIfUnused bool
+
+	// If true, this property access is a function that, when called, can be
+	// unwrapped if the resulting value is unused. Unwrapping means discarding
+	// the call target but keeping any arguments with side effects.
+	CallCanBeUnwrappedIfUnused bool
+
+	// Symbol values are known to not have side effects when used as property
+	// names in class declarations and object literals.
+	IsSymbolInstance bool
+}
+
+// HasSameFlagsAs reports whether "a" and "b" agree on all metadata flags
+// (everything other than the target, name, and source locations).
+func (a *EDot) HasSameFlagsAs(b *EDot) bool {
+	return a.OptionalChain == b.OptionalChain &&
+		a.CanBeRemovedIfUnused == b.CanBeRemovedIfUnused &&
+		a.CallCanBeUnwrappedIfUnused == b.CallCanBeUnwrappedIfUnused &&
+		a.IsSymbolInstance == b.IsSymbolInstance
+}
+
+// A computed property access: "Target[Index]".
+type EIndex struct {
+	Target          Expr
+	Index           Expr
+	CloseBracketLoc logger.Loc
+	OptionalChain   OptionalChain
+
+	// If true, this property access is known to be free of side-effects. That
+	// means it can be removed if the resulting value isn't used.
+	CanBeRemovedIfUnused bool
+
+	// If true, this property access is a function that, when called, can be
+	// unwrapped if the resulting value is unused. Unwrapping means discarding
+	// the call target but keeping any arguments with side effects.
+	CallCanBeUnwrappedIfUnused bool
+
+	// Symbol values are known to not have side effects when used as property
+	// names in class declarations and object literals.
+	IsSymbolInstance bool
+}
+
+// HasSameFlagsAs reports whether "a" and "b" agree on all metadata flags
+// (everything other than the target, index, and source locations).
+func (a *EIndex) HasSameFlagsAs(b *EIndex) bool {
+	return a.OptionalChain == b.OptionalChain &&
+		a.CanBeRemovedIfUnused == b.CanBeRemovedIfUnused &&
+		a.CallCanBeUnwrappedIfUnused == b.CallCanBeUnwrappedIfUnused &&
+		a.IsSymbolInstance == b.IsSymbolInstance
+}
+
+// An arrow function expression: "(args) => body".
+type EArrow struct {
+	Args []Arg
+	Body FnBody
+
+	IsAsync    bool
+	HasRestArg bool
+	PreferExpr bool // Use shorthand if true and "Body" is a single return statement
+
+	// See: https://github.com/rollup/rollup/pull/5024
+	HasNoSideEffectsComment bool
+}
+
+// A function expression.
+type EFunction struct{ Fn Fn }
+
+// A class expression.
+type EClass struct{ Class Class }
+
+// A reference to a symbol by name.
+type EIdentifier struct {
+	Ref ast.Ref
+
+	// If we're inside a "with" statement, this identifier may be a property
+	// access. In that case it would be incorrect to remove this identifier since
+	// the property access may be a getter or setter with side effects.
+	MustKeepDueToWithStmt bool
+
+	// If true, this identifier is known to not have a side effect (i.e. to not
+	// throw an exception) when referenced. If false, this identifier may or may
+	// not have side effects when referenced. This is used to allow the removal
+	// of known globals such as "Object" if they aren't used.
+	CanBeRemovedIfUnused bool
+
+	// If true, this identifier represents a function that, when called, can be
+	// unwrapped if the resulting value is unused. Unwrapping means discarding
+	// the call target but keeping any arguments with side effects.
+	CallCanBeUnwrappedIfUnused bool
+}
+
+// This is similar to an EIdentifier but it represents a reference to an ES6
+// import item.
+//
+// Depending on how the code is linked, the file containing this EImportIdentifier
+// may or may not be in the same module group as the file it was imported from.
+//
+// If it's the same module group then we can just merge the import item symbol
+// with the corresponding symbol that was imported, effectively renaming them
+// to be the same thing and statically binding them together.
+//
+// But if it's a different module group, then the import must be dynamically
+// evaluated using a property access off the corresponding namespace symbol,
+// which represents the result of a require() call.
+//
+// It's stored as a separate type so it's not easy to confuse with a plain
+// identifier. For example, it'd be bad if code trying to convert "{x: x}" into
+// "{x}" shorthand syntax wasn't aware that the "x" in this case is actually
+// "{x: importedNamespace.x}". This separate type forces code to opt-in to
+// doing this instead of opt-out.
+type EImportIdentifier struct {
+	Ref             ast.Ref
+	PreferQuotedKey bool
+
+	// If true, this was originally an identifier expression such as "foo". If
+	// false, this could potentially have been a member access expression such
+	// as "ns.foo" off of an imported namespace object.
+	WasOriginallyIdentifier bool
+}
+
+// This is similar to EIdentifier but it represents class-private fields and
+// methods. It can be used where computed properties can be used, such as
+// EIndex and Property.
+type EPrivateIdentifier struct {
+	Ref ast.Ref
+}
+
+// This represents an internal property name that can be mangled. The symbol
+// referenced by this expression should be a "SymbolMangledProp" symbol.
+type ENameOfSymbol struct {
+	Ref                   ast.Ref
+	HasPropertyKeyComment bool // If true, a preceding comment contains "@__KEY__"
+}
+
+// A JSX element or fragment expression.
+type EJSXElement struct {
+	TagOrNil   Expr
+	Properties []Property
+
+	// Note: This array may contain nil entries. Be careful about nil entries
+	// when iterating over this array.
+	//
+	// Each nil entry corresponds to the "JSXChildExpression_opt" part of the
+	// grammar (https://facebook.github.io/jsx/#prod-JSXChild):
+	//
+	//   JSXChild :
+	//       JSXText
+	//       JSXElement
+	//       JSXFragment
+	//       { JSXChildExpression_opt }
+	//
+	// This is the "{}" part in "<a>{}</a>". We allow this because some people
+	// put comments there and then expect to be able to process them from
+	// esbuild's output. These absent AST nodes are completely omitted when
+	// JSX is transformed to JS. They are only present when JSX preservation is
+	// enabled.
+	NullableChildren []Expr
+
+	CloseLoc        logger.Loc
+	IsTagSingleLine bool
+}
+
+// The JSX specification doesn't say how JSX text is supposed to be interpreted
+// so our "preserve" JSX transform should reproduce the original source code
+// verbatim. One reason why this matters is because there is no canonical way
+// to interpret JSX text (Babel and TypeScript differ in what newlines mean).
+// Another reason is that some people want to do custom things such as this:
+// https://github.com/evanw/esbuild/issues/3605
+type EJSXText struct {
+	Raw string
+}
+
+// A numeric literal.
+type ENumber struct{ Value float64 }
+
+// A BigInt literal, stored as its source text.
+type EBigInt struct{ Value string }
+
+// An object literal expression.
+type EObject struct {
+	Properties       []Property
+	CommaAfterSpread logger.Loc
+	CloseBraceLoc    logger.Loc
+	IsSingleLine     bool
+	IsParenthesized  bool
+}
+
+// A spread element: "...Value".
+type ESpread struct{ Value Expr }
+
+// This is used for both strings and no-substitution template literals to reduce
+// the number of cases that need to be checked for string optimization code
+type EString struct {
+	Value                 []uint16
+	LegacyOctalLoc        logger.Loc
+	PreferTemplate        bool
+	HasPropertyKeyComment bool // If true, a preceding comment contains "@__KEY__"
+	ContainsUniqueKey     bool // If true, this string must not be wrapped
+}
+
+// One "${Value}tail" segment of a template literal.
+type TemplatePart struct {
+	Value      Expr
+	TailRaw    string   // Only use when "TagOrNil" is not nil
+	TailCooked []uint16 // Only use when "TagOrNil" is nil
+	TailLoc    logger.Loc
+}
+
+// A template literal, optionally tagged: "tag`head${...}...`".
+type ETemplate struct {
+	TagOrNil       Expr
+	HeadRaw        string   // Only use when "TagOrNil" is not nil
+	HeadCooked     []uint16 // Only use when "TagOrNil" is nil
+	Parts          []TemplatePart
+	HeadLoc        logger.Loc
+	LegacyOctalLoc logger.Loc
+
+	// True if this is a tagged template literal with a comment that indicates
+	// this function call can be removed if the result is unused. Note that the
+	// arguments are not considered to be part of the call. If the call itself
+	// is removed due to this annotation, the arguments must remain if they have
+	// side effects (including the string conversions).
+	CanBeUnwrappedIfUnused bool
+
+	// If the tag is present, it is expected to be a function and is called. If
+	// the tag is a syntactic property access, then the value for "this" in the
+	// function call is the object whose property was accessed (e.g. in "a.b``"
+	// the value for "this" in "a.b" is "a"). We need to ensure that if "a``"
+	// ever becomes "b.c``" later on due to optimizations, it is written as
+	// "(0, b.c)``" to avoid a behavior change.
+	TagWasOriginallyPropertyAccess bool
+}
+
+// A regular expression literal, stored as its source text.
+type ERegExp struct{ Value string }
+
+// An enum value that was inlined at a use site; "Comment" preserves the
+// original name for readability in the output.
+type EInlinedEnum struct {
+	Value   Expr
+	Comment string
+}
+
+// AnnotationFlags is a bit set of annotations attached to an EAnnotation.
+type AnnotationFlags uint8
+
+const (
+	// This is sort of like an IIFE with a "/* @__PURE__ */" comment except it's an
+	// inline annotation on an expression itself without the nested scope. Sometimes
+	// we can't easily introduce a new scope (e.g. if the expression uses "await").
+	CanBeRemovedIfUnusedFlag AnnotationFlags = 1 << iota
+)
+
+// Has reports whether all bits in "flag" are set.
+func (flags AnnotationFlags) Has(flag AnnotationFlags) bool {
+	return (flags & flag) != 0
+}
+
+// An expression wrapped with annotation flags (see AnnotationFlags).
+type EAnnotation struct {
+	Value Expr
+	Flags AnnotationFlags
+}
+
+// An "await" expression.
+type EAwait struct {
+	Value Expr
+}
+
+// A "yield" or "yield*" expression.
+type EYield struct {
+	ValueOrNil Expr
+	IsStar     bool
+}
+
+// A conditional (ternary) expression: "Test ? Yes : No".
+type EIf struct {
+	Test Expr
+	Yes  Expr
+	No   Expr
+}
+
+// A "require()" call with a string literal argument, referenced by import
+// record index.
+type ERequireString struct {
+	ImportRecordIndex uint32
+	CloseParenLoc     logger.Loc
+}
+
+// A "require.resolve()" call with a string literal argument.
+type ERequireResolveString struct {
+	ImportRecordIndex uint32
+	CloseParenLoc     logger.Loc
+}
+
+// An "import()" call with a string literal argument.
+type EImportString struct {
+	ImportRecordIndex uint32
+	CloseParenLoc     logger.Loc
+}
+
+// An "import()" call with a non-literal (dynamic) argument and optional
+// second options argument.
+type EImportCall struct {
+	Expr          Expr
+	OptionsOrNil  Expr
+	CloseParenLoc logger.Loc
+}
+
+// A statement: its variant data plus its source location.
+type Stmt struct {
+	Data S
+	Loc  logger.Loc
+}
+
+// This interface is never called. Its purpose is to encode a variant type in
+// Go's type system.
+type S interface{ isStmt() }
+
+// Marker-method implementations that register each statement type as a
+// member of the S variant interface.
+func (*SBlock) isStmt()         {}
+func (*SComment) isStmt()       {}
+func (*SDebugger) isStmt()      {}
+func (*SDirective) isStmt()     {}
+func (*SEmpty) isStmt()         {}
+func (*STypeScript) isStmt()    {}
+func (*SExportClause) isStmt()  {}
+func (*SExportFrom) isStmt()    {}
+func (*SExportDefault) isStmt() {}
+func (*SExportStar) isStmt()    {}
+func (*SExportEquals) isStmt()  {}
+func (*SLazyExport) isStmt()    {}
+func (*SExpr) isStmt()          {}
+func (*SEnum) isStmt()          {}
+func (*SNamespace) isStmt()     {}
+func (*SFunction) isStmt()      {}
+func (*SClass) isStmt()         {}
+func (*SLabel) isStmt()         {}
+func (*SIf) isStmt()            {}
+func (*SFor) isStmt()           {}
+func (*SForIn) isStmt()         {}
+func (*SForOf) isStmt()         {}
+func (*SDoWhile) isStmt()       {}
+func (*SWhile) isStmt()         {}
+func (*SWith) isStmt()          {}
+func (*STry) isStmt()           {}
+func (*SSwitch) isStmt()        {}
+func (*SImport) isStmt()        {}
+func (*SReturn) isStmt()        {}
+func (*SThrow) isStmt()         {}
+func (*SLocal) isStmt()         {}
+func (*SBreak) isStmt()         {}
+func (*SContinue) isStmt()      {}
+
+// A block statement: "{ ... }".
+type SBlock struct {
+	Stmts         []Stmt
+	CloseBraceLoc logger.Loc
+}
+
+// An empty statement: ";".
+type SEmpty struct{}
+
+// This is a stand-in for a TypeScript type declaration
+type STypeScript struct {
+	WasDeclareClass bool
+}
+
+// A comment preserved in the output.
+type SComment struct {
+	Text           string
+	IsLegalComment bool
+}
+
+// A "debugger" statement.
+type SDebugger struct{}
+
+// A directive prologue entry such as "use strict".
+type SDirective struct {
+	Value          []uint16
+	LegacyOctalLoc logger.Loc
+}
+
+// An export clause without a path: "export {a, b as c}".
+type SExportClause struct {
+	Items        []ClauseItem
+	IsSingleLine bool
+}
+
+// A re-export: "export {a, b as c} from 'path'".
+type SExportFrom struct {
+	Items             []ClauseItem
+	NamespaceRef      ast.Ref
+	ImportRecordIndex uint32
+	IsSingleLine      bool
+}
+
+// An "export default" statement.
+type SExportDefault struct {
+	Value       Stmt // May be a SExpr or SFunction or SClass
+	DefaultName ast.LocRef
+}
+
+type ExportStarAlias struct {
+	// Although this alias name starts off as being the same as the statement's
+	// namespace symbol, it may diverge if the namespace symbol name is minified.
+	// The original alias name is preserved here to avoid this scenario.
+	OriginalName string
+
+	Loc logger.Loc
+}
+
+// An "export * from 'path'" or "export * as alias from 'path'" statement.
+// "Alias" is nil for the un-aliased form.
+type SExportStar struct {
+	Alias             *ExportStarAlias
+	NamespaceRef      ast.Ref
+	ImportRecordIndex uint32
+}
+
+// This is an "export = value;" statement in TypeScript
+type SExportEquals struct {
+	Value Expr
+}
+
+// The decision of whether to export an expression using "module.exports" or
+// "export default" is deferred until linking using this statement kind
+type SLazyExport struct {
+	Value Expr
+}
+
+// An expression statement.
+type SExpr struct {
+	Value Expr
+
+	// This is set to true for automatically-generated expressions that are part
+	// of class syntax lowering. A single class declaration may end up with many
+	// generated expressions after it (e.g. class field initializations, a call
+	// to keep the original value of the "name" property). When this happens we
+	// can't tell that the class is side-effect free anymore because all of these
+	// methods mutate the class. We use this annotation for that instead.
+	IsFromClassOrFnThatCanBeRemovedIfUnused bool
+}
+
+// One member of a TypeScript enum declaration.
+type EnumValue struct {
+	ValueOrNil Expr
+	Name       []uint16
+	Ref        ast.Ref
+	Loc        logger.Loc
+}
+
+// A TypeScript "enum" declaration.
+type SEnum struct {
+	Values   []EnumValue
+	Name     ast.LocRef
+	Arg      ast.Ref
+	IsExport bool
+}
+
+// A TypeScript "namespace" declaration.
+type SNamespace struct {
+	Stmts    []Stmt
+	Name     ast.LocRef
+	Arg      ast.Ref
+	IsExport bool
+}
+
+// A function declaration statement.
+type SFunction struct {
+	Fn       Fn
+	IsExport bool
+}
+
+// A class declaration statement.
+type SClass struct {
+	Class    Class
+	IsExport bool
+}
+
+// A labeled statement: "Name: Stmt".
+type SLabel struct {
+	Stmt             Stmt
+	Name             ast.LocRef
+	IsSingleLineStmt bool
+}
+
+// An "if" statement with an optional "else" branch.
+type SIf struct {
+	Test            Expr
+	Yes             Stmt
+	NoOrNil         Stmt
+	IsSingleLineYes bool
+	IsSingleLineNo  bool
+}
+
+// A classic three-clause "for" loop; any clause may be absent.
+type SFor struct {
+	InitOrNil        Stmt // May be a SConst, SLet, SVar, or SExpr
+	TestOrNil        Expr
+	UpdateOrNil      Expr
+	Body             Stmt
+	IsSingleLineBody bool
+}
+
+// A "for (Init in Value)" loop.
+type SForIn struct {
+	Init             Stmt // May be a SConst, SLet, SVar, or SExpr
+	Value            Expr
+	Body             Stmt
+	IsSingleLineBody bool
+}
+
+// A "for (Init of Value)" loop; "Await" is non-empty for "for await".
+type SForOf struct {
+	Init             Stmt // May be a SConst, SLet, SVar, or SExpr
+	Value            Expr
+	Body             Stmt
+	Await            logger.Range
+	IsSingleLineBody bool
+}
+
+// A "do { Body } while (Test)" loop.
+type SDoWhile struct {
+	Body Stmt
+	Test Expr
+}
+
+// A "while (Test) Body" loop.
+type SWhile struct {
+	Test             Expr
+	Body             Stmt
+	IsSingleLineBody bool
+}
+
+// A "with (Value) Body" statement.
+type SWith struct {
+	Value            Expr
+	Body             Stmt
+	BodyLoc          logger.Loc
+	IsSingleLineBody bool
+}
+
+// The "catch" clause of a "try" statement; the binding is optional.
+type Catch struct {
+	BindingOrNil Binding
+	Block        SBlock
+	Loc          logger.Loc
+	BlockLoc     logger.Loc
+}
+
+// The "finally" clause of a "try" statement.
+type Finally struct {
+	Block SBlock
+	Loc   logger.Loc
+}
+
+// A "try" statement; "Catch" and "Finally" may each be nil.
+type STry struct {
+	Catch    *Catch
+	Finally  *Finally
+	Block    SBlock
+	BlockLoc logger.Loc
+}
+
+type Case struct {
+	ValueOrNil Expr // If this is nil, this is "default" instead of "case"
+	Body       []Stmt
+	Loc        logger.Loc
+}
+
+// A "switch" statement.
+type SSwitch struct {
+	Test          Expr
+	Cases         []Case
+	BodyLoc       logger.Loc
+	CloseBraceLoc logger.Loc
+}
+
+// This object represents all of these types of import statements:
+//
+//	import 'path'
+//	import {item1, item2} from 'path'
+//	import * as ns from 'path'
+//	import defaultItem, {item1, item2} from 'path'
+//	import defaultItem, * as ns from 'path'
+//
+// Many parts are optional and can be combined in different ways. The only
+// restriction is that you cannot have both a clause and a star namespace.
+type SImport struct {
+	DefaultName *ast.LocRef
+	Items       *[]ClauseItem
+	StarNameLoc *logger.Loc
+
+	// If this is a star import: This is a Ref for the namespace symbol. The Loc
+	// for the symbol is StarLoc.
+	//
+	// Otherwise: This is an auto-generated Ref for the namespace representing
+	// the imported file. In this case StarLoc is nil. The NamespaceRef is used
+	// when converting this module to a CommonJS module.
+	NamespaceRef ast.Ref
+
+	ImportRecordIndex uint32
+	IsSingleLine      bool
+}
+
+// A "return" statement with an optional value.
+type SReturn struct {
+	ValueOrNil Expr
+}
+
+// A "throw" statement.
+type SThrow struct {
+	Value Expr
+}
+
+// LocalKind is the declaration keyword of a local declaration statement.
+type LocalKind uint8
+
+const (
+	LocalVar LocalKind = iota
+	LocalLet
+	LocalConst
+	LocalUsing
+	LocalAwaitUsing
+)
+
+// IsUsing reports whether this is a "using" or "await using" declaration.
+// Note: this relies on those two kinds being last in the constant ordering.
+func (kind LocalKind) IsUsing() bool {
+	return kind >= LocalUsing
+}
+
+// A local declaration statement ("var"/"let"/"const"/"using"), possibly with
+// multiple declarations.
+type SLocal struct {
+	Decls    []Decl
+	Kind     LocalKind
+	IsExport bool
+
+	// The TypeScript compiler doesn't generate code for "import foo = bar"
+	// statements where the import is never used.
+	WasTSImportEquals bool
+}
+
+// A "break" statement with an optional label.
+type SBreak struct {
+	Label *ast.LocRef
+}
+
+// A "continue" statement with an optional label.
+type SContinue struct {
+	Label *ast.LocRef
+}
+
+// One entry of an import or export clause.
+type ClauseItem struct {
+	Alias string
+
+	// This is the original name of the symbol stored in "Name". It's needed for
+	// "SExportClause" statements such as this:
+	//
+	//   export {foo as bar} from 'path'
+	//
+	// In this case both "foo" and "bar" are aliases because it's a re-export.
+	// We need to preserve both aliases in case the symbol is renamed. In this
+	// example, "foo" is "OriginalName" and "bar" is "Alias".
+	OriginalName string
+
+	AliasLoc logger.Loc
+	Name     ast.LocRef
+}
+
+// One "Binding = Value" declaration within an SLocal.
+type Decl struct {
+	Binding    Binding
+	ValueOrNil Expr
+}
+
+// ScopeKind classifies what language construct created a scope.
+type ScopeKind uint8
+
+const (
+	ScopeBlock ScopeKind = iota
+	ScopeWith
+	ScopeLabel
+	ScopeClassName
+	ScopeClassBody
+	ScopeCatchBinding
+
+	// The scopes below stop hoisted variables from extending into parent scopes
+	ScopeEntry // This is a module, TypeScript enum, or TypeScript namespace
+	ScopeFunctionArgs
+	ScopeFunctionBody
+	ScopeClassStaticInit
+)
+
+// StopsHoisting reports whether "var" hoisting stops at this scope.
+// Note: this relies on the hoisting-boundary kinds being declared last,
+// starting at ScopeEntry.
+func (kind ScopeKind) StopsHoisting() bool {
+	return kind >= ScopeEntry
+}
+
+// A symbol declared in a scope, along with where it was declared.
+type ScopeMember struct {
+	Ref ast.Ref
+	Loc logger.Loc
+}
+
+// A lexical scope and its relationship to parent/child scopes.
+type Scope struct {
+	// This will be non-nil if this is a TypeScript "namespace" or "enum"
+	TSNamespace *TSNamespaceScope
+
+	Parent    *Scope
+	Children  []*Scope
+	Members   map[string]ScopeMember
+	Replaced  []ScopeMember
+	Generated []ast.Ref
+
+	// The location of the "use strict" directive for ExplicitStrictMode
+	UseStrictLoc logger.Loc
+
+	// This is used to store the ref of the label symbol for ScopeLabel scopes.
+	Label           ast.LocRef
+	LabelStmtIsLoop bool
+
+	// If a scope contains a direct eval() expression, then none of the symbols
+	// inside that scope can be renamed. We conservatively assume that the
+	// evaluated code might reference anything that it has access to.
+	ContainsDirectEval bool
+
+	// This is to help forbid "arguments" inside class body scopes
+	ForbidArguments bool
+
+	// As a special case, we enable constant propagation for any chain of "const"
+	// declarations at the start of a statement list. This special case doesn't
+	// have any TDZ considerations because no other statements come before it.
+	IsAfterConstLocalPrefix bool
+
+	StrictMode StrictModeKind
+	Kind       ScopeKind
+}
+
+// StrictModeKind records why (or whether) a scope is in strict mode.
+type StrictModeKind uint8
+
+const (
+	SloppyMode StrictModeKind = iota
+	ExplicitStrictMode
+	ImplicitStrictModeClass
+	ImplicitStrictModeESM
+	ImplicitStrictModeTSAlwaysStrict
+	ImplicitStrictModeJSXAutomaticRuntime
+)
+
+// RecursiveSetStrictMode propagates "kind" to this scope and its descendants.
+// It only overwrites scopes still in SloppyMode, so recursion stops at any
+// subtree that already has its own (explicit or implicit) strict mode.
+func (s *Scope) RecursiveSetStrictMode(kind StrictModeKind) {
+	if s.StrictMode == SloppyMode {
+		s.StrictMode = kind
+		for _, child := range s.Children {
+			child.RecursiveSetStrictMode(kind)
+		}
+	}
+}
+
+// This is for TypeScript "enum" and "namespace" blocks. Each block can
+// potentially be instantiated multiple times. The exported members of each
+// block are merged into a single namespace while the non-exported code is
+// still scoped to just within that block:
+//
+//	let x = 1;
+//	namespace Foo {
+//	  let x = 2;
+//	  export let y = 3;
+//	}
+//	namespace Foo {
+//	  console.log(x); // 1
+//	  console.log(y); // 3
+//	}
+//
+// Doing this also works inside an enum:
+//
+//	enum Foo {
+//	  A = 3,
+//	  B = A + 1,
+//	}
+//	enum Foo {
+//	  C = A + 2,
+//	}
+//	console.log(Foo.B) // 4
+//	console.log(Foo.C) // 5
+//
+// This is a form of identifier lookup that works differently than the
+// hierarchical scope-based identifier lookup in JavaScript. Lookup now needs
+// to search sibling scopes in addition to parent scopes. This is accomplished
+// by sharing the map of exported members between all matching sibling scopes.
+type TSNamespaceScope struct {
+	// This is shared between all sibling namespace blocks
+	ExportedMembers TSNamespaceMembers
+
+	// This is a lazily-generated map of identifiers that actually represent
+	// property accesses to this namespace's properties. For example:
+	//
+	//   namespace x {
+	//     export let y = 123
+	//   }
+	//   namespace x {
+	//     export let z = y
+	//   }
+	//
+	// This should be compiled into the following code:
+	//
+	//   var x;
+	//   (function(x2) {
+	//     x2.y = 123;
+	//   })(x || (x = {}));
+	//   (function(x3) {
+	//     x3.z = x3.y;
+	//   })(x || (x = {}));
+	//
+	// When we try to find the symbol "y", we instead return one of these lazily
+	// generated proxy symbols that represent the property access "x3.y". This
+	// map is unique per namespace block because "x3" is the argument symbol that
+	// is specific to that particular namespace block.
+	//
+	// NOTE(review): "Propery" below is a typo inherited from upstream; renaming
+	// the field would break other vendored files that reference it.
+	LazilyGeneratedProperyAccesses map[string]ast.Ref
+
+	// This is specific to this namespace block. It's the argument of the
+	// immediately-invoked function expression that the namespace block is
+	// compiled into:
+	//
+	//   var ns;
+	//   (function (ns2) {
+	//     ns2.x = 123;
+	//   })(ns || (ns = {}));
+	//
+	// This variable is "ns2" in the above example. It's the symbol to use when
+	// generating property accesses off of this namespace when it's in scope.
+	ArgRef ast.Ref
+
+	// Even though enums are like namespaces and both enums and namespaces allow
+	// implicit references to properties of sibling scopes, they behave like
+	// separate, er, namespaces. Implicit references only work namespace-to-
+	// namespace and enum-to-enum. They do not work enum-to-namespace. And I'm
+	// not sure what's supposed to happen for the namespace-to-enum case because
+	// the compiler crashes: https://github.com/microsoft/TypeScript/issues/46891.
+	// So basically these both work:
+	//
+	//   enum a { b = 1 }
+	//   enum a { c = b }
+	//
+	//   namespace x { export let y = 1 }
+	//   namespace x { export let z = y }
+	//
+	// This doesn't work:
+	//
+	//   enum a { b = 1 }
+	//   namespace a { export let c = b }
+	//
+	// And this crashes the TypeScript compiler:
+	//
+	//   namespace a { export let b = 1 }
+	//   enum a { c = b }
+	//
+	// Therefore we only allow enum/enum and namespace/namespace interactions.
+	IsEnumScope bool
+}
+
+// TSNamespaceMembers maps an exported member name to its metadata. This map
+// is shared between sibling namespace/enum blocks (see TSNamespaceScope).
+type TSNamespaceMembers map[string]TSNamespaceMember
+
+// One exported member of a TypeScript namespace or enum.
+type TSNamespaceMember struct {
+	Data        TSNamespaceMemberData
+	Loc         logger.Loc
+	IsEnumValue bool
+}
+
+// TSNamespaceMemberData is a variant type over the member kinds below.
+type TSNamespaceMemberData interface {
+	isTSNamespaceMember()
+}
+
+func (TSNamespaceMemberProperty) isTSNamespaceMember()   {}
+func (TSNamespaceMemberNamespace) isTSNamespaceMember()  {}
+func (TSNamespaceMemberEnumNumber) isTSNamespaceMember() {}
+func (TSNamespaceMemberEnumString) isTSNamespaceMember() {}
+
+// "namespace ns { export let it }"
+type TSNamespaceMemberProperty struct{}
+
+// "namespace ns { export namespace it {} }"
+type TSNamespaceMemberNamespace struct {
+	ExportedMembers TSNamespaceMembers
+}
+
+// "enum ns { it }"
+type TSNamespaceMemberEnumNumber struct {
+	Value float64
+}
+
+// "enum ns { it = 'it' }"
+type TSNamespaceMemberEnumString struct {
+	Value []uint16
+}
+
+// ExportsKind classifies how a module exposes its exports to importers.
+type ExportsKind uint8
+
+const (
+	// This file doesn't have any kind of export, so it's impossible to say what
+	// kind of file this is. An empty file is in this category, for example.
+	ExportsNone ExportsKind = iota
+
+	// The exports are stored on "module" and/or "exports". Calling "require()"
+	// on this module returns "module.exports". All imports to this module are
+	// allowed but may return undefined.
+	ExportsCommonJS
+
+	// All export names are known explicitly. Calling "require()" on this module
+	// generates an exports object (stored in "exports") with getters for the
+	// export names. Named imports to this module are only allowed if they are
+	// in the set of export names.
+	ExportsESM
+
+	// Some export names are known explicitly, but others fall back to a dynamic
+	// run-time object. This is necessary when using the "export * from" syntax
+	// with either a CommonJS module or an external module (i.e. a module whose
+	// export names are not known at compile-time).
+	//
+	// Calling "require()" on this module generates an exports object (stored in
+	// "exports") with getters for the export names. All named imports to this
+	// module are allowed. Direct named imports reference the corresponding export
+	// directly. Other imports go through property accesses on "exports".
+	ExportsESMWithDynamicFallback
+)
+
+// IsDynamic reports whether the full set of export names is only knowable at
+// run-time (CommonJS exports or ESM with a dynamic fallback).
+func (kind ExportsKind) IsDynamic() bool {
+	return kind == ExportsCommonJS || kind == ExportsESMWithDynamicFallback
+}
+
+// ModuleType records how a file's module format was determined (file
+// extension or the "type" field in package.json).
+type ModuleType uint8
+
+const (
+	ModuleUnknown ModuleType = iota
+
+	// ".cjs" or ".cts" or "type: commonjs" in package.json
+	ModuleCommonJS_CJS
+	ModuleCommonJS_CTS
+	ModuleCommonJS_PackageJSON
+
+	// ".mjs" or ".mts" or "type: module" in package.json
+	ModuleESM_MJS
+	ModuleESM_MTS
+	ModuleESM_PackageJSON
+)
+
+// IsCommonJS reports whether this is any of the CommonJS variants.
+// Note: both range checks below depend on the constant ordering above.
+func (mt ModuleType) IsCommonJS() bool {
+	return mt >= ModuleCommonJS_CJS && mt <= ModuleCommonJS_PackageJSON
+}
+
+// IsESM reports whether this is any of the ESM variants.
+func (mt ModuleType) IsESM() bool {
+	return mt >= ModuleESM_MJS && mt <= ModuleESM_PackageJSON
+}
+
+// ModuleTypeData is a ModuleType plus where that determination came from,
+// for use in log messages.
+type ModuleTypeData struct {
+	Source *logger.Source
+	Range  logger.Range
+	Type   ModuleType
+}
+
+// This is the index to the automatically-generated part containing code that
+// calls "__export(exports, { ... getters ... })". This is used to generate
+// getters on an exports object for ES6 export statements, and is both for
+// ES6 star imports and CommonJS-style modules. All files have one of these,
+// although it may contain no statements if there is nothing to export.
+const NSExportPartIndex = uint32(0)
+
+// AST is the parsed representation of a single JavaScript/TypeScript file,
+// plus the metadata the bundler and linker need about it.
+type AST struct {
+	ModuleTypeData ModuleTypeData
+	Parts          []Part
+	Symbols        []ast.Symbol
+	ExprComments   map[logger.Loc][]string
+	ModuleScope    *Scope
+	CharFreq       *ast.CharFreq
+
+	// This is internal-only data used for the implementation of Yarn PnP
+	ManifestForYarnPnP Expr
+
+	Hashbang   string   // The "#!..." line, if any
+	Directives []string // Directive prologue entries such as "use strict"
+	URLForCSS  string
+
+	// Note: If you're in the linker, do not use this map directly. This map is
+	// filled in by the parser and is considered immutable. For performance reasons,
+	// the linker doesn't mutate this map (cloning a map is slow in Go). Instead the
+	// linker super-imposes relevant information on top in a method call. You should
+	// call "TopLevelSymbolToParts" instead.
+	TopLevelSymbolToPartsFromParser map[ast.Ref][]uint32
+
+	// This contains all top-level exported TypeScript enum constants. It exists
+	// to enable cross-module inlining of constant enums.
+	TSEnums map[ast.Ref]map[string]TSEnumValue
+
+	// This contains the values of all detected inlinable constants. It exists
+	// to enable cross-module inlining of these constants.
+	ConstValues map[ast.Ref]ConstValue
+
+	// Properties in here are represented as symbols instead of strings, which
+	// allows them to be renamed to smaller names.
+	MangledProps map[string]ast.Ref
+
+	// Properties in here are existing non-mangled properties in the source code
+	// and must not be used when generating mangled names to avoid a collision.
+	ReservedProps map[string]bool
+
+	// These are stored at the AST level instead of on individual AST nodes so
+	// they can be manipulated efficiently without a full AST traversal
+	ImportRecords []ast.ImportRecord
+
+	// These are used when bundling. They are filled in during the parser pass
+	// since we already have to traverse the AST then anyway and the parser pass
+	// is conveniently fully parallelized.
+	NamedImports            map[ast.Ref]NamedImport
+	NamedExports            map[string]NamedExport
+	ExportStarImportRecords []uint32
+
+	SourceMapComment logger.Span
+
+	// This is a list of ES6 features. They are ranges instead of booleans so
+	// that they can be used in log messages. Check to see if "Len > 0".
+	ExportKeyword            logger.Range // Does not include TypeScript-specific syntax
+	TopLevelAwaitKeyword     logger.Range
+	LiveTopLevelAwaitKeyword logger.Range // Excludes top-level await in dead branches
+
+	ExportsRef ast.Ref
+	ModuleRef  ast.Ref
+	WrapperRef ast.Ref
+
+	ApproximateLineCount  int32
+	NestedScopeSlotCounts ast.SlotCounts
+	HasLazyExport         bool
+
+	// This is a list of CommonJS features. When a file uses CommonJS features,
+	// it's not a candidate for "flat bundling" and must be wrapped in its own
+	// closure. Note that this also includes top-level "return" but these aren't
+	// here because only the parser checks those.
+	UsesExportsRef bool
+	UsesModuleRef  bool
+	ExportsKind    ExportsKind
+}
+
+// TSEnumValue is the value of one TypeScript enum member: either a string
+// or a number, discriminated by whether "String" is nil.
+type TSEnumValue struct {
+	String []uint16 // Use this if it's not nil
+	Number float64  // Use this if "String" is nil
+}
+
+// ConstValueKind tags which constant (if any) a ConstValue holds.
+type ConstValueKind uint8
+
+const (
+	ConstValueNone ConstValueKind = iota
+	ConstValueNull
+	ConstValueUndefined
+	ConstValueTrue
+	ConstValueFalse
+	ConstValueNumber
+)
+
+// ConstValue is a compact representation of an inlinable constant.
+// The zero value (ConstValueNone) means "not a constant".
+type ConstValue struct {
+	Number float64 // Use this for "ConstValueNumber"
+	Kind   ConstValueKind
+}
+
+// ExprToConstValue converts a literal expression into a ConstValue for
+// cross-module constant inlining. It returns the zero ConstValue (kind
+// ConstValueNone) for anything that shouldn't be inlined: strings, bigints,
+// large numbers, and all non-literal expressions.
+func ExprToConstValue(expr Expr) ConstValue {
+	switch v := expr.Data.(type) {
+	case *ENull:
+		return ConstValue{Kind: ConstValueNull}
+
+	case *EUndefined:
+		return ConstValue{Kind: ConstValueUndefined}
+
+	case *EBoolean:
+		if v.Value {
+			return ConstValue{Kind: ConstValueTrue}
+		} else {
+			return ConstValue{Kind: ConstValueFalse}
+		}
+
+	case *ENumber:
+		// Inline integers and other small numbers. Don't inline large
+		// real numbers because people may not want them to be inlined
+		// as it will increase the minified code size by too much.
+		// "Small" here means the shortest float formatting is <= 8 chars.
+		if asInt := int64(v.Value); v.Value == float64(asInt) || len(strconv.FormatFloat(v.Value, 'g', -1, 64)) <= 8 {
+			return ConstValue{Kind: ConstValueNumber, Number: v.Value}
+		}
+
+	case *EString:
+		// I'm deliberately not inlining strings here. It seems more likely that
+		// people won't want them to be inlined since they can be arbitrarily long.
+
+	case *EBigInt:
+		// I'm deliberately not inlining bigints here for the same reason (they can
+		// be arbitrarily long).
+	}
+
+	return ConstValue{}
+}
+
+// ConstValueToExpr is the inverse of ExprToConstValue: it rebuilds the AST
+// expression for a constant at the given source location. Null/undefined use
+// the shared singleton nodes to avoid allocations. It panics if called with
+// ConstValueNone or an unknown kind, since callers are expected to only pass
+// values produced by ExprToConstValue.
+func ConstValueToExpr(loc logger.Loc, value ConstValue) Expr {
+	switch value.Kind {
+	case ConstValueNull:
+		return Expr{Loc: loc, Data: ENullShared}
+
+	case ConstValueUndefined:
+		return Expr{Loc: loc, Data: EUndefinedShared}
+
+	case ConstValueTrue:
+		return Expr{Loc: loc, Data: &EBoolean{Value: true}}
+
+	case ConstValueFalse:
+		return Expr{Loc: loc, Data: &EBoolean{Value: false}}
+
+	case ConstValueNumber:
+		return Expr{Loc: loc, Data: &ENumber{Value: value.Number}}
+	}
+
+	panic("Internal error: invalid constant value")
+}
+
// NamedImport describes one named binding imported into this file, plus the
// bookkeeping needed later by the linker: which local parts use it, which
// import record it came from, and how it was aliased.
type NamedImport struct {
	// The name this binding was imported under
	Alias string

	// Parts within this file that use this import
	LocalPartsWithUses []uint32

	AliasLoc          logger.Loc
	NamespaceRef      ast.Ref
	ImportRecordIndex uint32

	// If true, the alias refers to the entire export namespace object of a
	// module. This is no longer represented as an alias called "*" because of
	// the upcoming "Arbitrary module namespace identifier names" feature:
	// https://github.com/tc39/ecma262/pull/2154
	AliasIsStar bool

	// It's useful to flag exported imports because if they are in a TypeScript
	// file, we can't tell if they are a type or a value.
	IsExported bool
}
+
// NamedExport maps an exported alias to the symbol it refers to and records
// where the alias appears in the source code.
type NamedExport struct {
	Ref      ast.Ref
	AliasLoc logger.Loc
}
+
// Each file is made up of multiple parts, and each part consists of one or
// more top-level statements. Parts are used for tree shaking and code
// splitting analysis. Individual parts of a file can be discarded by tree
// shaking and can be assigned to separate chunks (i.e. output files) by code
// splitting.
type Part struct {
	// The top-level statements that make up this part
	Stmts  []Stmt
	// Scopes associated with this part's statements (NOTE(review): assumed
	// to be the scopes introduced by "Stmts"; confirm against the parser)
	Scopes []*Scope

	// Each is an index into the file-level import record list
	ImportRecordIndices []uint32

	// All symbols that are declared in this part. Note that a given symbol may
	// have multiple declarations, and so may end up being declared in multiple
	// parts (e.g. multiple "var" declarations with the same name). Also note
	// that this list isn't deduplicated and may contain duplicates.
	DeclaredSymbols []DeclaredSymbol

	// An estimate of the number of uses of all symbols used within this part.
	SymbolUses map[ast.Ref]SymbolUse

	// An estimate of the number of uses of all symbols used as the target of
	// function calls within this part.
	SymbolCallUses map[ast.Ref]SymbolCallUse

	// This tracks property accesses off of imported symbols. We don't know
	// during parsing if an imported symbol is going to be an inlined enum
	// value or not. This is only known during linking. So we defer adding
	// a dependency on these imported symbols until we know whether the
	// property access is an inlined enum value or not.
	ImportSymbolPropertyUses map[ast.Ref]map[string]SymbolUse

	// The indices of the other parts in this file that are needed if this part
	// is needed.
	Dependencies []Dependency

	// If true, this part can be removed if none of the declared symbols are
	// used. If the file containing this part is imported, then all parts that
	// don't have this flag enabled must be included.
	CanBeRemovedIfUnused bool

	// This is used for generated parts that we don't want to be present if they
	// aren't needed. This enables tree shaking for these parts even if global
	// tree shaking isn't enabled.
	ForceTreeShaking bool

	// This is true if this file has been marked as live by the tree shaking
	// algorithm.
	IsLive bool
}
+
// Dependency identifies a specific part within a specific source file that
// must be included whenever the part holding this dependency is included.
type Dependency struct {
	SourceIndex uint32
	PartIndex   uint32
}
+
// DeclaredSymbol records a symbol declared by a part and whether that
// declaration is at the top level of the file.
type DeclaredSymbol struct {
	Ref        ast.Ref
	IsTopLevel bool
}
+
// SymbolUse holds an estimate of how many times a symbol is used within a
// part (see "Part.SymbolUses").
type SymbolUse struct {
	CountEstimate uint32
}
+
// SymbolCallUse holds estimates of how many times a symbol is used as a call
// target, and how many of those calls pass exactly one non-spread argument.
type SymbolCallUse struct {
	CallCountEstimate                   uint32
	SingleArgNonSpreadCallCountEstimate uint32
}
+
+// For readability, the names of certain automatically-generated symbols are
+// derived from the file name. For example, instead of the CommonJS wrapper for
+// a file being called something like "require273" it can be called something
+// like "require_react" instead. This function generates the part of these
+// identifiers that's specific to the file path. It can take both an absolute
+// path (OS-specific) and a path in the source code (OS-independent).
+//
+// Note that these generated names do not at all relate to the correctness of
+// the code as far as avoiding symbol name collisions. These names still go
+// through the renaming logic that all other symbols go through to avoid name
+// collisions.
+func GenerateNonUniqueNameFromPath(path string) string {
+	// Get the file name without the extension
+	dir, base, _ := logger.PlatformIndependentPathDirBaseExt(path)
+
+	// If the name is "index", use the directory name instead. This is because
+	// many packages in npm use the file name "index.js" because it triggers
+	// node's implicit module resolution rules that allows you to import it by
+	// just naming the directory.
+	if base == "index" {
+		_, dirBase, _ := logger.PlatformIndependentPathDirBaseExt(dir)
+		if dirBase != "" {
+			base = dirBase
+		}
+	}
+
+	return EnsureValidIdentifier(base)
+}
+
// EnsureValidIdentifier reduces "base" to a safe ASCII identifier: runs of
// disallowed characters between kept characters collapse into a single "_",
// leading digits are dropped, and an empty result becomes "_".
func EnsureValidIdentifier(base string) string {
	// Convert it to an ASCII identifier. Note: If you change this to a non-ASCII
	// identifier, you're going to potentially cause trouble with non-BMP code
	// points in target environments that don't support bracketed Unicode escapes.
	var sb strings.Builder
	pendingGap := false
	for _, r := range base {
		isLetter := (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
		isDigit := r >= '0' && r <= '9'
		switch {
		case isLetter || (isDigit && sb.Len() > 0):
			if pendingGap {
				sb.WriteByte('_')
				pendingGap = false
			}
			// Only ASCII reaches here, so the byte conversion is lossless
			sb.WriteByte(byte(r))
		case sb.Len() > 0:
			// Remember that characters were skipped after the kept prefix
			pendingGap = true
		}
	}

	// Make sure the name isn't empty
	if sb.Len() == 0 {
		return "_"
	}
	return sb.String()
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast_helpers.go b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast_helpers.go
new file mode 100644
index 0000000..da78ea7
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ast_helpers.go
@@ -0,0 +1,2973 @@
+package js_ast
+
+import (
+	"math"
+	"strconv"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// HelperContext bundles the callbacks that the helper functions in this file
// need from the caller. Currently that's just a predicate reporting whether
// a symbol reference is unbound.
type HelperContext struct {
	isUnbound func(ast.Ref) bool
}
+
+func MakeHelperContext(isUnbound func(ast.Ref) bool) HelperContext {
+	return HelperContext{
+		isUnbound: isUnbound,
+	}
+}
+
+// If this returns true, then calling this expression captures the target of
+// the property access as "this" when calling the function in the property.
+func IsPropertyAccess(expr Expr) bool {
+	switch expr.Data.(type) {
+	case *EDot, *EIndex:
+		return true
+	}
+	return false
+}
+
+func IsOptionalChain(value Expr) bool {
+	switch e := value.Data.(type) {
+	case *EDot:
+		return e.OptionalChain != OptionalChainNone
+	case *EIndex:
+		return e.OptionalChain != OptionalChainNone
+	case *ECall:
+		return e.OptionalChain != OptionalChainNone
+	}
+	return false
+}
+
+func Assign(a Expr, b Expr) Expr {
+	return Expr{Loc: a.Loc, Data: &EBinary{Op: BinOpAssign, Left: a, Right: b}}
+}
+
+func AssignStmt(a Expr, b Expr) Stmt {
+	return Stmt{Loc: a.Loc, Data: &SExpr{Value: Assign(a, b)}}
+}
+
+// Wraps the provided expression in the "!" prefix operator. The expression
+// will potentially be simplified to avoid generating unnecessary extra "!"
+// operators. For example, calling this with "!!x" will return "!x" instead
+// of returning "!!!x".
+func Not(expr Expr) Expr {
+	if result, ok := MaybeSimplifyNot(expr); ok {
+		return result
+	}
+	return Expr{Loc: expr.Loc, Data: &EUnary{Op: UnOpNot, Value: expr}}
+}
+
// The given "expr" argument should be the operand of a "!" prefix operator
// (i.e. the "x" in "!x"). This returns a simplified expression for the
// whole operator (i.e. the "!x") if it can be simplified, or false if not.
// It's separate from "Not()" above to avoid allocation on failure in case
// that is undesired.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func MaybeSimplifyNot(expr Expr) (Expr, bool) {
	switch e := expr.Data.(type) {
	case *EAnnotation:
		// Look through annotation wrappers
		return MaybeSimplifyNot(e.Value)

	case *EInlinedEnum:
		if value, ok := MaybeSimplifyNot(e.Value); ok {
			return value, true
		}

	case *ENull, *EUndefined:
		// "!null" and "!undefined" are both "true"
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: true}}, true

	case *EBoolean:
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: !e.Value}}, true

	case *ENumber:
		// "!n" is true exactly when n is 0, -0, or NaN
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: e.Value == 0 || math.IsNaN(e.Value)}}, true

	case *EBigInt:
		if equal, ok := CheckEqualityBigInt(e.Value, "0"); ok {
			return Expr{Loc: expr.Loc, Data: &EBoolean{Value: equal}}, true
		}

	case *EString:
		// Only the empty string is falsy
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: len(e.Value) == 0}}, true

	case *EFunction, *EArrow, *ERegExp:
		// Function and regexp literals are always truthy
		return Expr{Loc: expr.Loc, Data: &EBoolean{Value: false}}, true

	case *EUnary:
		// "!!!a" => "!a"
		if e.Op == UnOpNot && KnownPrimitiveType(e.Value.Data) == PrimitiveBoolean {
			return e.Value, true
		}

	case *EBinary:
		// Make sure that these transformations are all safe for special values.
		// For example, "!(a < b)" is not the same as "a >= b" if a and/or b are
		// NaN (or undefined, or null, or possibly other problem cases too).
		switch e.Op {
		case BinOpLooseEq:
			// "!(a == b)" => "a != b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpLooseNe, Left: e.Left, Right: e.Right}}, true

		case BinOpLooseNe:
			// "!(a != b)" => "a == b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpLooseEq, Left: e.Left, Right: e.Right}}, true

		case BinOpStrictEq:
			// "!(a === b)" => "a !== b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpStrictNe, Left: e.Left, Right: e.Right}}, true

		case BinOpStrictNe:
			// "!(a !== b)" => "a === b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpStrictEq, Left: e.Left, Right: e.Right}}, true

		case BinOpComma:
			// "!(a, b)" => "a, !b"
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: BinOpComma, Left: e.Left, Right: Not(e.Right)}}, true
		}
	}

	return Expr{}, false
}
+
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func MaybeSimplifyEqualityComparison(loc logger.Loc, e *EBinary, unsupportedFeatures compat.JSFeature) (Expr, bool) {
	value, primitive := e.Left, e.Right

	// Detect when the primitive comes first and flip the order of our checks
	if IsPrimitiveLiteral(value.Data) {
		value, primitive = primitive, value
	}

	// "!x === true" => "!x"
	// "!x === false" => "!!x"
	// "!x !== true" => "!!x"
	// "!x !== false" => "!x"
	if boolean, ok := primitive.Data.(*EBoolean); ok && KnownPrimitiveType(value.Data) == PrimitiveBoolean {
		if boolean.Value == (e.Op == BinOpLooseNe || e.Op == BinOpStrictNe) {
			return Not(value), true
		} else {
			return value, true
		}
	}

	// "typeof x != 'undefined'" => "typeof x < 'u'"
	// "typeof x == 'undefined'" => "typeof x > 'u'"
	if !unsupportedFeatures.Has(compat.TypeofExoticObjectIsObject) {
		// Only do this optimization if we know that the "typeof" operator won't
		// return something random. The only case of this happening was Internet
		// Explorer returning "unknown" for some objects, which messes with this
		// optimization. So we don't do this when targeting Internet Explorer.
		if typeof, ok := value.Data.(*EUnary); ok && typeof.Op == UnOpTypeof {
			if str, ok := primitive.Data.(*EString); ok && helpers.UTF16EqualsString(str.Value, "undefined") {
				// "flip" remembers whether the operands were swapped above so the
				// rewritten comparison preserves the original operand order
				flip := value == e.Right
				op := BinOpLt
				if (e.Op == BinOpLooseEq || e.Op == BinOpStrictEq) != flip {
					op = BinOpGt
				}
				// This writes to the local copy of the Expr, not the original AST node
				primitive.Data = &EString{Value: []uint16{'u'}}
				if flip {
					value, primitive = primitive, value
				}
				return Expr{Loc: loc, Data: &EBinary{Op: op, Left: value, Right: primitive}}, true
			}
		}
	}

	return Expr{}, false
}
+
+func IsSymbolInstance(data E) bool {
+	switch e := data.(type) {
+	case *EDot:
+		return e.IsSymbolInstance
+
+	case *EIndex:
+		return e.IsSymbolInstance
+	}
+	return false
+}
+
+func IsPrimitiveLiteral(data E) bool {
+	switch e := data.(type) {
+	case *EAnnotation:
+		return IsPrimitiveLiteral(e.Value.Data)
+
+	case *EInlinedEnum:
+		return IsPrimitiveLiteral(e.Value.Data)
+
+	case *ENull, *EUndefined, *EString, *EBoolean, *ENumber, *EBigInt:
+		return true
+	}
+	return false
+}
+
// PrimitiveType classifies the statically-known result type of an expression
// (see "KnownPrimitiveType" below).
type PrimitiveType uint8

const (
	// The type could not be determined statically
	PrimitiveUnknown PrimitiveType = iota
	// Definitely some kind of primitive, but more than one kind is possible
	PrimitiveMixed
	PrimitiveNull
	PrimitiveUndefined
	PrimitiveBoolean
	PrimitiveNumber
	PrimitiveString
	PrimitiveBigInt
)
+
+// This can be used when the returned type is either one or the other
+func MergedKnownPrimitiveTypes(a Expr, b Expr) PrimitiveType {
+	x := KnownPrimitiveType(a.Data)
+	if x == PrimitiveUnknown {
+		return PrimitiveUnknown
+	}
+
+	y := KnownPrimitiveType(b.Data)
+	if y == PrimitiveUnknown {
+		return PrimitiveUnknown
+	}
+
+	if x == y {
+		return x
+	}
+	return PrimitiveMixed // Definitely some kind of primitive
+}
+
// Note: This function does not say whether the expression is side-effect free
// or not. For example, the expression "++x" always returns a primitive.
func KnownPrimitiveType(expr E) PrimitiveType {
	switch e := expr.(type) {
	case *EAnnotation:
		return KnownPrimitiveType(e.Value.Data)

	case *EInlinedEnum:
		return KnownPrimitiveType(e.Value.Data)

	case *ENull:
		return PrimitiveNull

	case *EUndefined:
		return PrimitiveUndefined

	case *EBoolean:
		return PrimitiveBoolean

	case *ENumber:
		return PrimitiveNumber

	case *EString:
		return PrimitiveString

	case *EBigInt:
		return PrimitiveBigInt

	case *ETemplate:
		// An untagged template literal always evaluates to a string
		if e.TagOrNil.Data == nil {
			return PrimitiveString
		}

	case *EIf:
		return MergedKnownPrimitiveTypes(e.Yes, e.No)

	case *EUnary:
		switch e.Op {
		case UnOpVoid:
			return PrimitiveUndefined

		case UnOpTypeof:
			return PrimitiveString

		case UnOpNot, UnOpDelete:
			return PrimitiveBoolean

		case UnOpPos:
			return PrimitiveNumber // Cannot be bigint because that throws an exception

		case UnOpNeg, UnOpCpl:
			// "-x" and "~x" preserve bigint-ness; any other known primitive
			// operand coerces to number
			value := KnownPrimitiveType(e.Value.Data)
			if value == PrimitiveBigInt {
				return PrimitiveBigInt
			}
			if value != PrimitiveUnknown && value != PrimitiveMixed {
				return PrimitiveNumber
			}
			return PrimitiveMixed // Can be number or bigint

		case UnOpPreDec, UnOpPreInc, UnOpPostDec, UnOpPostInc:
			return PrimitiveMixed // Can be number or bigint
		}

	case *EBinary:
		switch e.Op {
		case BinOpStrictEq, BinOpStrictNe, BinOpLooseEq, BinOpLooseNe,
			BinOpLt, BinOpGt, BinOpLe, BinOpGe,
			BinOpInstanceof, BinOpIn:
			return PrimitiveBoolean

		case BinOpLogicalOr, BinOpLogicalAnd:
			return MergedKnownPrimitiveTypes(e.Left, e.Right)

		case BinOpNullishCoalescing:
			left := KnownPrimitiveType(e.Left.Data)
			right := KnownPrimitiveType(e.Right.Data)
			if left == PrimitiveNull || left == PrimitiveUndefined {
				// The left side is always skipped, so only the right side matters
				return right
			}
			if left != PrimitiveUnknown {
				if left != PrimitiveMixed {
					return left // Definitely not null or undefined
				}
				if right != PrimitiveUnknown {
					return PrimitiveMixed // Definitely some kind of primitive
				}
			}

		case BinOpAdd:
			left := KnownPrimitiveType(e.Left.Data)
			right := KnownPrimitiveType(e.Right.Data)
			if left == PrimitiveString || right == PrimitiveString {
				// String concatenation wins over numeric addition
				return PrimitiveString
			}
			if left == PrimitiveBigInt && right == PrimitiveBigInt {
				return PrimitiveBigInt
			}
			if left != PrimitiveUnknown && left != PrimitiveMixed && left != PrimitiveBigInt &&
				right != PrimitiveUnknown && right != PrimitiveMixed && right != PrimitiveBigInt {
				return PrimitiveNumber
			}
			return PrimitiveMixed // Can be number or bigint or string (or an exception)

		case BinOpAddAssign:
			right := KnownPrimitiveType(e.Right.Data)
			if right == PrimitiveString {
				return PrimitiveString
			}
			return PrimitiveMixed // Can be number or bigint or string (or an exception)

		case
			BinOpSub, BinOpSubAssign,
			BinOpMul, BinOpMulAssign,
			BinOpDiv, BinOpDivAssign,
			BinOpRem, BinOpRemAssign,
			BinOpPow, BinOpPowAssign,
			BinOpBitwiseAnd, BinOpBitwiseAndAssign,
			BinOpBitwiseOr, BinOpBitwiseOrAssign,
			BinOpBitwiseXor, BinOpBitwiseXorAssign,
			BinOpShl, BinOpShlAssign,
			BinOpShr, BinOpShrAssign,
			BinOpUShr, BinOpUShrAssign:
			return PrimitiveMixed // Can be number or bigint (or an exception)

		case BinOpAssign, BinOpComma:
			// These evaluate to their right-hand side
			return KnownPrimitiveType(e.Right.Data)
		}
	}

	return PrimitiveUnknown
}
+
+func CanChangeStrictToLoose(a Expr, b Expr) bool {
+	x := KnownPrimitiveType(a.Data)
+	y := KnownPrimitiveType(b.Data)
+	return x == y && x != PrimitiveUnknown && x != PrimitiveMixed
+}
+
+// Returns true if the result of the "typeof" operator on this expression is
+// statically determined and this expression has no side effects (i.e. can be
+// removed without consequence).
+func TypeofWithoutSideEffects(data E) (string, bool) {
+	switch e := data.(type) {
+	case *EAnnotation:
+		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
+			return TypeofWithoutSideEffects(e.Value.Data)
+		}
+
+	case *EInlinedEnum:
+		return TypeofWithoutSideEffects(e.Value.Data)
+
+	case *ENull:
+		return "object", true
+
+	case *EUndefined:
+		return "undefined", true
+
+	case *EBoolean:
+		return "boolean", true
+
+	case *ENumber:
+		return "number", true
+
+	case *EBigInt:
+		return "bigint", true
+
+	case *EString:
+		return "string", true
+
+	case *EFunction, *EArrow:
+		return "function", true
+	}
+
+	return "", false
+}
+
// The goal of this function is to "rotate" the AST if it's possible to use the
// left-associative property of the operator to avoid unnecessary parentheses.
//
// When using this, make absolutely sure that the operator is actually
// associative. For example, the "+" operator is not associative for
// floating-point numbers.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func JoinWithLeftAssociativeOp(op OpCode, a Expr, b Expr) Expr {
	// "(a, b) op c" => "a, b op c"
	if comma, ok := a.Data.(*EBinary); ok && comma.Op == BinOpComma {
		// Don't mutate the original AST
		clone := *comma
		clone.Right = JoinWithLeftAssociativeOp(op, clone.Right, b)
		return Expr{Loc: a.Loc, Data: &clone}
	}

	// "a op (b op c)" => "(a op b) op c"
	// "a op (b op (c op d))" => "((a op b) op c) op d"
	//
	// Repeatedly peel the left operand off a right-leaning chain of the same
	// operator and fold it into "a" until "b" is no longer such a chain.
	for {
		if binary, ok := b.Data.(*EBinary); ok && binary.Op == op {
			a = JoinWithLeftAssociativeOp(op, a, binary.Left)
			b = binary.Right
		} else {
			break
		}
	}

	// "a op b" => "a op b"
	// "(a op b) op c" => "(a op b) op c"
	return Expr{Loc: a.Loc, Data: &EBinary{Op: op, Left: a, Right: b}}
}
+
+func JoinWithComma(a Expr, b Expr) Expr {
+	if a.Data == nil {
+		return b
+	}
+	if b.Data == nil {
+		return a
+	}
+	return Expr{Loc: a.Loc, Data: &EBinary{Op: BinOpComma, Left: a, Right: b}}
+}
+
+func JoinAllWithComma(all []Expr) (result Expr) {
+	for _, value := range all {
+		result = JoinWithComma(result, value)
+	}
+	return
+}
+
// ConvertBindingToExpr converts a binding pattern (a destructuring target)
// into the equivalent expression form. If "wrapIdentifier" is non-nil, it is
// applied to every identifier reference in the pattern; otherwise a plain
// EIdentifier is produced. Panics on an unknown binding kind.
func ConvertBindingToExpr(binding Binding, wrapIdentifier func(logger.Loc, ast.Ref) Expr) Expr {
	loc := binding.Loc

	switch b := binding.Data.(type) {
	case *BMissing:
		return Expr{Loc: loc, Data: &EMissing{}}

	case *BIdentifier:
		if wrapIdentifier != nil {
			return wrapIdentifier(loc, b.Ref)
		}
		return Expr{Loc: loc, Data: &EIdentifier{Ref: b.Ref}}

	case *BArray:
		exprs := make([]Expr, len(b.Items))
		for i, item := range b.Items {
			expr := ConvertBindingToExpr(item.Binding, wrapIdentifier)
			// A spread can only be the final item; a default value can only
			// appear on non-spread items
			if b.HasSpread && i+1 == len(b.Items) {
				expr = Expr{Loc: expr.Loc, Data: &ESpread{Value: expr}}
			} else if item.DefaultValueOrNil.Data != nil {
				expr = Assign(expr, item.DefaultValueOrNil)
			}
			exprs[i] = expr
		}
		return Expr{Loc: loc, Data: &EArray{
			Items:        exprs,
			IsSingleLine: b.IsSingleLine,
		}}

	case *BObject:
		properties := make([]Property, len(b.Properties))
		for i, property := range b.Properties {
			value := ConvertBindingToExpr(property.Value, wrapIdentifier)
			kind := PropertyField
			if property.IsSpread {
				kind = PropertySpread
			}
			var flags PropertyFlags
			if property.IsComputed {
				flags |= PropertyIsComputed
			}
			properties[i] = Property{
				Kind:             kind,
				Flags:            flags,
				Key:              property.Key,
				ValueOrNil:       value,
				InitializerOrNil: property.DefaultValueOrNil,
			}
		}
		return Expr{Loc: loc, Data: &EObject{
			Properties:   properties,
			IsSingleLine: b.IsSingleLine,
		}}

	default:
		panic("Internal error")
	}
}
+
// This will return a nil expression if the expression can be totally removed.
//
// This function intentionally avoids mutating the input AST so it can be
// called after the AST has been frozen (i.e. after parsing ends).
func (ctx HelperContext) SimplifyUnusedExpr(expr Expr, unsupportedFeatures compat.JSFeature) Expr {
	switch e := expr.Data.(type) {
	case *EAnnotation:
		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
			return Expr{}
		}

	case *EInlinedEnum:
		return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)

	// Literals and function/arrow definitions have no side effects at all
	case *ENull, *EUndefined, *EMissing, *EBoolean, *ENumber, *EBigInt,
		*EString, *EThis, *ERegExp, *EFunction, *EArrow, *EImportMeta:
		return Expr{}

	case *EDot:
		if e.CanBeRemovedIfUnused {
			return Expr{}
		}

	case *EIdentifier:
		if e.MustKeepDueToWithStmt {
			break
		}
		// Reading an unbound (global) identifier may throw a ReferenceError or
		// trigger a getter, so only bound or explicitly-removable references
		// are dropped
		if e.CanBeRemovedIfUnused || !ctx.isUnbound(e.Ref) {
			return Expr{}
		}

	case *ETemplate:
		if e.TagOrNil.Data == nil {
			var comma Expr
			var templateLoc logger.Loc
			var template *ETemplate
			for _, part := range e.Parts {
				// If we know this value is some kind of primitive, then we know that
				// "ToString" has no side effects and can be avoided.
				if KnownPrimitiveType(part.Value.Data) != PrimitiveUnknown {
					if template != nil {
						comma = JoinWithComma(comma, Expr{Loc: templateLoc, Data: template})
						template = nil
					}
					comma = JoinWithComma(comma, ctx.SimplifyUnusedExpr(part.Value, unsupportedFeatures))
					continue
				}

				// Make sure "ToString" is still evaluated on the value. We can't use
				// string addition here because that may evaluate "ValueOf" instead.
				if template == nil {
					template = &ETemplate{}
					templateLoc = part.Value.Loc
				}
				template.Parts = append(template.Parts, TemplatePart{Value: part.Value})
			}
			if template != nil {
				comma = JoinWithComma(comma, Expr{Loc: templateLoc, Data: template})
			}
			return comma
		} else if e.CanBeUnwrappedIfUnused {
			// If the function call was annotated as being able to be removed if the
			// result is unused, then we can remove it and just keep the arguments.
			// Note that there are no implicit "ToString" operations for tagged
			// template literals.
			var comma Expr
			for _, part := range e.Parts {
				comma = JoinWithComma(comma, ctx.SimplifyUnusedExpr(part.Value, unsupportedFeatures))
			}
			return comma
		}

	case *EArray:
		// Arrays with "..." spread expressions can't be unwrapped because the
		// "..." triggers code evaluation via iterators. In that case, just trim
		// the other items instead and leave the array expression there.
		for _, spread := range e.Items {
			if _, ok := spread.Data.(*ESpread); ok {
				items := make([]Expr, 0, len(e.Items))
				for _, item := range e.Items {
					item = ctx.SimplifyUnusedExpr(item, unsupportedFeatures)
					if item.Data != nil {
						items = append(items, item)
					}
				}

				// Don't mutate the original AST
				clone := *e
				clone.Items = items
				return Expr{Loc: expr.Loc, Data: &clone}
			}
		}

		// Otherwise, the array can be completely removed. We only need to keep any
		// array items with side effects. Apply this simplification recursively.
		var result Expr
		for _, item := range e.Items {
			result = JoinWithComma(result, ctx.SimplifyUnusedExpr(item, unsupportedFeatures))
		}
		return result

	case *EObject:
		// Objects with "..." spread expressions can't be unwrapped because the
		// "..." triggers code evaluation via getters. In that case, just trim
		// the other items instead and leave the object expression there.
		for _, spread := range e.Properties {
			if spread.Kind == PropertySpread {
				properties := make([]Property, 0, len(e.Properties))
				for _, property := range e.Properties {
					// Spread properties must always be evaluated
					if property.Kind != PropertySpread {
						value := ctx.SimplifyUnusedExpr(property.ValueOrNil, unsupportedFeatures)
						if value.Data != nil {
							// Keep the value
							property.ValueOrNil = value
						} else if !property.Flags.Has(PropertyIsComputed) {
							// Skip this property if the key doesn't need to be computed
							continue
						} else {
							// Replace values without side effects with "0" because it's short
							property.ValueOrNil.Data = &ENumber{}
						}
					}
					properties = append(properties, property)
				}

				// Don't mutate the original AST
				clone := *e
				clone.Properties = properties
				return Expr{Loc: expr.Loc, Data: &clone}
			}
		}

		// Otherwise, the object can be completely removed. We only need to keep any
		// object properties with side effects. Apply this simplification recursively.
		var result Expr
		for _, property := range e.Properties {
			if property.Flags.Has(PropertyIsComputed) {
				// Make sure "ToString" is still evaluated on the key
				result = JoinWithComma(result, Expr{Loc: property.Key.Loc, Data: &EBinary{
					Op:    BinOpAdd,
					Left:  property.Key,
					Right: Expr{Loc: property.Key.Loc, Data: &EString{}},
				}})
			}
			result = JoinWithComma(result, ctx.SimplifyUnusedExpr(property.ValueOrNil, unsupportedFeatures))
		}
		return result

	case *EIf:
		yes := ctx.SimplifyUnusedExpr(e.Yes, unsupportedFeatures)
		no := ctx.SimplifyUnusedExpr(e.No, unsupportedFeatures)

		// "foo() ? 1 : 2" => "foo()"
		if yes.Data == nil && no.Data == nil {
			return ctx.SimplifyUnusedExpr(e.Test, unsupportedFeatures)
		}

		// "foo() ? 1 : bar()" => "foo() || bar()"
		if yes.Data == nil {
			return JoinWithLeftAssociativeOp(BinOpLogicalOr, e.Test, no)
		}

		// "foo() ? bar() : 2" => "foo() && bar()"
		if no.Data == nil {
			return JoinWithLeftAssociativeOp(BinOpLogicalAnd, e.Test, yes)
		}

		if yes != e.Yes || no != e.No {
			return Expr{Loc: expr.Loc, Data: &EIf{Test: e.Test, Yes: yes, No: no}}
		}

	case *EUnary:
		switch e.Op {
		// These operators must not have any type conversions that can execute code
		// such as "toString" or "valueOf". They must also never throw any exceptions.
		case UnOpVoid, UnOpNot:
			return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)

		case UnOpTypeof:
			if _, ok := e.Value.Data.(*EIdentifier); ok && e.WasOriginallyTypeofIdentifier {
				// "typeof x" must not be transformed into if "x" since doing so could
				// cause an exception to be thrown. Instead we can just remove it since
				// "typeof x" is special-cased in the standard to never throw.
				return Expr{}
			}
			return ctx.SimplifyUnusedExpr(e.Value, unsupportedFeatures)
		}

	case *EBinary:
		left := e.Left
		right := e.Right

		switch e.Op {
		// These operators must not have any type conversions that can execute code
		// such as "toString" or "valueOf". They must also never throw any exceptions.
		case BinOpStrictEq, BinOpStrictNe, BinOpComma:
			return JoinWithComma(ctx.SimplifyUnusedExpr(left, unsupportedFeatures), ctx.SimplifyUnusedExpr(right, unsupportedFeatures))

		// We can simplify "==" and "!=" even though they can call "toString" and/or
		// "valueOf" if we can statically determine that the types of both sides are
		// primitives. In that case there won't be any chance for user-defined
		// "toString" and/or "valueOf" to be called.
		case BinOpLooseEq, BinOpLooseNe:
			if MergedKnownPrimitiveTypes(left, right) != PrimitiveUnknown {
				return JoinWithComma(ctx.SimplifyUnusedExpr(left, unsupportedFeatures), ctx.SimplifyUnusedExpr(right, unsupportedFeatures))
			}

		case BinOpLogicalAnd, BinOpLogicalOr, BinOpNullishCoalescing:
			// If this is a boolean logical operation and the result is unused, then
			// we know the left operand will only be used for its boolean value and
			// can be simplified under that assumption
			if e.Op != BinOpNullishCoalescing {
				left = ctx.SimplifyBooleanExpr(left)
			}

			// Preserve short-circuit behavior: the left expression is only unused if
			// the right expression can be completely removed. Otherwise, the left
			// expression is important for the branch.
			right = ctx.SimplifyUnusedExpr(right, unsupportedFeatures)
			if right.Data == nil {
				return ctx.SimplifyUnusedExpr(left, unsupportedFeatures)
			}

			// Try to take advantage of the optional chain operator to shorten code
			if !unsupportedFeatures.Has(compat.OptionalChain) {
				if binary, ok := left.Data.(*EBinary); ok {
					// "a != null && a.b()" => "a?.b()"
					// "a == null || a.b()" => "a?.b()"
					if (binary.Op == BinOpLooseNe && e.Op == BinOpLogicalAnd) || (binary.Op == BinOpLooseEq && e.Op == BinOpLogicalOr) {
						var test Expr
						if _, ok := binary.Right.Data.(*ENull); ok {
							test = binary.Left
						} else if _, ok := binary.Left.Data.(*ENull); ok {
							test = binary.Right
						}

						// Note: Technically unbound identifiers can refer to a getter on
						// the global object and that getter can have side effects that can
						// be observed if we run that getter once instead of twice. But this
						// seems like terrible coding practice and very unlikely to come up
						// in real software, so we deliberately ignore this possibility and
						// optimize for size instead of for this obscure edge case.
						//
						// If this is ever changed, then we must also pessimize the lowering
						// of "foo?.bar" to save the value of "foo" to ensure that it's only
						// evaluated once. Specifically "foo?.bar" would have to expand to:
						//
						//   var _a;
						//   (_a = foo) == null ? void 0 : _a.bar;
						//
						// instead of:
						//
						//   foo == null ? void 0 : foo.bar;
						//
						// Babel does the first one while TypeScript does the second one.
						// Since TypeScript doesn't handle this extreme edge case and
						// TypeScript is very widely used, I think it's fine for us to not
						// handle this edge case either.
						if id, ok := test.Data.(*EIdentifier); ok && !id.MustKeepDueToWithStmt && TryToInsertOptionalChain(test, right) {
							return right
						}
					}
				}
			}

		case BinOpAdd:
			if result, isStringAddition := simplifyUnusedStringAdditionChain(expr); isStringAddition {
				return result
			}
		}

		if left != e.Left || right != e.Right {
			return Expr{Loc: expr.Loc, Data: &EBinary{Op: e.Op, Left: left, Right: right}}
		}

	case *ECall:
		// A call that has been marked "__PURE__" can be removed if all arguments
		// can be removed. The annotation causes us to ignore the target.
		if e.CanBeUnwrappedIfUnused {
			var result Expr
			for _, arg := range e.Args {
				if _, ok := arg.Data.(*ESpread); ok {
					// Wrap the spread in an array so its iteration side effects are
					// preserved (this writes to the loop-local copy, not the AST)
					arg.Data = &EArray{Items: []Expr{arg}, IsSingleLine: true}
				}
				result = JoinWithComma(result, ctx.SimplifyUnusedExpr(arg, unsupportedFeatures))
			}
			return result
		}

		// Attempt to shorten IIFEs
		if len(e.Args) == 0 {
			switch target := e.Target.Data.(type) {
			case *EFunction:
				if len(target.Fn.Args) != 0 {
					break
				}

				// Just delete "(function() {})()" completely
				if len(target.Fn.Body.Block.Stmts) == 0 {
					return Expr{}
				}

			case *EArrow:
				if len(target.Args) != 0 {
					break
				}

				// Just delete "(() => {})()" completely
				if len(target.Body.Block.Stmts) == 0 {
					return Expr{}
				}

				if len(target.Body.Block.Stmts) == 1 {
					switch s := target.Body.Block.Stmts[0].Data.(type) {
					case *SExpr:
						if !target.IsAsync {
							// Replace "(() => { foo() })()" with "foo()"
							return s.Value
						} else {
							// Replace "(async () => { foo() })()" with "(async () => foo())()"
							clone := *target
							clone.Body.Block.Stmts[0].Data = &SReturn{ValueOrNil: s.Value}
							clone.PreferExpr = true
							return Expr{Loc: expr.Loc, Data: &ECall{Target: Expr{Loc: e.Target.Loc, Data: &clone}}}
						}

					case *SReturn:
						if !target.IsAsync {
							// Replace "(() => foo())()" with "foo()"
							return s.ValueOrNil
						}
					}
				}
			}
		}

	case *ENew:
		// A constructor call that has been marked "__PURE__" can be removed if all
		// arguments can be removed. The annotation causes us to ignore the target.
		if e.CanBeUnwrappedIfUnused {
			var result Expr
			for _, arg := range e.Args {
				if _, ok := arg.Data.(*ESpread); ok {
					arg.Data = &EArray{Items: []Expr{arg}, IsSingleLine: true}
				}
				result = JoinWithComma(result, ctx.SimplifyUnusedExpr(arg, unsupportedFeatures))
			}
			return result
		}
	}

	return expr
}
+
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+//
+// simplifyUnusedStringAdditionChain shrinks a "+" chain whose result is
+// unused by emptying string literal contents while keeping at least one
+// string in the chain, so the implicit string coercion of the remaining
+// operands (and any toString side effects) is preserved. The boolean result
+// reports whether the (sub)expression is known to be a string addition.
+func simplifyUnusedStringAdditionChain(expr Expr) (Expr, bool) {
+	switch e := expr.Data.(type) {
+	case *EString:
+		// "'x' + y" => "'' + y"
+		return Expr{Loc: expr.Loc, Data: &EString{}}, true
+
+	case *EBinary:
+		if e.Op == BinOpAdd {
+			left, leftIsStringAddition := simplifyUnusedStringAdditionChain(e.Left)
+
+			if right, rightIsString := e.Right.Data.(*EString); rightIsString {
+				// "('' + x) + 'y'" => "'' + x"
+				// The left side already guarantees a string result, so the
+				// right-hand literal is dead weight and can be dropped.
+				if leftIsStringAddition {
+					return left, true
+				}
+
+				// "x + 'y'" => "x + ''"
+				// Keep an empty string so "x" is still coerced to a string.
+				if !leftIsStringAddition && len(right.Value) > 0 {
+					return Expr{Loc: expr.Loc, Data: &EBinary{
+						Op:    BinOpAdd,
+						Left:  left,
+						Right: Expr{Loc: e.Right.Loc, Data: &EString{}},
+					}}, true
+				}
+			}
+
+			// Don't mutate the original AST
+			if left != e.Left {
+				expr.Data = &EBinary{Op: BinOpAdd, Left: left, Right: e.Right}
+			}
+
+			return expr, leftIsStringAddition
+		}
+	}
+
+	return expr, false
+}
+
+// ToInt32 converts a float64 using the semantics of JavaScript's "ToInt32"
+// abstract operation (ECMA-262): NaN and ±Infinity map to 0, and all other
+// values wrap modulo 2^32 with the sign reapplied afterward.
+func ToInt32(f float64) int32 {
+	// Fast path: the value is already exactly representable as an int32.
+	if quick := int32(f); float64(quick) == f {
+		return quick
+	}
+
+	// Casting NaN or ±Infinity is unspecified behavior in Go; JS maps them to 0.
+	if math.IsNaN(f) || math.IsInf(f, 0) {
+		return 0
+	}
+
+	// Slow path: wrap the magnitude modulo 2^32, then restore the sign.
+	wrapped := int32(uint32(math.Mod(math.Abs(f), 4294967296)))
+	if math.Signbit(f) {
+		wrapped = -wrapped
+	}
+	return wrapped
+}
+
+// ToUint32 converts a float64 using JavaScript's "ToUint32" abstract
+// operation, which is the two's-complement reinterpretation of ToInt32.
+func ToUint32(f float64) uint32 {
+	signed := ToInt32(f)
+	return uint32(signed)
+}
+
+// isInt32OrUint32 returns true when the expression is statically guaranteed
+// to evaluate to a 32-bit integer (the result of a JS bitwise or shift
+// operator), so a comparison against zero is equivalent to a truthiness test.
+func isInt32OrUint32(data E) bool {
+	switch e := data.(type) {
+	case *EUnary:
+		// "~x" always produces an int32
+		return e.Op == UnOpCpl
+
+	case *EBinary:
+		switch e.Op {
+		// All bitwise and shift operators produce a 32-bit integer
+		case BinOpBitwiseAnd, BinOpBitwiseOr, BinOpBitwiseXor, BinOpShl, BinOpShr, BinOpUShr:
+			return true
+
+		// "a || b" and "a && b" evaluate to one of their two operands
+		case BinOpLogicalOr, BinOpLogicalAnd:
+			return isInt32OrUint32(e.Left.Data) && isInt32OrUint32(e.Right.Data)
+		}
+
+	case *EIf:
+		// A conditional is a 32-bit integer only if both branches are
+		return isInt32OrUint32(e.Yes.Data) && isInt32OrUint32(e.No.Data)
+	}
+	return false
+}
+
+// ToNumberWithoutSideEffects statically evaluates JavaScript's "ToNumber"
+// coercion for expressions whose numeric value is known at compile time.
+// The second result is false when the value cannot be determined safely.
+func ToNumberWithoutSideEffects(data E) (float64, bool) {
+	switch e := data.(type) {
+	case *EAnnotation:
+		// Look through annotation wrappers (e.g. "__PURE__" comments)
+		return ToNumberWithoutSideEffects(e.Value.Data)
+
+	case *EInlinedEnum:
+		// Look through inlined TypeScript enum constants
+		return ToNumberWithoutSideEffects(e.Value.Data)
+
+	case *ENull:
+		// "+null" => "0"
+		return 0, true
+
+	case *EUndefined, *ERegExp:
+		// "+undefined" and "+/x/" => "NaN"
+		return math.NaN(), true
+
+	case *EArray:
+		if len(e.Items) == 0 {
+			// "+[]" => "0"
+			return 0, true
+		}
+
+	case *EObject:
+		if len(e.Properties) == 0 {
+			// "+{}" => "NaN"
+			return math.NaN(), true
+		}
+
+	case *EBoolean:
+		if e.Value {
+			return 1, true
+		} else {
+			return 0, true
+		}
+
+	case *ENumber:
+		return e.Value, true
+
+	case *EString:
+		// "+''" => "0"
+		if len(e.Value) == 0 {
+			return 0, true
+		}
+
+		// "+'1'" => "1"
+		if num, ok := StringToEquivalentNumberValue(e.Value); ok {
+			return num, true
+		}
+	}
+
+	return 0, false
+}
+
+// ToStringWithoutSideEffects statically evaluates JavaScript's "ToString"
+// coercion for primitive literals. The second result is false when the
+// string form cannot be determined safely at compile time.
+func ToStringWithoutSideEffects(data E) (string, bool) {
+	switch e := data.(type) {
+	case *ENull:
+		return "null", true
+
+	case *EUndefined:
+		return "undefined", true
+
+	case *EBoolean:
+		if e.Value {
+			return "true", true
+		} else {
+			return "false", true
+		}
+
+	case *EBigInt:
+		// Only do this if there is no radix, since a bigint literal with a
+		// radix prefix (e.g. "0x10n") would print in decimal at runtime
+		if len(e.Value) < 2 || e.Value[0] != '0' {
+			return e.Value, true
+		}
+
+	case *ENumber:
+		if str, ok := TryToStringOnNumberSafely(e.Value, 10); ok {
+			return str, true
+		}
+
+	case *ERegExp:
+		return e.Value, true
+
+	case *EDot:
+		// This is dumb but some JavaScript obfuscators use this to generate string literals
+		if e.Name == "constructor" {
+			switch e.Target.Data.(type) {
+			case *EString:
+				return "function String() { [native code] }", true
+
+			case *ERegExp:
+				return "function RegExp() { [native code] }", true
+			}
+		}
+	}
+
+	return "", false
+}
+
+// extractNumericValue returns the numeric constant held by "data", looking
+// through any EAnnotation and EInlinedEnum wrappers. The second result is
+// false when the expression is not a numeric literal.
+func extractNumericValue(data E) (float64, bool) {
+	for {
+		switch e := data.(type) {
+		case *EAnnotation:
+			data = e.Value.Data
+		case *EInlinedEnum:
+			data = e.Value.Data
+		case *ENumber:
+			return e.Value, true
+		default:
+			return 0, false
+		}
+	}
+}
+
+// extractNumericValues extracts numeric constants from both operands at
+// once; it succeeds only when both sides are (possibly wrapped) numbers.
+func extractNumericValues(left Expr, right Expr) (float64, float64, bool) {
+	a, leftOk := extractNumericValue(left.Data)
+	if !leftOk {
+		return 0, 0, false
+	}
+	b, rightOk := extractNumericValue(right.Data)
+	if !rightOk {
+		return 0, 0, false
+	}
+	return a, b, true
+}
+
+// extractStringValue returns the UCS-2 contents of a string literal,
+// looking through any EAnnotation and EInlinedEnum wrappers. The second
+// result is false when the expression is not a string literal.
+func extractStringValue(data E) ([]uint16, bool) {
+	for {
+		switch e := data.(type) {
+		case *EAnnotation:
+			data = e.Value.Data
+		case *EInlinedEnum:
+			data = e.Value.Data
+		case *EString:
+			return e.Value, true
+		default:
+			return nil, false
+		}
+	}
+}
+
+// extractStringValues extracts string literal contents from both operands;
+// it succeeds only when both sides are (possibly wrapped) string literals.
+func extractStringValues(left Expr, right Expr) ([]uint16, []uint16, bool) {
+	a, leftOk := extractStringValue(left.Data)
+	if !leftOk {
+		return nil, nil, false
+	}
+	b, rightOk := extractStringValue(right.Data)
+	if !rightOk {
+		return nil, nil, false
+	}
+	return a, b, true
+}
+
+func stringCompareUCS2(a []uint16, b []uint16) int {
+	var n int
+	if len(a) < len(b) {
+		n = len(a)
+	} else {
+		n = len(b)
+	}
+	for i := 0; i < n; i++ {
+		if delta := int(a[i]) - int(b[i]); delta != 0 {
+			return delta
+		}
+	}
+	return len(a) - len(b)
+}
+
+// approximatePrintedIntCharCount estimates how many characters it takes to
+// print "intValue" as a decimal integer (digits plus a possible minus sign).
+// It uses log10, so the result is only approximate near powers of ten.
+func approximatePrintedIntCharCount(intValue float64) int {
+	digits := math.Floor(math.Log10(math.Abs(intValue)))
+	count := 1 + (int)(math.Max(0, digits))
+	if intValue < 0 {
+		// Account for the leading "-" sign
+		count++
+	}
+	return count
+}
+
+// ShouldFoldBinaryOperatorWhenMinifying returns true when constant-folding
+// this binary operator is expected to produce output no larger than the
+// unfolded source, so the minifier should perform the fold. Note that some
+// folds (e.g. "<<" and ">>>") are size-checked because the folded constant
+// can be longer than the original expression.
+func ShouldFoldBinaryOperatorWhenMinifying(binary *EBinary) bool {
+	switch binary.Op {
+	case
+		// Equality tests should always result in smaller code when folded
+		BinOpLooseEq,
+		BinOpLooseNe,
+		BinOpStrictEq,
+		BinOpStrictNe,
+
+		// Minification always folds right signed shift operations since they are
+		// unlikely to result in larger output. Note: ">>>" could result in
+		// bigger output such as "-1 >>> 0" becoming "4294967295".
+		BinOpShr,
+
+		// Minification always folds the following bitwise operations since they
+		// are unlikely to result in larger output.
+		BinOpBitwiseAnd,
+		BinOpBitwiseOr,
+		BinOpBitwiseXor,
+		BinOpLt,
+		BinOpGt,
+		BinOpLe,
+		BinOpGe:
+		return true
+
+	case BinOpAdd:
+		// Addition of small-ish integers can definitely be folded without issues
+		// "1 + 2" => "3"
+		if left, right, ok := extractNumericValues(binary.Left, binary.Right); ok &&
+			left == math.Trunc(left) && math.Abs(left) <= 0xFFFF_FFFF &&
+			right == math.Trunc(right) && math.Abs(right) <= 0xFFFF_FFFF {
+			return true
+		}
+
+		// String addition should pretty much always be more compact when folded
+		if _, _, ok := extractStringValues(binary.Left, binary.Right); ok {
+			return true
+		}
+
+	case BinOpSub:
+		// Subtraction of small-ish integers can definitely be folded without issues
+		// "3 - 1" => "2"
+		if left, right, ok := extractNumericValues(binary.Left, binary.Right); ok &&
+			left == math.Trunc(left) && math.Abs(left) <= 0xFFFF_FFFF &&
+			right == math.Trunc(right) && math.Abs(right) <= 0xFFFF_FFFF {
+			return true
+		}
+
+	case BinOpDiv:
+		// "0/0" => "NaN"
+		// "1/0" => "Infinity"
+		// "1/-0" => "-Infinity"
+		if _, right, ok := extractNumericValues(binary.Left, binary.Right); ok && right == 0 {
+			return true
+		}
+
+	case BinOpShl:
+		// "1 << 3" => "8"
+		// "1 << 24" => "1 << 24" (since "1<<24" is shorter than "16777216")
+		if left, right, ok := extractNumericValues(binary.Left, binary.Right); ok {
+			leftLen := approximatePrintedIntCharCount(left)
+			rightLen := approximatePrintedIntCharCount(right)
+			resultLen := approximatePrintedIntCharCount(float64(ToInt32(left) << (ToUint32(right) & 31)))
+			return resultLen <= leftLen+2+rightLen
+		}
+
+	case BinOpUShr:
+		// "10 >>> 1" => "5"
+		// "-1 >>> 0" => "-1 >>> 0" (since "-1>>>0" is shorter than "4294967295")
+		if left, right, ok := extractNumericValues(binary.Left, binary.Right); ok {
+			leftLen := approximatePrintedIntCharCount(left)
+			rightLen := approximatePrintedIntCharCount(right)
+			resultLen := approximatePrintedIntCharCount(float64(ToUint32(left) >> (ToUint32(right) & 31)))
+			return resultLen <= leftLen+3+rightLen
+		}
+
+	case BinOpLogicalAnd, BinOpLogicalOr, BinOpNullishCoalescing:
+		// Folding away a branch only requires the tested side to be a literal
+		if IsPrimitiveLiteral(binary.Left.Data) {
+			return true
+		}
+	}
+	return false
+}
+
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+//
+// FoldBinaryOperator constant-folds a binary expression when both operands
+// are known literals, mirroring JavaScript's runtime semantics (including
+// 32-bit wrapping for bitwise/shift operators and UCS-2 code unit order for
+// string comparisons). It returns the zero Expr when folding is not possible.
+func FoldBinaryOperator(loc logger.Loc, e *EBinary) Expr {
+	switch e.Op {
+	case BinOpAdd:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: left + right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EString{Value: joinStrings(left, right)}}
+		}
+
+	case BinOpSub:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: left - right}}
+		}
+
+	case BinOpMul:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: left * right}}
+		}
+
+	case BinOpDiv:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: left / right}}
+		}
+
+	case BinOpRem:
+		// JS "%" on numbers matches Go's math.Mod, not the integer "%" operator
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: math.Mod(left, right)}}
+		}
+
+	case BinOpPow:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: math.Pow(left, right)}}
+		}
+
+	case BinOpShl:
+		// Shift counts are masked to 5 bits per the JS specification
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToInt32(left) << (ToUint32(right) & 31))}}
+		}
+
+	case BinOpShr:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToInt32(left) >> (ToUint32(right) & 31))}}
+		}
+
+	case BinOpUShr:
+		// ">>>" is the only unsigned shift, hence ToUint32 on the left side
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToUint32(left) >> (ToUint32(right) & 31))}}
+		}
+
+	case BinOpBitwiseAnd:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToInt32(left) & ToInt32(right))}}
+		}
+
+	case BinOpBitwiseOr:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToInt32(left) | ToInt32(right))}}
+		}
+
+	case BinOpBitwiseXor:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &ENumber{Value: float64(ToInt32(left) ^ ToInt32(right))}}
+		}
+
+	case BinOpLt:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left < right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) < 0}}
+		}
+
+	case BinOpGt:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left > right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) > 0}}
+		}
+
+	case BinOpLe:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left <= right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) <= 0}}
+		}
+
+	case BinOpGe:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left >= right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) >= 0}}
+		}
+
+	case BinOpLooseEq, BinOpStrictEq:
+		// Loose and strict agree here because both operands have the same type
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left == right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) == 0}}
+		}
+
+	case BinOpLooseNe, BinOpStrictNe:
+		if left, right, ok := extractNumericValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: left != right}}
+		}
+		if left, right, ok := extractStringValues(e.Left, e.Right); ok {
+			return Expr{Loc: loc, Data: &EBoolean{Value: stringCompareUCS2(left, right) != 0}}
+		}
+
+	case BinOpLogicalAnd:
+		if boolean, sideEffects, ok := ToBooleanWithSideEffects(e.Left.Data); ok {
+			if !boolean {
+				// "falsy && x" => "falsy"
+				return e.Left
+			} else if sideEffects == NoSideEffects {
+				// "truthy && x" => "x" (only if dropping the left is safe)
+				return e.Right
+			}
+		}
+
+	case BinOpLogicalOr:
+		if boolean, sideEffects, ok := ToBooleanWithSideEffects(e.Left.Data); ok {
+			if boolean {
+				// "truthy || x" => "truthy"
+				return e.Left
+			} else if sideEffects == NoSideEffects {
+				// "falsy || x" => "x" (only if dropping the left is safe)
+				return e.Right
+			}
+		}
+
+	case BinOpNullishCoalescing:
+		if isNullOrUndefined, sideEffects, ok := ToNullOrUndefinedWithSideEffects(e.Left.Data); ok {
+			if !isNullOrUndefined {
+				// "defined ?? x" => "defined"
+				return e.Left
+			} else if sideEffects == NoSideEffects {
+				// "null/undefined ?? x" => "x" (only if dropping the left is safe)
+				return e.Right
+			}
+		}
+	}
+
+	return Expr{}
+}
+
+// IsBinaryNullAndUndefined detects the pattern "a === null || a === void 0"
+// (in any operand order) where both comparisons use operator "op" on the
+// same identifier. On success it returns the identifier expression and the
+// compared value from one side so the caller can rewrite the pair into a
+// single "a == null" style check.
+func IsBinaryNullAndUndefined(left Expr, right Expr, op OpCode) (Expr, Expr, bool) {
+	if a, ok := left.Data.(*EBinary); ok && a.Op == op {
+		if b, ok := right.Data.(*EBinary); ok && b.Op == op {
+			idA, eqA := a.Left, a.Right
+			idB, eqB := b.Left, b.Right
+
+			// Detect when the identifier comes second and flip the order of our checks
+			if _, ok := eqA.Data.(*EIdentifier); ok {
+				idA, eqA = eqA, idA
+			}
+			if _, ok := eqB.Data.(*EIdentifier); ok {
+				idB, eqB = eqB, idB
+			}
+
+			// Both sides must compare the same identifier
+			if idA, ok := idA.Data.(*EIdentifier); ok {
+				if idB, ok := idB.Data.(*EIdentifier); ok && idA.Ref == idB.Ref {
+					// "a === null || a === void 0"
+					if _, ok := eqA.Data.(*ENull); ok {
+						if _, ok := eqB.Data.(*EUndefined); ok {
+							return a.Left, a.Right, true
+						}
+					}
+
+					// "a === void 0 || a === null"
+					if _, ok := eqA.Data.(*EUndefined); ok {
+						if _, ok := eqB.Data.(*ENull); ok {
+							return b.Left, b.Right, true
+						}
+					}
+				}
+			}
+		}
+	}
+
+	return Expr{}, Expr{}, false
+}
+
+// CheckEqualityBigInt compares two bigint literals by their source text.
+// It returns (equal, ok); "ok" is false when equality cannot be decided
+// from the text alone (e.g. "0x10n" vs. "16n" would need radix conversion).
+func CheckEqualityBigInt(a string, b string) (equal bool, ok bool) {
+	// Identical source text always denotes identical values
+	if a == b {
+		return true, true
+	}
+
+	// A bigint literal without a radix prefix cannot have leading zeros, so
+	// both literals are canonical decimal and different text means different
+	// values. A radix prefix starts with "0" and has at least two characters;
+	// two such literals could still denote the same number, so give up.
+	hasRadixA := len(a) >= 2 && a[0] == '0'
+	hasRadixB := len(b) >= 2 && b[0] == '0'
+	if !hasRadixA && !hasRadixB {
+		return false, true
+	}
+
+	return false, false
+}
+
+// EqualityKind distinguishes JavaScript's "==" from "===" when statically
+// evaluating equality comparisons.
+type EqualityKind uint8
+
+const (
+	// LooseEquality models "==" and "!=" (performs type coercion).
+	LooseEquality EqualityKind = iota
+	// StrictEquality models "===" and "!==" (no type coercion).
+	StrictEquality
+)
+
+// CheckEqualityIfNoSideEffects statically evaluates an equality comparison
+// between two primitive literals using JavaScript's loose or strict rules.
+//
+// Returns "equal, ok". If "ok" is false, then nothing is known about the two
+// values. If "ok" is true, the equality or inequality of the two values is
+// stored in "equal".
+func CheckEqualityIfNoSideEffects(left E, right E, kind EqualityKind) (equal bool, ok bool) {
+	// Look through inlined enum constants on the right side first
+	if r, ok := right.(*EInlinedEnum); ok {
+		return CheckEqualityIfNoSideEffects(left, r.Value.Data, kind)
+	}
+
+	switch l := left.(type) {
+	case *EInlinedEnum:
+		return CheckEqualityIfNoSideEffects(l.Value.Data, right, kind)
+
+	case *ENull:
+		switch right.(type) {
+		case *ENull:
+			// "null === null" is true
+			return true, true
+
+		case *EUndefined:
+			// "null == undefined" is true
+			// "null === undefined" is false
+			return kind == LooseEquality, true
+
+		default:
+			if IsPrimitiveLiteral(right) {
+				// "null == (not null or undefined)" is false
+				return false, true
+			}
+		}
+
+	case *EUndefined:
+		switch right.(type) {
+		case *EUndefined:
+			// "undefined === undefined" is true
+			return true, true
+
+		case *ENull:
+			// "undefined == null" is true
+			// "undefined === null" is false
+			return kind == LooseEquality, true
+
+		default:
+			if IsPrimitiveLiteral(right) {
+				// "undefined == (not null or undefined)" is false
+				return false, true
+			}
+		}
+
+	case *EBoolean:
+		switch r := right.(type) {
+		case *EBoolean:
+			// "false === false" is true
+			// "false === true" is false
+			return l.Value == r.Value, true
+
+		case *ENumber:
+			if kind == LooseEquality {
+				// Loose equality coerces the boolean to 0 or 1 first
+				if l.Value {
+					// "true == 1" is true
+					return r.Value == 1, true
+				} else {
+					// "false == 0" is true
+					return r.Value == 0, true
+				}
+			} else {
+				// "true === 1" is false
+				// "false === 0" is false
+				return false, true
+			}
+
+		case *ENull, *EUndefined:
+			// "(not null or undefined) == undefined" is false
+			return false, true
+		}
+
+	case *ENumber:
+		switch r := right.(type) {
+		case *ENumber:
+			// "0 === 0" is true
+			// "0 === 1" is false
+			return l.Value == r.Value, true
+
+		case *EBoolean:
+			if kind == LooseEquality {
+				// Loose equality coerces the boolean to 0 or 1 first
+				if r.Value {
+					// "1 == true" is true
+					return l.Value == 1, true
+				} else {
+					// "0 == false" is true
+					return l.Value == 0, true
+				}
+			} else {
+				// "1 === true" is false
+				// "0 === false" is false
+				return false, true
+			}
+
+		case *ENull, *EUndefined:
+			// "(not null or undefined) == undefined" is false
+			return false, true
+		}
+
+	case *EBigInt:
+		switch r := right.(type) {
+		case *EBigInt:
+			// "0n === 0n" is true
+			// "0n === 1n" is false
+			return CheckEqualityBigInt(l.Value, r.Value)
+
+		case *ENull, *EUndefined:
+			// "(not null or undefined) == undefined" is false
+			return false, true
+		}
+
+	case *EString:
+		switch r := right.(type) {
+		case *EString:
+			// "'a' === 'a'" is true
+			// "'a' === 'b'" is false
+			return helpers.UTF16EqualsUTF16(l.Value, r.Value), true
+
+		case *ENull, *EUndefined:
+			// "(not null or undefined) == undefined" is false
+			return false, true
+		}
+	}
+
+	return false, false
+}
+
+// ValuesLookTheSame returns true when the two expressions are syntactically
+// identical and re-evaluating either would produce the same value, so one
+// can be substituted for the other during minification. It is conservative:
+// a false result only means equality could not be proven.
+func ValuesLookTheSame(left E, right E) bool {
+	// Look through inlined enum constants on the right side first
+	if b, ok := right.(*EInlinedEnum); ok {
+		return ValuesLookTheSame(left, b.Value.Data)
+	}
+
+	switch a := left.(type) {
+	case *EInlinedEnum:
+		return ValuesLookTheSame(a.Value.Data, right)
+
+	case *EIdentifier:
+		// Two references to the same symbol
+		if b, ok := right.(*EIdentifier); ok && a.Ref == b.Ref {
+			return true
+		}
+
+	case *EDot:
+		if b, ok := right.(*EDot); ok && a.HasSameFlagsAs(b) &&
+			a.Name == b.Name && ValuesLookTheSame(a.Target.Data, b.Target.Data) {
+			return true
+		}
+
+	case *EIndex:
+		if b, ok := right.(*EIndex); ok && a.HasSameFlagsAs(b) &&
+			ValuesLookTheSame(a.Target.Data, b.Target.Data) && ValuesLookTheSame(a.Index.Data, b.Index.Data) {
+			return true
+		}
+
+	case *EIf:
+		if b, ok := right.(*EIf); ok && ValuesLookTheSame(a.Test.Data, b.Test.Data) &&
+			ValuesLookTheSame(a.Yes.Data, b.Yes.Data) && ValuesLookTheSame(a.No.Data, b.No.Data) {
+			return true
+		}
+
+	case *EUnary:
+		if b, ok := right.(*EUnary); ok && a.Op == b.Op && ValuesLookTheSame(a.Value.Data, b.Value.Data) {
+			return true
+		}
+
+	case *EBinary:
+		if b, ok := right.(*EBinary); ok && a.Op == b.Op && ValuesLookTheSame(a.Left.Data, b.Left.Data) &&
+			ValuesLookTheSame(a.Right.Data, b.Right.Data) {
+			return true
+		}
+
+	case *ECall:
+		if b, ok := right.(*ECall); ok && a.HasSameFlagsAs(b) &&
+			len(a.Args) == len(b.Args) && ValuesLookTheSame(a.Target.Data, b.Target.Data) {
+			for i := range a.Args {
+				if !ValuesLookTheSame(a.Args[i].Data, b.Args[i].Data) {
+					return false
+				}
+			}
+			return true
+		}
+
+	// Special-case to distinguish between negative an non-negative zero when mangling
+	// "a ? -0 : 0" => "a ? -0 : 0"
+	// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Equality_comparisons_and_sameness
+	case *ENumber:
+		b, ok := right.(*ENumber)
+		if ok && a.Value == 0 && b.Value == 0 && math.Signbit(a.Value) != math.Signbit(b.Value) {
+			return false
+		}
+	}
+
+	// Fall back to static equality evaluation for primitive literals
+	equal, ok := CheckEqualityIfNoSideEffects(left, right, StrictEquality)
+	return ok && equal
+}
+
+// TryToInsertOptionalChain rewrites "expr" so that the member access whose
+// target looks the same as "test" becomes the start of an optional chain
+// (e.g. turning the "a.b.c" in "a && a.b.c" into "a?.b.c"). It returns true
+// when a rewrite happened. Note: unlike most helpers in this file, this
+// function mutates the AST nodes inside "expr" in place.
+func TryToInsertOptionalChain(test Expr, expr Expr) bool {
+	switch e := expr.Data.(type) {
+	case *EDot:
+		if ValuesLookTheSame(test.Data, e.Target.Data) {
+			// The tested value is the direct target: start the chain here
+			e.OptionalChain = OptionalChainStart
+			return true
+		}
+		if TryToInsertOptionalChain(test, e.Target) {
+			// A deeper node started the chain; continue it through this one
+			if e.OptionalChain == OptionalChainNone {
+				e.OptionalChain = OptionalChainContinue
+			}
+			return true
+		}
+
+	case *EIndex:
+		if ValuesLookTheSame(test.Data, e.Target.Data) {
+			e.OptionalChain = OptionalChainStart
+			return true
+		}
+		if TryToInsertOptionalChain(test, e.Target) {
+			if e.OptionalChain == OptionalChainNone {
+				e.OptionalChain = OptionalChainContinue
+			}
+			return true
+		}
+
+	case *ECall:
+		if ValuesLookTheSame(test.Data, e.Target.Data) {
+			e.OptionalChain = OptionalChainStart
+			return true
+		}
+		if TryToInsertOptionalChain(test, e.Target) {
+			if e.OptionalChain == OptionalChainNone {
+				e.OptionalChain = OptionalChainContinue
+			}
+			return true
+		}
+	}
+
+	return false
+}
+
+// joinStrings concatenates two UCS-2 code unit slices into a freshly
+// allocated slice, leaving both inputs untouched.
+func joinStrings(a []uint16, b []uint16) []uint16 {
+	out := make([]uint16, 0, len(a)+len(b))
+	out = append(out, a...)
+	out = append(out, b...)
+	return out
+}
+
+// TryToStringOnNumberSafely stringifies "n" in the given radix only when
+// the result is guaranteed to match what a real JavaScript VM would print.
+//
+// String concatenation with numbers is required by the TypeScript compiler for
+// "constant expression" handling in enums. However, we don't want to introduce
+// correctness bugs by accidentally stringifying a number differently than how
+// a real JavaScript VM would do it. So we are conservative and we only do this
+// when we know it'll be the same result.
+func TryToStringOnNumberSafely(n float64, radix int) (string, bool) {
+	// Integers that fit in an int32 print identically in Go and JavaScript
+	if i := int32(n); float64(i) == n {
+		return strconv.FormatInt(int64(i), radix), true
+	}
+
+	// The three non-finite values have fixed JavaScript spellings
+	switch {
+	case math.IsNaN(n):
+		return "NaN", true
+	case math.IsInf(n, 1):
+		return "Infinity", true
+	case math.IsInf(n, -1):
+		return "-Infinity", true
+	}
+
+	return "", false
+}
+
+// Note: We don't know if this is string addition yet at this point
+//
+// foldAdditionPreProcess normalizes one operand of a "+" before folding:
+// it looks through inlined enum wrappers and rewrites array/object literals
+// whose string form is fully known ("[] => ''", "{} => '[object Object]'").
+func foldAdditionPreProcess(expr Expr) Expr {
+	switch e := expr.Data.(type) {
+	case *EInlinedEnum:
+		// "See through" inline enum constants
+		expr = e.Value
+
+	case *EArray:
+		// "[] + x" => "'' + x"
+		// "[1,2] + x" => "'1,2' + x"
+		items := make([]string, 0, len(e.Items))
+		for _, item := range e.Items {
+			switch item.Data.(type) {
+			case *EUndefined, *ENull:
+				// null and undefined stringify to "" inside an array join
+				items = append(items, "")
+				continue
+			}
+			if str, ok := ToStringWithoutSideEffects(item.Data); ok {
+				item.Data = &EString{Value: helpers.StringToUTF16(str)}
+			}
+			str, ok := item.Data.(*EString)
+			if !ok {
+				// A non-primitive item means the join is not statically known
+				break
+			}
+			items = append(items, helpers.UTF16ToString(str.Value))
+		}
+		// Only fold if every single item could be converted to a string
+		if len(items) == len(e.Items) {
+			expr.Data = &EString{Value: helpers.StringToUTF16(strings.Join(items, ","))}
+		}
+
+	case *EObject:
+		// "{} + x" => "'[object Object]' + x"
+		if len(e.Properties) == 0 {
+			expr.Data = &EString{Value: helpers.StringToUTF16("[object Object]")}
+		}
+	}
+	return expr
+}
+
+// StringAdditionKind tells FoldStringAddition whether the left operand came
+// from a nested "+" expression, in which case coercing it to a string would
+// change evaluation order and is therefore unsafe.
+type StringAdditionKind uint8
+
+const (
+	// StringAdditionNormal means the left operand is not nested inside
+	// another addition, so it may be safely stringified.
+	StringAdditionNormal StringAdditionKind = iota
+	// StringAdditionWithNestedLeft means the left operand is itself the
+	// result of a nested addition (e.g. the "0 + 1" in "0 + 1 + 'x'").
+	StringAdditionWithNestedLeft
+)
+
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+//
+// FoldStringAddition folds "left + right" when the result is a statically
+// known string or template literal. It returns the zero Expr when the
+// addition cannot be folded.
+func FoldStringAddition(left Expr, right Expr, kind StringAdditionKind) Expr {
+	left = foldAdditionPreProcess(left)
+	right = foldAdditionPreProcess(right)
+
+	// Transforming the left operand into a string is not safe if it comes from
+	// a nested AST node. The following transforms are invalid:
+	//
+	//   "0 + 1 + 'x'" => "0 + '1x'"
+	//   "0 + 1 + `${x}`" => "0 + `1${x}`"
+	//
+	if kind != StringAdditionWithNestedLeft {
+		switch right.Data.(type) {
+		case *EString, *ETemplate:
+			if str, ok := ToStringWithoutSideEffects(left.Data); ok {
+				left.Data = &EString{Value: helpers.StringToUTF16(str)}
+			}
+		}
+	}
+
+	switch l := left.Data.(type) {
+	case *EString:
+		// "'x' + 0" => "'x' + '0'"
+		if str, ok := ToStringWithoutSideEffects(right.Data); ok {
+			right.Data = &EString{Value: helpers.StringToUTF16(str)}
+		}
+
+		switch r := right.Data.(type) {
+		case *EString:
+			// "'x' + 'y'" => "'xy'"
+			return Expr{Loc: left.Loc, Data: &EString{
+				Value:          joinStrings(l.Value, r.Value),
+				PreferTemplate: l.PreferTemplate || r.PreferTemplate,
+			}}
+
+		case *ETemplate:
+			// A tag would observe the template, so only fold untagged templates
+			if r.TagOrNil.Data == nil {
+				// "'x' + `y${z}`" => "`xy${z}`"
+				return Expr{Loc: left.Loc, Data: &ETemplate{
+					HeadLoc:    left.Loc,
+					HeadCooked: joinStrings(l.Value, r.HeadCooked),
+					Parts:      r.Parts,
+				}}
+			}
+		}
+
+		// "'' + typeof x" => "typeof x"
+		if len(l.Value) == 0 && KnownPrimitiveType(right.Data) == PrimitiveString {
+			return right
+		}
+
+	case *ETemplate:
+		if l.TagOrNil.Data == nil {
+			// "`${x}` + 0" => "`${x}` + '0'"
+			if str, ok := ToStringWithoutSideEffects(right.Data); ok {
+				right.Data = &EString{Value: helpers.StringToUTF16(str)}
+			}
+
+			switch r := right.Data.(type) {
+			case *EString:
+				// "`${x}y` + 'z'" => "`${x}yz`"
+				// Append to the template head or to the last part's tail,
+				// copying the parts so the original template is not mutated.
+				n := len(l.Parts)
+				head := l.HeadCooked
+				parts := make([]TemplatePart, n)
+				if n == 0 {
+					head = joinStrings(head, r.Value)
+				} else {
+					copy(parts, l.Parts)
+					parts[n-1].TailCooked = joinStrings(parts[n-1].TailCooked, r.Value)
+				}
+				return Expr{Loc: left.Loc, Data: &ETemplate{
+					HeadLoc:    l.HeadLoc,
+					HeadCooked: head,
+					Parts:      parts,
+				}}
+
+			case *ETemplate:
+				if r.TagOrNil.Data == nil {
+					// "`${a}b` + `x${y}`" => "`${a}bx${y}`"
+					// Splice the right template's head into the left's tail.
+					n := len(l.Parts)
+					head := l.HeadCooked
+					parts := make([]TemplatePart, n+len(r.Parts))
+					copy(parts[n:], r.Parts)
+					if n == 0 {
+						head = joinStrings(head, r.HeadCooked)
+					} else {
+						copy(parts[:n], l.Parts)
+						parts[n-1].TailCooked = joinStrings(parts[n-1].TailCooked, r.HeadCooked)
+					}
+					return Expr{Loc: left.Loc, Data: &ETemplate{
+						HeadLoc:    l.HeadLoc,
+						HeadCooked: head,
+						Parts:      parts,
+					}}
+				}
+			}
+		}
+	}
+
+	// "typeof x + ''" => "typeof x"
+	if r, ok := right.Data.(*EString); ok && len(r.Value) == 0 && KnownPrimitiveType(left.Data) == PrimitiveString {
+		return left
+	}
+
+	return Expr{}
+}
+
+// InlinePrimitivesIntoTemplate merges primitive-valued substitutions into a
+// template literal's string parts: "`a${'b'}c`" => "`abc`". A template with
+// no remaining substitutions becomes a plain string literal.
+//
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+func InlinePrimitivesIntoTemplate(loc logger.Loc, e *ETemplate) Expr {
+	// Can't inline strings if there's a custom template tag
+	if e.TagOrNil.Data != nil {
+		return Expr{Loc: loc, Data: e}
+	}
+
+	headCooked := e.HeadCooked
+	parts := make([]TemplatePart, 0, len(e.Parts))
+
+	for _, part := range e.Parts {
+		// Look through inlined enum constants
+		if value, ok := part.Value.Data.(*EInlinedEnum); ok {
+			part.Value = value.Value
+		}
+		// Convert other known primitives to their string form
+		if str, ok := ToStringWithoutSideEffects(part.Value.Data); ok {
+			part.Value.Data = &EString{Value: helpers.StringToUTF16(str)}
+		}
+		if str, ok := part.Value.Data.(*EString); ok {
+			// Fold this part's string value into the preceding literal text
+			if len(parts) == 0 {
+				headCooked = append(append(headCooked, str.Value...), part.TailCooked...)
+			} else {
+				prevPart := &parts[len(parts)-1]
+				prevPart.TailCooked = append(append(prevPart.TailCooked, str.Value...), part.TailCooked...)
+			}
+		} else {
+			// Keep non-primitive substitutions as-is
+			parts = append(parts, part)
+		}
+	}
+
+	// Become a plain string if there are no substitutions
+	if len(parts) == 0 {
+		return Expr{Loc: loc, Data: &EString{
+			Value:          headCooked,
+			PreferTemplate: true,
+		}}
+	}
+
+	return Expr{Loc: loc, Data: &ETemplate{
+		HeadLoc:    e.HeadLoc,
+		HeadCooked: headCooked,
+		Parts:      parts,
+	}}
+}
+
+// SideEffects reports whether statically evaluating an expression's value
+// would discard observable side effects from the original expression.
+type SideEffects uint8
+
+const (
+	// CouldHaveSideEffects means the original expression must still be kept
+	// (or otherwise handled) even though its value is known.
+	CouldHaveSideEffects SideEffects = iota
+	// NoSideEffects means the original expression can be dropped entirely.
+	NoSideEffects
+)
+
+// ToNullOrUndefinedWithSideEffects statically determines whether an
+// expression evaluates to null or undefined. It returns that verdict,
+// whether evaluating the original expression could have observable side
+// effects, and whether a verdict could be reached at all ("ok").
+func ToNullOrUndefinedWithSideEffects(data E) (isNullOrUndefined bool, sideEffects SideEffects, ok bool) {
+	switch e := data.(type) {
+	case *EAnnotation:
+		isNullOrUndefined, sideEffects, ok = ToNullOrUndefinedWithSideEffects(e.Value.Data)
+		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
+			// A "__PURE__"-style annotation means side effects can be ignored
+			sideEffects = NoSideEffects
+		}
+		return
+
+	case *EInlinedEnum:
+		return ToNullOrUndefinedWithSideEffects(e.Value.Data)
+
+	// Never null or undefined
+	case *EBoolean, *ENumber, *EString, *ERegExp,
+		*EFunction, *EArrow, *EBigInt:
+		return false, NoSideEffects, true
+
+	// Never null or undefined, but evaluating the literal may run code
+	case *EObject, *EArray, *EClass:
+		return false, CouldHaveSideEffects, true
+
+	// Always null or undefined
+	case *ENull, *EUndefined:
+		return true, NoSideEffects, true
+
+	case *EUnary:
+		switch e.Op {
+		case
+			// Always number or bigint
+			UnOpPos, UnOpNeg, UnOpCpl,
+			UnOpPreDec, UnOpPreInc, UnOpPostDec, UnOpPostInc,
+			// Always boolean
+			UnOpNot, UnOpDelete:
+			return false, CouldHaveSideEffects, true
+
+		// Always boolean
+		case UnOpTypeof:
+			if e.WasOriginallyTypeofIdentifier {
+				// Expressions such as "typeof x" never have any side effects
+				return false, NoSideEffects, true
+			}
+			return false, CouldHaveSideEffects, true
+
+		// Always undefined
+		case UnOpVoid:
+			return true, CouldHaveSideEffects, true
+		}
+
+	case *EBinary:
+		switch e.Op {
+		case
+			// Always string or number or bigint
+			BinOpAdd, BinOpAddAssign,
+			// Always number or bigint
+			BinOpSub, BinOpMul, BinOpDiv, BinOpRem, BinOpPow,
+			BinOpSubAssign, BinOpMulAssign, BinOpDivAssign, BinOpRemAssign, BinOpPowAssign,
+			BinOpShl, BinOpShr, BinOpUShr,
+			BinOpShlAssign, BinOpShrAssign, BinOpUShrAssign,
+			BinOpBitwiseOr, BinOpBitwiseAnd, BinOpBitwiseXor,
+			BinOpBitwiseOrAssign, BinOpBitwiseAndAssign, BinOpBitwiseXorAssign,
+			// Always boolean
+			BinOpLt, BinOpLe, BinOpGt, BinOpGe, BinOpIn, BinOpInstanceof,
+			BinOpLooseEq, BinOpLooseNe, BinOpStrictEq, BinOpStrictNe:
+			return false, CouldHaveSideEffects, true
+
+		case BinOpComma:
+			// A comma expression takes the value of its right operand
+			if isNullOrUndefined, _, ok := ToNullOrUndefinedWithSideEffects(e.Right.Data); ok {
+				return isNullOrUndefined, CouldHaveSideEffects, true
+			}
+		}
+	}
+
+	return false, NoSideEffects, false
+}
+
+// ToBooleanWithSideEffects statically evaluates JavaScript's "ToBoolean"
+// coercion of an expression. It returns the boolean value, whether
+// evaluating the original expression could have observable side effects,
+// and whether the value could be determined at all ("ok").
+func ToBooleanWithSideEffects(data E) (boolean bool, sideEffects SideEffects, ok bool) {
+	switch e := data.(type) {
+	case *EAnnotation:
+		boolean, sideEffects, ok = ToBooleanWithSideEffects(e.Value.Data)
+		if e.Flags.Has(CanBeRemovedIfUnusedFlag) {
+			// A "__PURE__"-style annotation means side effects can be ignored
+			sideEffects = NoSideEffects
+		}
+		return
+
+	case *EInlinedEnum:
+		return ToBooleanWithSideEffects(e.Value.Data)
+
+	case *ENull, *EUndefined:
+		return false, NoSideEffects, true
+
+	case *EBoolean:
+		return e.Value, NoSideEffects, true
+
+	case *ENumber:
+		// Both 0 and NaN are falsy
+		return e.Value != 0 && !math.IsNaN(e.Value), NoSideEffects, true
+
+	case *EBigInt:
+		// Only "0n" is falsy, but a literal with a radix prefix may be
+		// undecidable without conversion (see CheckEqualityBigInt)
+		equal, ok := CheckEqualityBigInt(e.Value, "0")
+		return !equal, NoSideEffects, ok
+
+	case *EString:
+		// Only the empty string is falsy
+		return len(e.Value) > 0, NoSideEffects, true
+
+	case *EFunction, *EArrow, *ERegExp:
+		return true, NoSideEffects, true
+
+	case *EObject, *EArray, *EClass:
+		// Always truthy, but evaluating the literal may run arbitrary code
+		return true, CouldHaveSideEffects, true
+
+	case *EUnary:
+		switch e.Op {
+		case UnOpVoid:
+			// "void x" is always undefined, which is falsy
+			return false, CouldHaveSideEffects, true
+
+		case UnOpTypeof:
+			// The result of "typeof" is never an empty string
+			if e.WasOriginallyTypeofIdentifier {
+				// Expressions such as "typeof x" never have any side effects
+				return true, NoSideEffects, true
+			}
+			return true, CouldHaveSideEffects, true
+
+		case UnOpNot:
+			// Note: lowercase names here so the locals don't shadow the
+			// "SideEffects" type like the previous code did
+			if childBoolean, childSideEffects, childOk := ToBooleanWithSideEffects(e.Value.Data); childOk {
+				return !childBoolean, childSideEffects, true
+			}
+		}
+
+	case *EBinary:
+		switch e.Op {
+		case BinOpLogicalOr:
+			// "anything || truthy" is truthy
+			if boolean, _, ok := ToBooleanWithSideEffects(e.Right.Data); ok && boolean {
+				return true, CouldHaveSideEffects, true
+			}
+
+		case BinOpLogicalAnd:
+			// "anything && falsy" is falsy
+			if boolean, _, ok := ToBooleanWithSideEffects(e.Right.Data); ok && !boolean {
+				return false, CouldHaveSideEffects, true
+			}
+
+		case BinOpComma:
+			// "anything, truthy/falsy" is truthy/falsy
+			if boolean, _, ok := ToBooleanWithSideEffects(e.Right.Data); ok {
+				return boolean, CouldHaveSideEffects, true
+			}
+		}
+	}
+
+	return false, CouldHaveSideEffects, false
+}
+
+// SimplifyBooleanExpr simplifies syntax when we know the expression is used
+// inside a boolean context, e.g. removing "!!" double negation and dropping
+// comparisons against zero for operands known to be 32-bit integers.
+//
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+func (ctx HelperContext) SimplifyBooleanExpr(expr Expr) Expr {
+	switch e := expr.Data.(type) {
+	case *EUnary:
+		if e.Op == UnOpNot {
+			// "!!a" => "a"
+			if e2, ok2 := e.Value.Data.(*EUnary); ok2 && e2.Op == UnOpNot {
+				return ctx.SimplifyBooleanExpr(e2.Value)
+			}
+
+			// "!!!a" => "!a"
+			return Expr{Loc: expr.Loc, Data: &EUnary{Op: UnOpNot, Value: ctx.SimplifyBooleanExpr(e.Value)}}
+		}
+
+	case *EBinary:
+		left := e.Left
+		right := e.Right
+
+		switch e.Op {
+		case BinOpStrictEq, BinOpStrictNe, BinOpLooseEq, BinOpLooseNe:
+			if r, ok := extractNumericValue(right.Data); ok && r == 0 && isInt32OrUint32(left.Data) {
+				// If the left is guaranteed to be an integer (e.g. not NaN,
+				// Infinity, or a non-numeric value) then a test against zero
+				// in a boolean context is unnecessary because the value is
+				// only truthy if it's not zero.
+				if e.Op == BinOpStrictNe || e.Op == BinOpLooseNe {
+					// "if ((a | b) !== 0)" => "if (a | b)"
+					return left
+				} else {
+					// "if ((a | b) === 0)" => "if (!(a | b))"
+					return Not(left)
+				}
+			}
+
+		case BinOpLogicalAnd:
+			// "if (!!a && !!b)" => "if (a && b)"
+			left = ctx.SimplifyBooleanExpr(left)
+			right = ctx.SimplifyBooleanExpr(right)
+
+			if boolean, SideEffects, ok := ToBooleanWithSideEffects(right.Data); ok && boolean && SideEffects == NoSideEffects {
+				// "if (anything && truthyNoSideEffects)" => "if (anything)"
+				return left
+			}
+
+		case BinOpLogicalOr:
+			// "if (!!a || !!b)" => "if (a || b)"
+			left = ctx.SimplifyBooleanExpr(left)
+			right = ctx.SimplifyBooleanExpr(right)
+
+			if boolean, SideEffects, ok := ToBooleanWithSideEffects(right.Data); ok && !boolean && SideEffects == NoSideEffects {
+				// "if (anything || falsyNoSideEffects)" => "if (anything)"
+				return left
+			}
+		}
+
+		// Only allocate a new node if a child was actually simplified
+		if left != e.Left || right != e.Right {
+			return Expr{Loc: expr.Loc, Data: &EBinary{Op: e.Op, Left: left, Right: right}}
+		}
+
+	case *EIf:
+		// "if (a ? !!b : !!c)" => "if (a ? b : c)"
+		yes := ctx.SimplifyBooleanExpr(e.Yes)
+		no := ctx.SimplifyBooleanExpr(e.No)
+
+		if boolean, SideEffects, ok := ToBooleanWithSideEffects(yes.Data); ok && SideEffects == NoSideEffects {
+			if boolean {
+				// "if (anything1 ? truthyNoSideEffects : anything2)" => "if (anything1 || anything2)"
+				return JoinWithLeftAssociativeOp(BinOpLogicalOr, e.Test, no)
+			} else {
+				// "if (anything1 ? falsyNoSideEffects : anything2)" => "if (!anything1 || anything2)"
+				return JoinWithLeftAssociativeOp(BinOpLogicalAnd, Not(e.Test), no)
+			}
+		}
+
+		if boolean, SideEffects, ok := ToBooleanWithSideEffects(no.Data); ok && SideEffects == NoSideEffects {
+			if boolean {
+				// "if (anything1 ? anything2 : truthyNoSideEffects)" => "if (!anything1 || anything2)"
+				return JoinWithLeftAssociativeOp(BinOpLogicalOr, Not(e.Test), yes)
+			} else {
+				// "if (anything1 ? anything2 : falsyNoSideEffects)" => "if (anything1 && anything2)"
+				return JoinWithLeftAssociativeOp(BinOpLogicalAnd, e.Test, yes)
+			}
+		}
+
+		// Only allocate a new node if a branch was actually simplified
+		if yes != e.Yes || no != e.No {
+			return Expr{Loc: expr.Loc, Data: &EIf{Test: e.Test, Yes: yes, No: no}}
+		}
+
+	default:
+		// "!![]" => "true"
+		if boolean, sideEffects, ok := ToBooleanWithSideEffects(expr.Data); ok && (sideEffects == NoSideEffects || ctx.ExprCanBeRemovedIfUnused(expr)) {
+			return Expr{Loc: expr.Loc, Data: &EBoolean{Value: boolean}}
+		}
+	}
+
+	return expr
+}
+
+// StmtsCanBeRemovedIfUnusedFlags adjusts how conservative
+// "StmtsCanBeRemovedIfUnused" is about certain statement kinds.
+type StmtsCanBeRemovedIfUnusedFlags uint8
+
+const (
+	// KeepExportClauses treats "export {...}" clauses as unremovable.
+	KeepExportClauses StmtsCanBeRemovedIfUnusedFlags = 1 << iota
+	// ReturnCanBeRemovedIfUnused allows "return" statements to be removed
+	// when their value (if any) is side-effect free.
+	ReturnCanBeRemovedIfUnused
+)
+
+// StmtsCanBeRemovedIfUnused returns true if every statement in the slice can
+// be removed without changing observable behavior. The analysis is
+// conservative: any statement kind not explicitly handled below is assumed
+// to have side effects. The flags relax the check for "export" clauses and
+// "return" statements.
+func (ctx HelperContext) StmtsCanBeRemovedIfUnused(stmts []Stmt, flags StmtsCanBeRemovedIfUnusedFlags) bool {
+	for _, stmt := range stmts {
+		switch s := stmt.Data.(type) {
+		case *SFunction, *SEmpty:
+			// These never have side effects
+
+		case *SImport:
+			// Let these be removed if they are unused. Note that we also need to
+			// check if the imported file is marked as "sideEffects: false" before we
+			// can remove a SImport statement. Otherwise the import must be kept for
+			// its side effects.
+
+		case *SClass:
+			if !ctx.ClassCanBeRemovedIfUnused(s.Class) {
+				return false
+			}
+
+		case *SReturn:
+			if (flags&ReturnCanBeRemovedIfUnused) == 0 || (s.ValueOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(s.ValueOrNil)) {
+				return false
+			}
+
+		case *SExpr:
+			if !ctx.ExprCanBeRemovedIfUnused(s.Value) {
+				if s.IsFromClassOrFnThatCanBeRemovedIfUnused {
+					// This statement was automatically generated when lowering a class
+					// or function that we were able to analyze as having no side effects
+					// before lowering. So we consider it to be removable. The assumption
+					// here is that we are seeing at least all of the statements from the
+					// class lowering operation all at once (although we may possibly be
+					// seeing even more statements than that). Since we're making a binary
+					// all-or-nothing decision about the side effects of these statements,
+					// we can safely consider these to be side-effect free because we
+					// aren't in danger of partially dropping some of the class setup code.
+				} else {
+					return false
+				}
+			}
+
+		case *SLocal:
+			// "await" is a side effect because it affects code timing
+			if s.Kind == LocalAwaitUsing {
+				return false
+			}
+
+			for _, decl := range s.Decls {
+				// Check that the bindings are side-effect free
+				switch binding := decl.Binding.Data.(type) {
+				case *BIdentifier:
+					// An identifier binding has no side effects
+
+				case *BArray:
+					// Destructuring the initializer has no side effects if the
+					// initializer is an array, since we assume the iterator is then
+					// the built-in side-effect free array iterator.
+					if _, ok := decl.ValueOrNil.Data.(*EArray); ok {
+						for _, item := range binding.Items {
+							if item.DefaultValueOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(item.DefaultValueOrNil) {
+								return false
+							}
+
+							switch item.Binding.Data.(type) {
+							case *BIdentifier, *BMissing:
+								// Right now we only handle an array pattern with identifier
+								// bindings or with empty holes (i.e. "missing" elements)
+							default:
+								return false
+							}
+						}
+						break
+					}
+					return false
+
+				default:
+					// Consider anything else to potentially have side effects
+					return false
+				}
+
+				// Check that the initializer is side-effect free
+				if decl.ValueOrNil.Data != nil {
+					if !ctx.ExprCanBeRemovedIfUnused(decl.ValueOrNil) {
+						return false
+					}
+
+					// "using" declarations are only side-effect free if they are initialized to null or undefined
+					if s.Kind.IsUsing() {
+						if t := KnownPrimitiveType(decl.ValueOrNil.Data); t != PrimitiveNull && t != PrimitiveUndefined {
+							return false
+						}
+					}
+				}
+			}
+
+		case *STry:
+			if !ctx.StmtsCanBeRemovedIfUnused(s.Block.Stmts, 0) || (s.Finally != nil && !ctx.StmtsCanBeRemovedIfUnused(s.Finally.Block.Stmts, 0)) {
+				return false
+			}
+
+		case *SExportFrom:
+			// Exports are tracked separately, so this isn't necessary
+
+		case *SExportClause:
+			if (flags & KeepExportClauses) != 0 {
+				return false
+			}
+
+		case *SExportDefault:
+			switch s2 := s.Value.Data.(type) {
+			case *SExpr:
+				if !ctx.ExprCanBeRemovedIfUnused(s2.Value) {
+					return false
+				}
+
+			case *SFunction:
+				// These never have side effects
+
+			case *SClass:
+				if !ctx.ClassCanBeRemovedIfUnused(s2.Class) {
+					return false
+				}
+
+			default:
+				panic("Internal error")
+			}
+
+		default:
+			// Assume that all statements not explicitly special-cased here have side
+			// effects, and cannot be removed even if unused
+			return false
+		}
+	}
+
+	return true
+}
+
+// ClassCanBeRemovedIfUnused returns true if declaring this class has no
+// observable side effects: no decorators, a side-effect-free "extends"
+// clause, computed keys that are primitive literals or symbol instances,
+// and side-effect-free static members and static blocks.
+func (ctx HelperContext) ClassCanBeRemovedIfUnused(class Class) bool {
+	if len(class.Decorators) > 0 {
+		return false
+	}
+
+	// Note: This check is incorrect. Extending a non-constructible object can
+	// throw an error, which is a side effect:
+	//
+	//   async function x() {}
+	//   class y extends x {}
+	//
+	// But refusing to tree-shake every class with a base class is not a useful
+	// thing for a bundler to do. So we pretend that this edge case doesn't
+	// exist. At the time of writing, both Rollup and Terser don't consider this
+	// to be a side effect either.
+	if class.ExtendsOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(class.ExtendsOrNil) {
+		return false
+	}
+
+	for _, property := range class.Properties {
+		if property.Kind == PropertyClassStaticBlock {
+			if !ctx.StmtsCanBeRemovedIfUnused(property.ClassStaticBlock.Block.Stmts, 0) {
+				return false
+			}
+			continue
+		}
+
+		if len(property.Decorators) > 0 {
+			return false
+		}
+
+		// Computed keys are evaluated at class declaration time, so they must
+		// not be able to run arbitrary code
+		if property.Flags.Has(PropertyIsComputed) && !IsPrimitiveLiteral(property.Key.Data) && !IsSymbolInstance(property.Key.Data) {
+			return false
+		}
+
+		if property.Kind.IsMethodDefinition() {
+			if fn, ok := property.ValueOrNil.Data.(*EFunction); ok {
+				for _, arg := range fn.Fn.Args {
+					if len(arg.Decorators) > 0 {
+						return false
+					}
+				}
+			}
+		}
+
+		if property.Flags.Has(PropertyIsStatic) {
+			if property.ValueOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(property.ValueOrNil) {
+				return false
+			}
+
+			if property.InitializerOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(property.InitializerOrNil) {
+				return false
+			}
+
+			// Legacy TypeScript static class fields are considered to have side
+			// effects because they use assign semantics, not define semantics, and
+			// that can trigger getters. For example:
+			//
+			//   class Foo {
+			//     static set foo(x) { importantSideEffect(x) }
+			//   }
+			//   class Bar extends Foo {
+			//     foo = 1
+			//   }
+			//
+			// This happens in TypeScript when "useDefineForClassFields" is disabled
+			// because TypeScript (and esbuild) transforms the above class into this:
+			//
+			//   class Foo {
+			//     static set foo(x) { importantSideEffect(x); }
+			//   }
+			//   class Bar extends Foo {
+			//   }
+			//   Bar.foo = 1;
+			//
+			// Note that it's not possible to analyze the base class to determine that
+			// these assignments are side-effect free. For example:
+			//
+			//   // Some code that already ran before your code
+			//   Object.defineProperty(Object.prototype, 'foo', {
+			//     set(x) { imporantSideEffect(x) }
+			//   })
+			//
+			//   // Your code
+			//   class Foo {
+			//     static foo = 1
+			//   }
+			//
+			if property.Kind == PropertyField && !class.UseDefineForClassFields {
+				return false
+			}
+		}
+	}
+
+	return true
+}
+
+// ExprCanBeRemovedIfUnused returns true if the expression can be deleted
+// entirely when its result is unused, i.e. evaluating it has no observable
+// side effects. The analysis is deliberately conservative: any expression
+// kind not explicitly handled below is assumed to have side effects.
+func (ctx HelperContext) ExprCanBeRemovedIfUnused(expr Expr) bool {
+	switch e := expr.Data.(type) {
+	case *EAnnotation:
+		return e.Flags.Has(CanBeRemovedIfUnusedFlag)
+
+	case *EInlinedEnum:
+		return ctx.ExprCanBeRemovedIfUnused(e.Value)
+
+	case *ENull, *EUndefined, *EMissing, *EBoolean, *ENumber, *EBigInt,
+		*EString, *EThis, *ERegExp, *EFunction, *EArrow, *EImportMeta:
+		// Pure literals and function values never have side effects
+		return true
+
+	case *EDot:
+		return e.CanBeRemovedIfUnused
+
+	case *EClass:
+		return ctx.ClassCanBeRemovedIfUnused(e.Class)
+
+	case *EIdentifier:
+		if e.MustKeepDueToWithStmt {
+			return false
+		}
+
+		// Unbound identifiers cannot be removed because they can have side effects.
+		// One possible side effect is throwing a ReferenceError if they don't exist.
+		// Another one is a getter with side effects on the global object:
+		//
+		//   Object.defineProperty(globalThis, 'x', {
+		//     get() {
+		//       sideEffect();
+		//     },
+		//   });
+		//
+		// Be very careful about this possibility. It's tempting to treat all
+		// identifier expressions as not having side effects but that's wrong. We
+		// must make sure they have been declared by the code we are currently
+		// compiling before we can tell that they have no side effects.
+		//
+		// Note that we currently ignore ReferenceErrors due to TDZ access. This is
+		// incorrect but proper TDZ analysis is very complicated and would have to
+		// be very conservative, which would inhibit a lot of optimizations of code
+		// inside closures. This may need to be revisited if it proves problematic.
+		if e.CanBeRemovedIfUnused || !ctx.isUnbound(e.Ref) {
+			return true
+		}
+
+	case *EImportIdentifier:
+		// References to an ES6 import item are always side-effect free in an
+		// ECMAScript environment.
+		//
+		// They could technically have side effects if the imported module is a
+		// CommonJS module and the import item was translated to a property access
+		// (which esbuild's bundler does) and the property has a getter with side
+		// effects.
+		//
+		// But this is very unlikely and respecting this edge case would mean
+		// disabling tree shaking of all code that references an export from a
+		// CommonJS module. It would also likely violate the expectations of some
+		// developers because the code *looks* like it should be able to be tree
+		// shaken.
+		//
+		// So we deliberately ignore this edge case and always treat import item
+		// references as being side-effect free.
+		return true
+
+	case *EIf:
+		// Branch references guarded by a "typeof" check are also allowed
+		return ctx.ExprCanBeRemovedIfUnused(e.Test) &&
+			((ctx.isSideEffectFreeUnboundIdentifierRef(e.Yes, e.Test, true) || ctx.ExprCanBeRemovedIfUnused(e.Yes)) &&
+				(ctx.isSideEffectFreeUnboundIdentifierRef(e.No, e.Test, false) || ctx.ExprCanBeRemovedIfUnused(e.No)))
+
+	case *EArray:
+		for _, item := range e.Items {
+			if spread, ok := item.Data.(*ESpread); ok {
+				if _, ok := spread.Value.Data.(*EArray); ok {
+					// Spread of an inline array such as "[...[x]]" is side-effect free
+					item = spread.Value
+				}
+			}
+
+			if !ctx.ExprCanBeRemovedIfUnused(item) {
+				return false
+			}
+		}
+		return true
+
+	case *EObject:
+		for _, property := range e.Properties {
+			// The key must still be evaluated if it's computed or a spread
+			if property.Kind == PropertySpread {
+				return false
+			}
+			if property.Flags.Has(PropertyIsComputed) && !IsPrimitiveLiteral(property.Key.Data) && !IsSymbolInstance(property.Key.Data) {
+				return false
+			}
+			if property.ValueOrNil.Data != nil && !ctx.ExprCanBeRemovedIfUnused(property.ValueOrNil) {
+				return false
+			}
+		}
+		return true
+
+	case *ECall:
+		canCallBeRemoved := e.CanBeUnwrappedIfUnused
+
+		// A call that has been marked "__PURE__" can be removed if all arguments
+		// can be removed. The annotation causes us to ignore the target.
+		if canCallBeRemoved {
+			for _, arg := range e.Args {
+				if !ctx.ExprCanBeRemovedIfUnused(arg) {
+					return false
+				}
+			}
+			return true
+		}
+
+	case *ENew:
+		// A constructor call that has been marked "__PURE__" can be removed if all
+		// arguments can be removed. The annotation causes us to ignore the target.
+		if e.CanBeUnwrappedIfUnused {
+			for _, arg := range e.Args {
+				if !ctx.ExprCanBeRemovedIfUnused(arg) {
+					return false
+				}
+			}
+			return true
+		}
+
+	case *EUnary:
+		switch e.Op {
+		// These operators must not have any type conversions that can execute code
+		// such as "toString" or "valueOf". They must also never throw any exceptions.
+		case UnOpVoid, UnOpNot:
+			return ctx.ExprCanBeRemovedIfUnused(e.Value)
+
+		// The "typeof" operator doesn't do any type conversions so it can be removed
+		// if the result is unused and the operand has no side effects. However, it
+		// has a special case where if the operand is an identifier expression such
+		// as "typeof x" and "x" doesn't exist, no reference error is thrown so the
+		// operation has no side effects.
+		case UnOpTypeof:
+			if _, ok := e.Value.Data.(*EIdentifier); ok && e.WasOriginallyTypeofIdentifier {
+				// Expressions such as "typeof x" never have any side effects
+				return true
+			}
+			return ctx.ExprCanBeRemovedIfUnused(e.Value)
+		}
+
+	case *EBinary:
+		switch e.Op {
+		// These operators must not have any type conversions that can execute code
+		// such as "toString" or "valueOf". They must also never throw any exceptions.
+		case BinOpStrictEq, BinOpStrictNe, BinOpComma, BinOpNullishCoalescing:
+			return ctx.ExprCanBeRemovedIfUnused(e.Left) && ctx.ExprCanBeRemovedIfUnused(e.Right)
+
+		// Special-case "||" to make sure "typeof x === 'undefined' || x" can be removed
+		case BinOpLogicalOr:
+			return ctx.ExprCanBeRemovedIfUnused(e.Left) &&
+				(ctx.isSideEffectFreeUnboundIdentifierRef(e.Right, e.Left, false) || ctx.ExprCanBeRemovedIfUnused(e.Right))
+
+		// Special-case "&&" to make sure "typeof x !== 'undefined' && x" can be removed
+		case BinOpLogicalAnd:
+			return ctx.ExprCanBeRemovedIfUnused(e.Left) &&
+				(ctx.isSideEffectFreeUnboundIdentifierRef(e.Right, e.Left, true) || ctx.ExprCanBeRemovedIfUnused(e.Right))
+
+		// For "==" and "!=", pretend the operator was actually "===" or "!==". If
+		// we know that we can convert it to "==" or "!=", then we can consider the
+		// operator itself to have no side effects. This matters because our mangle
+		// logic will convert "typeof x === 'object'" into "typeof x == 'object'"
+		// and since "typeof x === 'object'" is considered to be side-effect free,
+		// we must also consider "typeof x == 'object'" to be side-effect free.
+		case BinOpLooseEq, BinOpLooseNe:
+			return CanChangeStrictToLoose(e.Left, e.Right) && ctx.ExprCanBeRemovedIfUnused(e.Left) && ctx.ExprCanBeRemovedIfUnused(e.Right)
+
+		// Special-case "<" and ">" with string, number, or bigint arguments
+		case BinOpLt, BinOpGt, BinOpLe, BinOpGe:
+			left := KnownPrimitiveType(e.Left.Data)
+			switch left {
+			case PrimitiveString, PrimitiveNumber, PrimitiveBigInt:
+				return KnownPrimitiveType(e.Right.Data) == left && ctx.ExprCanBeRemovedIfUnused(e.Left) && ctx.ExprCanBeRemovedIfUnused(e.Right)
+			}
+		}
+
+	case *ETemplate:
+		// A template can be removed if it has no tag and every value has no side
+		// effects and results in some kind of primitive, since all primitives
+		// have a "ToString" operation with no side effects.
+		if e.TagOrNil.Data == nil || e.CanBeUnwrappedIfUnused {
+			for _, part := range e.Parts {
+				if !ctx.ExprCanBeRemovedIfUnused(part.Value) || KnownPrimitiveType(part.Value.Data) == PrimitiveUnknown {
+					return false
+				}
+			}
+			return true
+		}
+	}
+
+	// Assume all other expression types have side effects and cannot be removed
+	return false
+}
+
+// isSideEffectFreeUnboundIdentifierRef returns true if "value" is a reference
+// to an unbound identifier whose evaluation is guarded by a "typeof" check in
+// "guardCondition" (e.g. "typeof x !== 'undefined' ? x : null"). Such a
+// reference cannot throw a ReferenceError on the branch indicated by
+// "isYesBranch", so it is side-effect free there.
+func (ctx HelperContext) isSideEffectFreeUnboundIdentifierRef(value Expr, guardCondition Expr, isYesBranch bool) bool {
+	if id, ok := value.Data.(*EIdentifier); ok && ctx.isUnbound(id.Ref) {
+		if binary, ok := guardCondition.Data.(*EBinary); ok {
+			switch binary.Op {
+			case BinOpStrictEq, BinOpStrictNe, BinOpLooseEq, BinOpLooseNe:
+				// Pattern match for "typeof x !== <string>"
+				// (note: "string" here shadows the builtin type name)
+				typeof, string := binary.Left, binary.Right
+				if _, ok := typeof.Data.(*EString); ok {
+					// The comparison may be written either way around
+					typeof, string = string, typeof
+				}
+				if typeof, ok := typeof.Data.(*EUnary); ok && typeof.Op == UnOpTypeof && typeof.WasOriginallyTypeofIdentifier {
+					if text, ok := string.Data.(*EString); ok {
+						// In "typeof x !== 'undefined' ? x : null", the reference to "x" is side-effect free
+						// In "typeof x === 'object' ? x : null", the reference to "x" is side-effect free
+						if (helpers.UTF16EqualsString(text.Value, "undefined") == isYesBranch) ==
+							(binary.Op == BinOpStrictNe || binary.Op == BinOpLooseNe) {
+							if id2, ok := typeof.Value.Data.(*EIdentifier); ok && id2.Ref == id.Ref {
+								return true
+							}
+						}
+					}
+				}
+
+			case BinOpLt, BinOpGt, BinOpLe, BinOpGe:
+				// Pattern match for "typeof x < <string>"
+				typeof, string := binary.Left, binary.Right
+				if _, ok := typeof.Data.(*EString); ok {
+					// Swapping the operands flips which branch is guarded
+					typeof, string = string, typeof
+					isYesBranch = !isYesBranch
+				}
+				if typeof, ok := typeof.Data.(*EUnary); ok && typeof.Op == UnOpTypeof && typeof.WasOriginallyTypeofIdentifier {
+					if text, ok := string.Data.(*EString); ok && helpers.UTF16EqualsString(text.Value, "u") {
+						// In "typeof x < 'u' ? x : null", the reference to "x" is side-effect free
+						// In "typeof x > 'u' ? x : null", the reference to "x" is side-effect free
+						if isYesBranch == (binary.Op == BinOpLt || binary.Op == BinOpLe) {
+							if id2, ok := typeof.Value.Data.(*EIdentifier); ok && id2.Ref == id.Ref {
+								return true
+							}
+						}
+					}
+				}
+			}
+		}
+	}
+	return false
+}
+
+// StringToEquivalentNumberValue returns the number that the given UTF-16
+// string represents if (and only if) formatting that number back into a
+// string produces the exact same text. Otherwise "ok" is false.
+func StringToEquivalentNumberValue(value []uint16) (float64, bool) {
+	if len(value) == 0 {
+		return 0, false
+	}
+
+	// Accept an optional leading minus sign followed only by decimal digits
+	negative := value[0] == '-' && len(value) > 1
+	digits := value
+	if negative {
+		digits = digits[1:]
+	}
+
+	var n int32
+	for _, c := range digits {
+		if c < '0' || c > '9' {
+			return 0, false
+		}
+		n = n*10 + int32(c) - '0'
+	}
+	if negative {
+		n = -n
+	}
+
+	// Requiring an exact round trip rejects overflowed values as well as
+	// non-canonical spellings such as "-0", "0123", or "+1"
+	if !helpers.UTF16EqualsString(value, strconv.FormatInt(int64(n), 10)) {
+		return 0, false
+	}
+	return float64(n), true
+}
+
+// InlineSpreadsOfArrayLiterals replaces spreads of inline array literals with
+// their elements (holes become "undefined").
+//
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+func InlineSpreadsOfArrayLiterals(values []Expr) (results []Expr) {
+	for _, value := range values {
+		spread, isSpread := value.Data.(*ESpread)
+		if !isSpread {
+			results = append(results, value)
+			continue
+		}
+		array, isArray := spread.Value.Data.(*EArray)
+		if !isArray {
+			results = append(results, value)
+			continue
+		}
+
+		// "[...[a, , b]]" => "[a, undefined, b]"
+		for _, item := range array.Items {
+			if _, isMissing := item.Data.(*EMissing); isMissing {
+				item = Expr{Loc: item.Loc, Data: EUndefinedShared}
+			}
+			results = append(results, item)
+		}
+	}
+	return
+}
+
+// MangleObjectSpread inlines object literals that are spread into another
+// object literal (e.g. "{...{a, b}}" => "{a, b}") and drops spreads of
+// values with no own enumerable properties. It bails out of inlining when it
+// reaches a getter/setter or a verbatim "__proto__" key, since inlining
+// those would change behavior.
+//
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+func MangleObjectSpread(properties []Property) []Property {
+	var result []Property
+	for _, property := range properties {
+		if property.Kind == PropertySpread {
+			switch v := property.ValueOrNil.Data.(type) {
+			case *EBoolean, *ENull, *EUndefined, *ENumber,
+				*EBigInt, *ERegExp, *EFunction, *EArrow:
+				// This value is ignored because it doesn't have any of its own properties
+				continue
+
+			case *EObject:
+				for i, p := range v.Properties {
+					// Getters are evaluated at iteration time. The property
+					// descriptor is not inlined into the caller. Since we are not
+					// evaluating code at compile time, just bail if we hit one
+					// and preserve the spread with the remaining properties.
+					if p.Kind == PropertyGetter || p.Kind == PropertySetter {
+						// Don't mutate the original AST
+						clone := *v
+						clone.Properties = v.Properties[i:]
+						property.ValueOrNil.Data = &clone
+						result = append(result, property)
+						break
+					}
+
+					// Also bail if we hit a verbatim "__proto__" key. This will
+					// actually set the prototype of the object being spread so
+					// inlining it is not correct.
+					if p.Kind == PropertyField && !p.Flags.Has(PropertyIsComputed) {
+						if str, ok := p.Key.Data.(*EString); ok && helpers.UTF16EqualsString(str.Value, "__proto__") {
+							// Don't mutate the original AST
+							clone := *v
+							clone.Properties = v.Properties[i:]
+							property.ValueOrNil.Data = &clone
+							result = append(result, property)
+							break
+						}
+					}
+
+					result = append(result, p)
+				}
+				continue
+			}
+		}
+		result = append(result, property)
+	}
+	return result
+}
+
+// MangleIfExpr shrinks a ternary expression by applying a series of small
+// equivalences (e.g. "a ? b : b" => "a, b", "a ? true : false" => "!!a",
+// "a ? a : b" => "a || b") and by rewriting null checks into "??" or "?."
+// when the target environment supports those operators.
+//
+// This function intentionally avoids mutating the input AST so it can be
+// called after the AST has been frozen (i.e. after parsing ends).
+func (ctx HelperContext) MangleIfExpr(loc logger.Loc, e *EIf, unsupportedFeatures compat.JSFeature) Expr {
+	test := e.Test
+	yes := e.Yes
+	no := e.No
+
+	// "(a, b) ? c : d" => "a, b ? c : d"
+	if comma, ok := test.Data.(*EBinary); ok && comma.Op == BinOpComma {
+		return JoinWithComma(comma.Left, ctx.MangleIfExpr(comma.Right.Loc, &EIf{
+			Test: comma.Right,
+			Yes:  yes,
+			No:   no,
+		}, unsupportedFeatures))
+	}
+
+	// "!a ? b : c" => "a ? c : b"
+	if not, ok := test.Data.(*EUnary); ok && not.Op == UnOpNot {
+		test = not.Value
+		yes, no = no, yes
+	}
+
+	if ValuesLookTheSame(yes.Data, no.Data) {
+		// "/* @__PURE__ */ a() ? b : b" => "b"
+		if ctx.ExprCanBeRemovedIfUnused(test) {
+			return yes
+		}
+
+		// "a ? b : b" => "a, b"
+		return JoinWithComma(test, yes)
+	}
+
+	// "a ? true : false" => "!!a"
+	// "a ? false : true" => "!a"
+	if y, ok := yes.Data.(*EBoolean); ok {
+		if n, ok := no.Data.(*EBoolean); ok {
+			if y.Value && !n.Value {
+				return Not(Not(test))
+			}
+			if !y.Value && n.Value {
+				return Not(test)
+			}
+		}
+	}
+
+	if id, ok := test.Data.(*EIdentifier); ok {
+		// "a ? a : b" => "a || b"
+		if id2, ok := yes.Data.(*EIdentifier); ok && id.Ref == id2.Ref {
+			return JoinWithLeftAssociativeOp(BinOpLogicalOr, test, no)
+		}
+
+		// "a ? b : a" => "a && b"
+		if id2, ok := no.Data.(*EIdentifier); ok && id.Ref == id2.Ref {
+			return JoinWithLeftAssociativeOp(BinOpLogicalAnd, test, yes)
+		}
+	}
+
+	// "a ? b ? c : d : d" => "a && b ? c : d"
+	if yesIf, ok := yes.Data.(*EIf); ok && ValuesLookTheSame(yesIf.No.Data, no.Data) {
+		return Expr{Loc: loc, Data: &EIf{Test: JoinWithLeftAssociativeOp(BinOpLogicalAnd, test, yesIf.Test), Yes: yesIf.Yes, No: no}}
+	}
+
+	// "a ? b : c ? b : d" => "a || c ? b : d"
+	if noIf, ok := no.Data.(*EIf); ok && ValuesLookTheSame(yes.Data, noIf.Yes.Data) {
+		return Expr{Loc: loc, Data: &EIf{Test: JoinWithLeftAssociativeOp(BinOpLogicalOr, test, noIf.Test), Yes: yes, No: noIf.No}}
+	}
+
+	// "a ? c : (b, c)" => "(a || b), c"
+	if comma, ok := no.Data.(*EBinary); ok && comma.Op == BinOpComma && ValuesLookTheSame(yes.Data, comma.Right.Data) {
+		return JoinWithComma(
+			JoinWithLeftAssociativeOp(BinOpLogicalOr, test, comma.Left),
+			comma.Right,
+		)
+	}
+
+	// "a ? (b, c) : c" => "(a && b), c"
+	if comma, ok := yes.Data.(*EBinary); ok && comma.Op == BinOpComma && ValuesLookTheSame(comma.Right.Data, no.Data) {
+		return JoinWithComma(
+			JoinWithLeftAssociativeOp(BinOpLogicalAnd, test, comma.Left),
+			comma.Right,
+		)
+	}
+
+	// "a ? b || c : c" => "(a && b) || c"
+	if binary, ok := yes.Data.(*EBinary); ok && binary.Op == BinOpLogicalOr &&
+		ValuesLookTheSame(binary.Right.Data, no.Data) {
+		return Expr{Loc: loc, Data: &EBinary{
+			Op:    BinOpLogicalOr,
+			Left:  JoinWithLeftAssociativeOp(BinOpLogicalAnd, test, binary.Left),
+			Right: binary.Right,
+		}}
+	}
+
+	// "a ? c : b && c" => "(a || b) && c"
+	if binary, ok := no.Data.(*EBinary); ok && binary.Op == BinOpLogicalAnd &&
+		ValuesLookTheSame(yes.Data, binary.Right.Data) {
+		return Expr{Loc: loc, Data: &EBinary{
+			Op:    BinOpLogicalAnd,
+			Left:  JoinWithLeftAssociativeOp(BinOpLogicalOr, test, binary.Left),
+			Right: binary.Right,
+		}}
+	}
+
+	// "a ? b(c, d) : b(e, d)" => "b(a ? c : e, d)"
+	if y, ok := yes.Data.(*ECall); ok && len(y.Args) > 0 {
+		if n, ok := no.Data.(*ECall); ok && len(n.Args) == len(y.Args) &&
+			y.HasSameFlagsAs(n) && ValuesLookTheSame(y.Target.Data, n.Target.Data) {
+			// Only do this if the condition can be reordered past the call target
+			// without side effects. For example, if the test or the call target is
+			// an unbound identifier, reordering could potentially mean evaluating
+			// the code could throw a different ReferenceError.
+			if ctx.ExprCanBeRemovedIfUnused(test) && ctx.ExprCanBeRemovedIfUnused(y.Target) {
+				sameTailArgs := true
+				for i, count := 1, len(y.Args); i < count; i++ {
+					if !ValuesLookTheSame(y.Args[i].Data, n.Args[i].Data) {
+						sameTailArgs = false
+						break
+					}
+				}
+				if sameTailArgs {
+					yesSpread, yesIsSpread := y.Args[0].Data.(*ESpread)
+					noSpread, noIsSpread := n.Args[0].Data.(*ESpread)
+
+					// "a ? b(...c) : b(...e)" => "b(...a ? c : e)"
+					if yesIsSpread && noIsSpread {
+						// Don't mutate the original AST
+						temp := EIf{Test: test, Yes: yesSpread.Value, No: noSpread.Value}
+						clone := *y
+						clone.Args = append([]Expr{}, clone.Args...)
+						clone.Args[0] = Expr{Loc: loc, Data: &ESpread{Value: ctx.MangleIfExpr(loc, &temp, unsupportedFeatures)}}
+						return Expr{Loc: loc, Data: &clone}
+					}
+
+					// "a ? b(c) : b(e)" => "b(a ? c : e)"
+					if !yesIsSpread && !noIsSpread {
+						// Don't mutate the original AST
+						temp := EIf{Test: test, Yes: y.Args[0], No: n.Args[0]}
+						clone := *y
+						clone.Args = append([]Expr{}, clone.Args...)
+						clone.Args[0] = ctx.MangleIfExpr(loc, &temp, unsupportedFeatures)
+						return Expr{Loc: loc, Data: &clone}
+					}
+				}
+			}
+		}
+	}
+
+	// Try using the "??" or "?." operators
+	if binary, ok := test.Data.(*EBinary); ok {
+		var check Expr
+		var whenNull Expr
+		var whenNonNull Expr
+
+		switch binary.Op {
+		case BinOpLooseEq:
+			if _, ok := binary.Right.Data.(*ENull); ok {
+				// "a == null ? _ : _"
+				check = binary.Left
+				whenNull = yes
+				whenNonNull = no
+			} else if _, ok := binary.Left.Data.(*ENull); ok {
+				// "null == a ? _ : _"
+				check = binary.Right
+				whenNull = yes
+				whenNonNull = no
+			}
+
+		case BinOpLooseNe:
+			if _, ok := binary.Right.Data.(*ENull); ok {
+				// "a != null ? _ : _"
+				check = binary.Left
+				whenNonNull = yes
+				whenNull = no
+			} else if _, ok := binary.Left.Data.(*ENull); ok {
+				// "null != a ? _ : _"
+				check = binary.Right
+				whenNonNull = yes
+				whenNull = no
+			}
+		}
+
+		// Note: an unmatched "check" has nil Data here, which is unremovable
+		if ctx.ExprCanBeRemovedIfUnused(check) {
+			// "a != null ? a : b" => "a ?? b"
+			if !unsupportedFeatures.Has(compat.NullishCoalescing) && ValuesLookTheSame(check.Data, whenNonNull.Data) {
+				return JoinWithLeftAssociativeOp(BinOpNullishCoalescing, check, whenNull)
+			}
+
+			// "a != null ? a.b.c[d](e) : undefined" => "a?.b.c[d](e)"
+			if !unsupportedFeatures.Has(compat.OptionalChain) {
+				if _, ok := whenNull.Data.(*EUndefined); ok && TryToInsertOptionalChain(check, whenNonNull) {
+					return whenNonNull
+				}
+			}
+		}
+	}
+
+	// Don't mutate the original AST
+	if test != e.Test || yes != e.Yes || no != e.No {
+		return Expr{Loc: loc, Data: &EIf{Test: test, Yes: yes, No: no}}
+	}
+
+	return Expr{Loc: loc, Data: e}
+}
+
+// ForEachIdentifierBindingInDecls invokes the callback for every identifier
+// binding in each declaration, in source order.
+func ForEachIdentifierBindingInDecls(decls []Decl, callback func(loc logger.Loc, b *BIdentifier)) {
+	for i := range decls {
+		ForEachIdentifierBinding(decls[i].Binding, callback)
+	}
+}
+
+// ForEachIdentifierBinding recursively walks a binding pattern and invokes
+// the callback once per identifier binding it contains. It panics on an
+// unknown binding kind since that indicates a compiler bug.
+func ForEachIdentifierBinding(binding Binding, callback func(loc logger.Loc, b *BIdentifier)) {
+	switch b := binding.Data.(type) {
+	case *BIdentifier:
+		callback(binding.Loc, b)
+
+	case *BArray:
+		for i := range b.Items {
+			ForEachIdentifierBinding(b.Items[i].Binding, callback)
+		}
+
+	case *BObject:
+		for i := range b.Properties {
+			ForEachIdentifierBinding(b.Properties[i].Value, callback)
+		}
+
+	case *BMissing:
+		// Holes in destructuring patterns contain no identifiers
+
+	default:
+		panic("Internal error")
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ident.go b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ident.go
new file mode 100644
index 0000000..b1ff22d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_ast/js_ident.go
@@ -0,0 +1,247 @@
+package js_ast
+
+import (
+	"strings"
+	"unicode"
+	"unicode/utf8"
+)
+
+func IsIdentifier(text string) bool {
+	if len(text) == 0 {
+		return false
+	}
+	for i, codePoint := range text {
+		if i == 0 {
+			if !IsIdentifierStart(codePoint) {
+				return false
+			}
+		} else {
+			if !IsIdentifierContinue(codePoint) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func IsIdentifierES5AndESNext(text string) bool {
+	if len(text) == 0 {
+		return false
+	}
+	for i, codePoint := range text {
+		if i == 0 {
+			if !IsIdentifierStartES5AndESNext(codePoint) {
+				return false
+			}
+		} else {
+			if !IsIdentifierContinueES5AndESNext(codePoint) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func ForceValidIdentifier(prefix string, text string) string {
+	sb := strings.Builder{}
+
+	// Private identifiers must be prefixed by "#"
+	if prefix != "" {
+		sb.WriteString(prefix)
+	}
+
+	// Identifier start
+	c, width := utf8.DecodeRuneInString(text)
+	text = text[width:]
+	if IsIdentifierStart(c) {
+		sb.WriteRune(c)
+	} else {
+		sb.WriteRune('_')
+	}
+
+	// Identifier continue
+	for text != "" {
+		c, width := utf8.DecodeRuneInString(text)
+		text = text[width:]
+		if IsIdentifierContinue(c) {
+			sb.WriteRune(c)
+		} else {
+			sb.WriteRune('_')
+		}
+	}
+
+	return sb.String()
+}
+
+// This does "IsIdentifier(UTF16ToString(text))" without any allocations
+func IsIdentifierUTF16(text []uint16) bool {
+	n := len(text)
+	if n == 0 {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		isStart := i == 0
+		r1 := rune(text[i])
+		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
+			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
+				r1 = (r1 << 10) + r2 + (0x10000 - (0xD800 << 10) - 0xDC00)
+				i++
+			}
+		}
+		if isStart {
+			if !IsIdentifierStart(r1) {
+				return false
+			}
+		} else {
+			if !IsIdentifierContinue(r1) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// This does "IsIdentifierES5AndESNext(UTF16ToString(text))" without any allocations
+func IsIdentifierES5AndESNextUTF16(text []uint16) bool {
+	n := len(text)
+	if n == 0 {
+		return false
+	}
+	for i := 0; i < n; i++ {
+		isStart := i == 0
+		r1 := rune(text[i])
+		if r1 >= 0xD800 && r1 <= 0xDBFF && i+1 < n {
+			if r2 := rune(text[i+1]); r2 >= 0xDC00 && r2 <= 0xDFFF {
+				r1 = (r1 << 10) + r2 + (0x10000 - (0xD800 << 10) - 0xDC00)
+				i++
+			}
+		}
+		if isStart {
+			if !IsIdentifierStartES5AndESNext(r1) {
+				return false
+			}
+		} else {
+			if !IsIdentifierContinueES5AndESNext(r1) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+func IsIdentifierStart(codePoint rune) bool {
+	switch codePoint {
+	case '_', '$',
+		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
+		return true
+	}
+
+	// All ASCII identifier start code points are listed above
+	if codePoint < 0x7F {
+		return false
+	}
+
+	return unicode.Is(idStartES5OrESNext, codePoint)
+}
+
+func IsIdentifierContinue(codePoint rune) bool {
+	switch codePoint {
+	case '_', '$', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
+		return true
+	}
+
+	// All ASCII identifier start code points are listed above
+	if codePoint < 0x7F {
+		return false
+	}
+
+	// ZWNJ and ZWJ are allowed in identifiers
+	if codePoint == 0x200C || codePoint == 0x200D {
+		return true
+	}
+
+	return unicode.Is(idContinueES5OrESNext, codePoint)
+}
+
+func IsIdentifierStartES5AndESNext(codePoint rune) bool {
+	switch codePoint {
+	case '_', '$',
+		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
+		return true
+	}
+
+	// All ASCII identifier start code points are listed above
+	if codePoint < 0x7F {
+		return false
+	}
+
+	return unicode.Is(idStartES5AndESNext, codePoint)
+}
+
+func IsIdentifierContinueES5AndESNext(codePoint rune) bool {
+	switch codePoint {
+	case '_', '$', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
+		'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
+		'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
+		'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
+		'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
+		return true
+	}
+
+	// All ASCII identifier start code points are listed above
+	if codePoint < 0x7F {
+		return false
+	}
+
+	// ZWNJ and ZWJ are allowed in identifiers
+	if codePoint == 0x200C || codePoint == 0x200D {
+		return true
+	}
+
+	return unicode.Is(idContinueES5AndESNext, codePoint)
+}
+
+// See the "White Space Code Points" table in the ECMAScript standard
+func IsWhitespace(codePoint rune) bool {
+	switch codePoint {
+	case
+		'\u0009', // character tabulation
+		'\u000B', // line tabulation
+		'\u000C', // form feed
+		'\u0020', // space
+		'\u00A0', // no-break space
+
+		// Unicode "Space_Separator" code points
+		'\u1680', // ogham space mark
+		'\u2000', // en quad
+		'\u2001', // em quad
+		'\u2002', // en space
+		'\u2003', // em space
+		'\u2004', // three-per-em space
+		'\u2005', // four-per-em space
+		'\u2006', // six-per-em space
+		'\u2007', // figure space
+		'\u2008', // punctuation space
+		'\u2009', // thin space
+		'\u200A', // hair space
+		'\u202F', // narrow no-break space
+		'\u205F', // medium mathematical space
+		'\u3000', // ideographic space
+
+		'\uFEFF': // zero width non-breaking space
+		return true
+
+	default:
+		return false
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_ast/unicode.go b/source/vendor/github.com/evanw/esbuild/internal/js_ast/unicode.go
new file mode 100644
index 0000000..f1d6720
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_ast/unicode.go
@@ -0,0 +1,2065 @@
+// This file was automatically generated by gen-unicode-table.js. Do not edit.
+package js_ast
+
+import "unicode"
+
// idStartES5AndESNext is the table of non-ASCII code points accepted as
// identifier-start characters by IsIdentifierStartES5AndESNext (ASCII cases
// are handled inline there).
// NOTE(review): machine-generated by gen-unicode-table.js (see file header);
// regenerate rather than editing the ranges by hand.
var idStartES5AndESNext = &unicode.RangeTable{
	LatinOffset: 117,
	R16: []unicode.Range16{
		{Lo: 0x41, Hi: 0x5a, Stride: 1},
		{Lo: 0x61, Hi: 0x7a, Stride: 1},
		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
		{Lo: 0xba, Hi: 0xba, Stride: 1},
		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
		{Lo: 0xf8, Hi: 0x21f, Stride: 1},
		{Lo: 0x222, Hi: 0x233, Stride: 1},
		{Lo: 0x250, Hi: 0x2ad, Stride: 1},
		{Lo: 0x2b0, Hi: 0x2b8, Stride: 1},
		{Lo: 0x2bb, Hi: 0x2c1, Stride: 1},
		{Lo: 0x2d0, Hi: 0x2d1, Stride: 1},
		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
		{Lo: 0x37a, Hi: 0x37a, Stride: 1},
		{Lo: 0x386, Hi: 0x386, Stride: 1},
		{Lo: 0x388, Hi: 0x38a, Stride: 1},
		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
		{Lo: 0x3a3, Hi: 0x3ce, Stride: 1},
		{Lo: 0x3d0, Hi: 0x3d7, Stride: 1},
		{Lo: 0x3da, Hi: 0x3f3, Stride: 1},
		{Lo: 0x400, Hi: 0x481, Stride: 1},
		{Lo: 0x48c, Hi: 0x4c4, Stride: 1},
		{Lo: 0x4c7, Hi: 0x4c8, Stride: 1},
		{Lo: 0x4cb, Hi: 0x4cc, Stride: 1},
		{Lo: 0x4d0, Hi: 0x4f5, Stride: 1},
		{Lo: 0x4f8, Hi: 0x4f9, Stride: 1},
		{Lo: 0x531, Hi: 0x556, Stride: 1},
		{Lo: 0x559, Hi: 0x559, Stride: 1},
		{Lo: 0x561, Hi: 0x587, Stride: 1},
		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
		{Lo: 0x5f0, Hi: 0x5f2, Stride: 1},
		{Lo: 0x621, Hi: 0x63a, Stride: 1},
		{Lo: 0x640, Hi: 0x64a, Stride: 1},
		{Lo: 0x671, Hi: 0x6d3, Stride: 1},
		{Lo: 0x6d5, Hi: 0x6d5, Stride: 1},
		{Lo: 0x6e5, Hi: 0x6e6, Stride: 1},
		{Lo: 0x6fa, Hi: 0x6fc, Stride: 1},
		{Lo: 0x710, Hi: 0x710, Stride: 1},
		{Lo: 0x712, Hi: 0x72c, Stride: 1},
		{Lo: 0x780, Hi: 0x7a5, Stride: 1},
		{Lo: 0x905, Hi: 0x939, Stride: 1},
		{Lo: 0x93d, Hi: 0x93d, Stride: 1},
		{Lo: 0x950, Hi: 0x950, Stride: 1},
		{Lo: 0x958, Hi: 0x961, Stride: 1},
		{Lo: 0x985, Hi: 0x98c, Stride: 1},
		{Lo: 0x98f, Hi: 0x990, Stride: 1},
		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
		{Lo: 0x9df, Hi: 0x9e1, Stride: 1},
		{Lo: 0x9f0, Hi: 0x9f1, Stride: 1},
		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
		{Lo: 0xa72, Hi: 0xa74, Stride: 1},
		{Lo: 0xa85, Hi: 0xa8b, Stride: 1},
		{Lo: 0xa8d, Hi: 0xa8d, Stride: 1},
		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
		{Lo: 0xabd, Hi: 0xabd, Stride: 1},
		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
		{Lo: 0xae0, Hi: 0xae0, Stride: 1},
		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
		{Lo: 0xb36, Hi: 0xb39, Stride: 1},
		{Lo: 0xb3d, Hi: 0xb3d, Stride: 1},
		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
		{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
		{Lo: 0xbae, Hi: 0xbb5, Stride: 1},
		{Lo: 0xbb7, Hi: 0xbb9, Stride: 1},
		{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
		{Lo: 0xc2a, Hi: 0xc33, Stride: 1},
		{Lo: 0xc35, Hi: 0xc39, Stride: 1},
		{Lo: 0xc60, Hi: 0xc61, Stride: 1},
		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
		{Lo: 0xcde, Hi: 0xcde, Stride: 1},
		{Lo: 0xce0, Hi: 0xce1, Stride: 1},
		{Lo: 0xd05, Hi: 0xd0c, Stride: 1},
		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
		{Lo: 0xd12, Hi: 0xd28, Stride: 1},
		{Lo: 0xd2a, Hi: 0xd39, Stride: 1},
		{Lo: 0xd60, Hi: 0xd61, Stride: 1},
		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
		{Lo: 0xe01, Hi: 0xe30, Stride: 1},
		{Lo: 0xe32, Hi: 0xe33, Stride: 1},
		{Lo: 0xe40, Hi: 0xe46, Stride: 1},
		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
		{Lo: 0xe87, Hi: 0xe88, Stride: 1},
		{Lo: 0xe8a, Hi: 0xe8a, Stride: 1},
		{Lo: 0xe8d, Hi: 0xe8d, Stride: 1},
		{Lo: 0xe94, Hi: 0xe97, Stride: 1},
		{Lo: 0xe99, Hi: 0xe9f, Stride: 1},
		{Lo: 0xea1, Hi: 0xea3, Stride: 1},
		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
		{Lo: 0xea7, Hi: 0xea7, Stride: 1},
		{Lo: 0xeaa, Hi: 0xeab, Stride: 1},
		{Lo: 0xead, Hi: 0xeb0, Stride: 1},
		{Lo: 0xeb2, Hi: 0xeb3, Stride: 1},
		{Lo: 0xebd, Hi: 0xebd, Stride: 1},
		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
		{Lo: 0xedc, Hi: 0xedd, Stride: 1},
		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
		{Lo: 0xf40, Hi: 0xf47, Stride: 1},
		{Lo: 0xf49, Hi: 0xf6a, Stride: 1},
		{Lo: 0xf88, Hi: 0xf8b, Stride: 1},
	},
	R32: []unicode.Range32{
		{Lo: 0x1000, Hi: 0x1021, Stride: 1},
		{Lo: 0x1023, Hi: 0x1027, Stride: 1},
		{Lo: 0x1029, Hi: 0x102a, Stride: 1},
		{Lo: 0x1050, Hi: 0x1055, Stride: 1},
		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
		{Lo: 0x10d0, Hi: 0x10f6, Stride: 1},
		{Lo: 0x1100, Hi: 0x1159, Stride: 1},
		{Lo: 0x115f, Hi: 0x11a2, Stride: 1},
		{Lo: 0x11a8, Hi: 0x11f9, Stride: 1},
		{Lo: 0x1200, Hi: 0x1206, Stride: 1},
		{Lo: 0x1208, Hi: 0x1246, Stride: 1},
		{Lo: 0x1248, Hi: 0x1248, Stride: 1},
		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
		{Lo: 0x1260, Hi: 0x1286, Stride: 1},
		{Lo: 0x1288, Hi: 0x1288, Stride: 1},
		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
		{Lo: 0x1290, Hi: 0x12ae, Stride: 1},
		{Lo: 0x12b0, Hi: 0x12b0, Stride: 1},
		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
		{Lo: 0x12c8, Hi: 0x12ce, Stride: 1},
		{Lo: 0x12d0, Hi: 0x12d6, Stride: 1},
		{Lo: 0x12d8, Hi: 0x12ee, Stride: 1},
		{Lo: 0x12f0, Hi: 0x130e, Stride: 1},
		{Lo: 0x1310, Hi: 0x1310, Stride: 1},
		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
		{Lo: 0x1318, Hi: 0x131e, Stride: 1},
		{Lo: 0x1320, Hi: 0x1346, Stride: 1},
		{Lo: 0x1348, Hi: 0x135a, Stride: 1},
		{Lo: 0x13a0, Hi: 0x13f4, Stride: 1},
		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
		{Lo: 0x166f, Hi: 0x1676, Stride: 1},
		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
		{Lo: 0x1780, Hi: 0x17b3, Stride: 1},
		{Lo: 0x1820, Hi: 0x1877, Stride: 1},
		{Lo: 0x1880, Hi: 0x18a8, Stride: 1},
		{Lo: 0x1e00, Hi: 0x1e9b, Stride: 1},
		{Lo: 0x1ea0, Hi: 0x1ef9, Stride: 1},
		{Lo: 0x1f00, Hi: 0x1f15, Stride: 1},
		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
		{Lo: 0x2119, Hi: 0x211d, Stride: 1},
		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
		{Lo: 0x212a, Hi: 0x212d, Stride: 1},
		{Lo: 0x212f, Hi: 0x2131, Stride: 1},
		{Lo: 0x2133, Hi: 0x2139, Stride: 1},
		{Lo: 0x3005, Hi: 0x3006, Stride: 1},
		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
		{Lo: 0x3041, Hi: 0x3094, Stride: 1},
		{Lo: 0x309d, Hi: 0x309e, Stride: 1},
		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
		{Lo: 0x30fc, Hi: 0x30fe, Stride: 1},
		{Lo: 0x3105, Hi: 0x312c, Stride: 1},
		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
		{Lo: 0x31a0, Hi: 0x31b7, Stride: 1},
		{Lo: 0x3400, Hi: 0x4db5, Stride: 1},
		{Lo: 0x4e00, Hi: 0x9fa5, Stride: 1},
		{Lo: 0xa000, Hi: 0xa48c, Stride: 1},
		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
		{Lo: 0xf900, Hi: 0xfa2d, Stride: 1},
		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
		{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 1},
		{Lo: 0xfb1f, Hi: 0xfb28, Stride: 1},
		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
		{Lo: 0xfe70, Hi: 0xfe72, Stride: 1},
		{Lo: 0xfe74, Hi: 0xfe74, Stride: 1},
		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
		{Lo: 0xff66, Hi: 0xffbe, Stride: 1},
		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
	},
}
+
// idContinueES5AndESNext is the table of non-ASCII code points accepted as
// identifier-continuation characters by IsIdentifierContinueES5AndESNext
// (ASCII cases and ZWNJ/ZWJ are handled inline there).
// NOTE(review): machine-generated by gen-unicode-table.js (see file header);
// regenerate rather than editing the ranges by hand.
var idContinueES5AndESNext = &unicode.RangeTable{
	LatinOffset: 128,
	R16: []unicode.Range16{
		{Lo: 0x30, Hi: 0x39, Stride: 1},
		{Lo: 0x41, Hi: 0x5a, Stride: 1},
		{Lo: 0x5f, Hi: 0x5f, Stride: 1},
		{Lo: 0x61, Hi: 0x7a, Stride: 1},
		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
		{Lo: 0xba, Hi: 0xba, Stride: 1},
		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
		{Lo: 0xf8, Hi: 0x21f, Stride: 1},
		{Lo: 0x222, Hi: 0x233, Stride: 1},
		{Lo: 0x250, Hi: 0x2ad, Stride: 1},
		{Lo: 0x2b0, Hi: 0x2b8, Stride: 1},
		{Lo: 0x2bb, Hi: 0x2c1, Stride: 1},
		{Lo: 0x2d0, Hi: 0x2d1, Stride: 1},
		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
		{Lo: 0x300, Hi: 0x34e, Stride: 1},
		{Lo: 0x360, Hi: 0x362, Stride: 1},
		{Lo: 0x37a, Hi: 0x37a, Stride: 1},
		{Lo: 0x386, Hi: 0x386, Stride: 1},
		{Lo: 0x388, Hi: 0x38a, Stride: 1},
		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
		{Lo: 0x3a3, Hi: 0x3ce, Stride: 1},
		{Lo: 0x3d0, Hi: 0x3d7, Stride: 1},
		{Lo: 0x3da, Hi: 0x3f3, Stride: 1},
		{Lo: 0x400, Hi: 0x481, Stride: 1},
		{Lo: 0x483, Hi: 0x486, Stride: 1},
		{Lo: 0x48c, Hi: 0x4c4, Stride: 1},
		{Lo: 0x4c7, Hi: 0x4c8, Stride: 1},
		{Lo: 0x4cb, Hi: 0x4cc, Stride: 1},
		{Lo: 0x4d0, Hi: 0x4f5, Stride: 1},
		{Lo: 0x4f8, Hi: 0x4f9, Stride: 1},
		{Lo: 0x531, Hi: 0x556, Stride: 1},
		{Lo: 0x559, Hi: 0x559, Stride: 1},
		{Lo: 0x561, Hi: 0x587, Stride: 1},
		{Lo: 0x591, Hi: 0x5a1, Stride: 1},
		{Lo: 0x5a3, Hi: 0x5b9, Stride: 1},
		{Lo: 0x5bb, Hi: 0x5bd, Stride: 1},
		{Lo: 0x5bf, Hi: 0x5bf, Stride: 1},
		{Lo: 0x5c1, Hi: 0x5c2, Stride: 1},
		{Lo: 0x5c4, Hi: 0x5c4, Stride: 1},
		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
		{Lo: 0x5f0, Hi: 0x5f2, Stride: 1},
		{Lo: 0x621, Hi: 0x63a, Stride: 1},
		{Lo: 0x640, Hi: 0x655, Stride: 1},
		{Lo: 0x660, Hi: 0x669, Stride: 1},
		{Lo: 0x670, Hi: 0x6d3, Stride: 1},
		{Lo: 0x6d5, Hi: 0x6dc, Stride: 1},
		{Lo: 0x6df, Hi: 0x6e8, Stride: 1},
		{Lo: 0x6ea, Hi: 0x6ed, Stride: 1},
		{Lo: 0x6f0, Hi: 0x6fc, Stride: 1},
		{Lo: 0x710, Hi: 0x72c, Stride: 1},
		{Lo: 0x730, Hi: 0x74a, Stride: 1},
		{Lo: 0x780, Hi: 0x7b0, Stride: 1},
		{Lo: 0x901, Hi: 0x903, Stride: 1},
		{Lo: 0x905, Hi: 0x939, Stride: 1},
		{Lo: 0x93c, Hi: 0x94d, Stride: 1},
		{Lo: 0x950, Hi: 0x954, Stride: 1},
		{Lo: 0x958, Hi: 0x963, Stride: 1},
		{Lo: 0x966, Hi: 0x96f, Stride: 1},
		{Lo: 0x981, Hi: 0x983, Stride: 1},
		{Lo: 0x985, Hi: 0x98c, Stride: 1},
		{Lo: 0x98f, Hi: 0x990, Stride: 1},
		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
		{Lo: 0x9bc, Hi: 0x9bc, Stride: 1},
		{Lo: 0x9be, Hi: 0x9c4, Stride: 1},
		{Lo: 0x9c7, Hi: 0x9c8, Stride: 1},
		{Lo: 0x9cb, Hi: 0x9cd, Stride: 1},
		{Lo: 0x9d7, Hi: 0x9d7, Stride: 1},
		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
		{Lo: 0x9df, Hi: 0x9e3, Stride: 1},
		{Lo: 0x9e6, Hi: 0x9f1, Stride: 1},
		{Lo: 0xa02, Hi: 0xa02, Stride: 1},
		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
		{Lo: 0xa3c, Hi: 0xa3c, Stride: 1},
		{Lo: 0xa3e, Hi: 0xa42, Stride: 1},
		{Lo: 0xa47, Hi: 0xa48, Stride: 1},
		{Lo: 0xa4b, Hi: 0xa4d, Stride: 1},
		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
		{Lo: 0xa66, Hi: 0xa74, Stride: 1},
		{Lo: 0xa81, Hi: 0xa83, Stride: 1},
		{Lo: 0xa85, Hi: 0xa8b, Stride: 1},
		{Lo: 0xa8d, Hi: 0xa8d, Stride: 1},
		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
		{Lo: 0xabc, Hi: 0xac5, Stride: 1},
		{Lo: 0xac7, Hi: 0xac9, Stride: 1},
		{Lo: 0xacb, Hi: 0xacd, Stride: 1},
		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
		{Lo: 0xae0, Hi: 0xae0, Stride: 1},
		{Lo: 0xae6, Hi: 0xaef, Stride: 1},
		{Lo: 0xb01, Hi: 0xb03, Stride: 1},
		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
		{Lo: 0xb36, Hi: 0xb39, Stride: 1},
		{Lo: 0xb3c, Hi: 0xb43, Stride: 1},
		{Lo: 0xb47, Hi: 0xb48, Stride: 1},
		{Lo: 0xb4b, Hi: 0xb4d, Stride: 1},
		{Lo: 0xb56, Hi: 0xb57, Stride: 1},
		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
		{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
		{Lo: 0xb66, Hi: 0xb6f, Stride: 1},
		{Lo: 0xb82, Hi: 0xb83, Stride: 1},
		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
		{Lo: 0xbae, Hi: 0xbb5, Stride: 1},
		{Lo: 0xbb7, Hi: 0xbb9, Stride: 1},
		{Lo: 0xbbe, Hi: 0xbc2, Stride: 1},
		{Lo: 0xbc6, Hi: 0xbc8, Stride: 1},
		{Lo: 0xbca, Hi: 0xbcd, Stride: 1},
		{Lo: 0xbd7, Hi: 0xbd7, Stride: 1},
		{Lo: 0xbe7, Hi: 0xbef, Stride: 1},
		{Lo: 0xc01, Hi: 0xc03, Stride: 1},
		{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
		{Lo: 0xc2a, Hi: 0xc33, Stride: 1},
		{Lo: 0xc35, Hi: 0xc39, Stride: 1},
		{Lo: 0xc3e, Hi: 0xc44, Stride: 1},
		{Lo: 0xc46, Hi: 0xc48, Stride: 1},
		{Lo: 0xc4a, Hi: 0xc4d, Stride: 1},
		{Lo: 0xc55, Hi: 0xc56, Stride: 1},
		{Lo: 0xc60, Hi: 0xc61, Stride: 1},
		{Lo: 0xc66, Hi: 0xc6f, Stride: 1},
		{Lo: 0xc82, Hi: 0xc83, Stride: 1},
		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
		{Lo: 0xcbe, Hi: 0xcc4, Stride: 1},
		{Lo: 0xcc6, Hi: 0xcc8, Stride: 1},
		{Lo: 0xcca, Hi: 0xccd, Stride: 1},
		{Lo: 0xcd5, Hi: 0xcd6, Stride: 1},
		{Lo: 0xcde, Hi: 0xcde, Stride: 1},
		{Lo: 0xce0, Hi: 0xce1, Stride: 1},
		{Lo: 0xce6, Hi: 0xcef, Stride: 1},
		{Lo: 0xd02, Hi: 0xd03, Stride: 1},
		{Lo: 0xd05, Hi: 0xd0c, Stride: 1},
		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
		{Lo: 0xd12, Hi: 0xd28, Stride: 1},
		{Lo: 0xd2a, Hi: 0xd39, Stride: 1},
		{Lo: 0xd3e, Hi: 0xd43, Stride: 1},
		{Lo: 0xd46, Hi: 0xd48, Stride: 1},
		{Lo: 0xd4a, Hi: 0xd4d, Stride: 1},
		{Lo: 0xd57, Hi: 0xd57, Stride: 1},
		{Lo: 0xd60, Hi: 0xd61, Stride: 1},
		{Lo: 0xd66, Hi: 0xd6f, Stride: 1},
		{Lo: 0xd82, Hi: 0xd83, Stride: 1},
		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
		{Lo: 0xdca, Hi: 0xdca, Stride: 1},
		{Lo: 0xdcf, Hi: 0xdd4, Stride: 1},
		{Lo: 0xdd6, Hi: 0xdd6, Stride: 1},
		{Lo: 0xdd8, Hi: 0xddf, Stride: 1},
		{Lo: 0xdf2, Hi: 0xdf3, Stride: 1},
		{Lo: 0xe01, Hi: 0xe3a, Stride: 1},
		{Lo: 0xe40, Hi: 0xe4e, Stride: 1},
		{Lo: 0xe50, Hi: 0xe59, Stride: 1},
		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
		{Lo: 0xe87, Hi: 0xe88, Stride: 1},
		{Lo: 0xe8a, Hi: 0xe8a, Stride: 1},
		{Lo: 0xe8d, Hi: 0xe8d, Stride: 1},
		{Lo: 0xe94, Hi: 0xe97, Stride: 1},
		{Lo: 0xe99, Hi: 0xe9f, Stride: 1},
		{Lo: 0xea1, Hi: 0xea3, Stride: 1},
		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
		{Lo: 0xea7, Hi: 0xea7, Stride: 1},
		{Lo: 0xeaa, Hi: 0xeab, Stride: 1},
		{Lo: 0xead, Hi: 0xeb9, Stride: 1},
		{Lo: 0xebb, Hi: 0xebd, Stride: 1},
		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
		{Lo: 0xec8, Hi: 0xecd, Stride: 1},
		{Lo: 0xed0, Hi: 0xed9, Stride: 1},
		{Lo: 0xedc, Hi: 0xedd, Stride: 1},
		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
		{Lo: 0xf18, Hi: 0xf19, Stride: 1},
		{Lo: 0xf20, Hi: 0xf29, Stride: 1},
		{Lo: 0xf35, Hi: 0xf35, Stride: 1},
		{Lo: 0xf37, Hi: 0xf37, Stride: 1},
		{Lo: 0xf39, Hi: 0xf39, Stride: 1},
		{Lo: 0xf3e, Hi: 0xf47, Stride: 1},
		{Lo: 0xf49, Hi: 0xf6a, Stride: 1},
		{Lo: 0xf71, Hi: 0xf84, Stride: 1},
		{Lo: 0xf86, Hi: 0xf8b, Stride: 1},
		{Lo: 0xf90, Hi: 0xf97, Stride: 1},
		{Lo: 0xf99, Hi: 0xfbc, Stride: 1},
		{Lo: 0xfc6, Hi: 0xfc6, Stride: 1},
	},
	R32: []unicode.Range32{
		{Lo: 0x1000, Hi: 0x1021, Stride: 1},
		{Lo: 0x1023, Hi: 0x1027, Stride: 1},
		{Lo: 0x1029, Hi: 0x102a, Stride: 1},
		{Lo: 0x102c, Hi: 0x1032, Stride: 1},
		{Lo: 0x1036, Hi: 0x1039, Stride: 1},
		{Lo: 0x1040, Hi: 0x1049, Stride: 1},
		{Lo: 0x1050, Hi: 0x1059, Stride: 1},
		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
		{Lo: 0x10d0, Hi: 0x10f6, Stride: 1},
		{Lo: 0x1100, Hi: 0x1159, Stride: 1},
		{Lo: 0x115f, Hi: 0x11a2, Stride: 1},
		{Lo: 0x11a8, Hi: 0x11f9, Stride: 1},
		{Lo: 0x1200, Hi: 0x1206, Stride: 1},
		{Lo: 0x1208, Hi: 0x1246, Stride: 1},
		{Lo: 0x1248, Hi: 0x1248, Stride: 1},
		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
		{Lo: 0x1260, Hi: 0x1286, Stride: 1},
		{Lo: 0x1288, Hi: 0x1288, Stride: 1},
		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
		{Lo: 0x1290, Hi: 0x12ae, Stride: 1},
		{Lo: 0x12b0, Hi: 0x12b0, Stride: 1},
		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
		{Lo: 0x12c8, Hi: 0x12ce, Stride: 1},
		{Lo: 0x12d0, Hi: 0x12d6, Stride: 1},
		{Lo: 0x12d8, Hi: 0x12ee, Stride: 1},
		{Lo: 0x12f0, Hi: 0x130e, Stride: 1},
		{Lo: 0x1310, Hi: 0x1310, Stride: 1},
		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
		{Lo: 0x1318, Hi: 0x131e, Stride: 1},
		{Lo: 0x1320, Hi: 0x1346, Stride: 1},
		{Lo: 0x1348, Hi: 0x135a, Stride: 1},
		{Lo: 0x1369, Hi: 0x1371, Stride: 1},
		{Lo: 0x13a0, Hi: 0x13f4, Stride: 1},
		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
		{Lo: 0x166f, Hi: 0x1676, Stride: 1},
		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
		{Lo: 0x1780, Hi: 0x17d3, Stride: 1},
		{Lo: 0x17e0, Hi: 0x17e9, Stride: 1},
		{Lo: 0x1810, Hi: 0x1819, Stride: 1},
		{Lo: 0x1820, Hi: 0x1877, Stride: 1},
		{Lo: 0x1880, Hi: 0x18a9, Stride: 1},
		{Lo: 0x1e00, Hi: 0x1e9b, Stride: 1},
		{Lo: 0x1ea0, Hi: 0x1ef9, Stride: 1},
		{Lo: 0x1f00, Hi: 0x1f15, Stride: 1},
		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
		{Lo: 0x203f, Hi: 0x2040, Stride: 1},
		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
		{Lo: 0x20d0, Hi: 0x20dc, Stride: 1},
		{Lo: 0x20e1, Hi: 0x20e1, Stride: 1},
		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
		{Lo: 0x2119, Hi: 0x211d, Stride: 1},
		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
		{Lo: 0x212a, Hi: 0x212d, Stride: 1},
		{Lo: 0x212f, Hi: 0x2131, Stride: 1},
		{Lo: 0x2133, Hi: 0x2139, Stride: 1},
		{Lo: 0x3005, Hi: 0x3006, Stride: 1},
		{Lo: 0x302a, Hi: 0x302f, Stride: 1},
		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
		{Lo: 0x3041, Hi: 0x3094, Stride: 1},
		{Lo: 0x3099, Hi: 0x309a, Stride: 1},
		{Lo: 0x309d, Hi: 0x309e, Stride: 1},
		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
		{Lo: 0x30fc, Hi: 0x30fe, Stride: 1},
		{Lo: 0x3105, Hi: 0x312c, Stride: 1},
		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
		{Lo: 0x31a0, Hi: 0x31b7, Stride: 1},
		{Lo: 0x3400, Hi: 0x4db5, Stride: 1},
		{Lo: 0x4e00, Hi: 0x9fa5, Stride: 1},
		{Lo: 0xa000, Hi: 0xa48c, Stride: 1},
		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
		{Lo: 0xf900, Hi: 0xfa2d, Stride: 1},
		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
		{Lo: 0xfb1d, Hi: 0xfb28, Stride: 1},
		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
		{Lo: 0xfe20, Hi: 0xfe23, Stride: 1},
		{Lo: 0xfe33, Hi: 0xfe34, Stride: 1},
		{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 1},
		{Lo: 0xfe70, Hi: 0xfe72, Stride: 1},
		{Lo: 0xfe74, Hi: 0xfe74, Stride: 1},
		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
		{Lo: 0xff10, Hi: 0xff19, Stride: 1},
		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
		{Lo: 0xff3f, Hi: 0xff3f, Stride: 1},
		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
		{Lo: 0xff66, Hi: 0xffbe, Stride: 1},
		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
	},
}
+
+var idStartES5OrESNext = &unicode.RangeTable{
+	LatinOffset: 117,
+	R16: []unicode.Range16{
+		{Lo: 0x41, Hi: 0x5a, Stride: 1},
+		{Lo: 0x61, Hi: 0x7a, Stride: 1},
+		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
+		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
+		{Lo: 0xba, Hi: 0xba, Stride: 1},
+		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
+		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
+		{Lo: 0xf8, Hi: 0x2c1, Stride: 1},
+		{Lo: 0x2c6, Hi: 0x2d1, Stride: 1},
+		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
+		{Lo: 0x2ec, Hi: 0x2ec, Stride: 1},
+		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
+		{Lo: 0x370, Hi: 0x374, Stride: 1},
+		{Lo: 0x376, Hi: 0x377, Stride: 1},
+		{Lo: 0x37a, Hi: 0x37d, Stride: 1},
+		{Lo: 0x37f, Hi: 0x37f, Stride: 1},
+		{Lo: 0x386, Hi: 0x386, Stride: 1},
+		{Lo: 0x388, Hi: 0x38a, Stride: 1},
+		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
+		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
+		{Lo: 0x3a3, Hi: 0x3f5, Stride: 1},
+		{Lo: 0x3f7, Hi: 0x481, Stride: 1},
+		{Lo: 0x48a, Hi: 0x52f, Stride: 1},
+		{Lo: 0x531, Hi: 0x556, Stride: 1},
+		{Lo: 0x559, Hi: 0x559, Stride: 1},
+		{Lo: 0x560, Hi: 0x588, Stride: 1},
+		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
+		{Lo: 0x5ef, Hi: 0x5f2, Stride: 1},
+		{Lo: 0x620, Hi: 0x64a, Stride: 1},
+		{Lo: 0x66e, Hi: 0x66f, Stride: 1},
+		{Lo: 0x671, Hi: 0x6d3, Stride: 1},
+		{Lo: 0x6d5, Hi: 0x6d5, Stride: 1},
+		{Lo: 0x6e5, Hi: 0x6e6, Stride: 1},
+		{Lo: 0x6ee, Hi: 0x6ef, Stride: 1},
+		{Lo: 0x6fa, Hi: 0x6fc, Stride: 1},
+		{Lo: 0x6ff, Hi: 0x6ff, Stride: 1},
+		{Lo: 0x710, Hi: 0x710, Stride: 1},
+		{Lo: 0x712, Hi: 0x72f, Stride: 1},
+		{Lo: 0x74d, Hi: 0x7a5, Stride: 1},
+		{Lo: 0x7b1, Hi: 0x7b1, Stride: 1},
+		{Lo: 0x7ca, Hi: 0x7ea, Stride: 1},
+		{Lo: 0x7f4, Hi: 0x7f5, Stride: 1},
+		{Lo: 0x7fa, Hi: 0x7fa, Stride: 1},
+		{Lo: 0x800, Hi: 0x815, Stride: 1},
+		{Lo: 0x81a, Hi: 0x81a, Stride: 1},
+		{Lo: 0x824, Hi: 0x824, Stride: 1},
+		{Lo: 0x828, Hi: 0x828, Stride: 1},
+		{Lo: 0x840, Hi: 0x858, Stride: 1},
+		{Lo: 0x860, Hi: 0x86a, Stride: 1},
+		{Lo: 0x870, Hi: 0x887, Stride: 1},
+		{Lo: 0x889, Hi: 0x88e, Stride: 1},
+		{Lo: 0x8a0, Hi: 0x8c9, Stride: 1},
+		{Lo: 0x904, Hi: 0x939, Stride: 1},
+		{Lo: 0x93d, Hi: 0x93d, Stride: 1},
+		{Lo: 0x950, Hi: 0x950, Stride: 1},
+		{Lo: 0x958, Hi: 0x961, Stride: 1},
+		{Lo: 0x971, Hi: 0x980, Stride: 1},
+		{Lo: 0x985, Hi: 0x98c, Stride: 1},
+		{Lo: 0x98f, Hi: 0x990, Stride: 1},
+		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
+		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
+		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
+		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
+		{Lo: 0x9bd, Hi: 0x9bd, Stride: 1},
+		{Lo: 0x9ce, Hi: 0x9ce, Stride: 1},
+		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
+		{Lo: 0x9df, Hi: 0x9e1, Stride: 1},
+		{Lo: 0x9f0, Hi: 0x9f1, Stride: 1},
+		{Lo: 0x9fc, Hi: 0x9fc, Stride: 1},
+		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
+		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
+		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
+		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
+		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
+		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
+		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
+		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
+		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
+		{Lo: 0xa72, Hi: 0xa74, Stride: 1},
+		{Lo: 0xa85, Hi: 0xa8d, Stride: 1},
+		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
+		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
+		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
+		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
+		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
+		{Lo: 0xabd, Hi: 0xabd, Stride: 1},
+		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
+		{Lo: 0xae0, Hi: 0xae1, Stride: 1},
+		{Lo: 0xaf9, Hi: 0xaf9, Stride: 1},
+		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
+		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
+		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
+		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
+		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
+		{Lo: 0xb35, Hi: 0xb39, Stride: 1},
+		{Lo: 0xb3d, Hi: 0xb3d, Stride: 1},
+		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
+		{Lo: 0xb5f, Hi: 0xb61, Stride: 1},
+		{Lo: 0xb71, Hi: 0xb71, Stride: 1},
+		{Lo: 0xb83, Hi: 0xb83, Stride: 1},
+		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
+		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
+		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
+		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
+		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
+		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
+		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
+		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
+		{Lo: 0xbae, Hi: 0xbb9, Stride: 1},
+		{Lo: 0xbd0, Hi: 0xbd0, Stride: 1},
+		{Lo: 0xc05, Hi: 0xc0c, Stride: 1},
+		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
+		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
+		{Lo: 0xc2a, Hi: 0xc39, Stride: 1},
+		{Lo: 0xc3d, Hi: 0xc3d, Stride: 1},
+		{Lo: 0xc58, Hi: 0xc5a, Stride: 1},
+		{Lo: 0xc5d, Hi: 0xc5d, Stride: 1},
+		{Lo: 0xc60, Hi: 0xc61, Stride: 1},
+		{Lo: 0xc80, Hi: 0xc80, Stride: 1},
+		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
+		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
+		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
+		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
+		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
+		{Lo: 0xcbd, Hi: 0xcbd, Stride: 1},
+		{Lo: 0xcdd, Hi: 0xcde, Stride: 1},
+		{Lo: 0xce0, Hi: 0xce1, Stride: 1},
+		{Lo: 0xcf1, Hi: 0xcf2, Stride: 1},
+		{Lo: 0xd04, Hi: 0xd0c, Stride: 1},
+		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
+		{Lo: 0xd12, Hi: 0xd3a, Stride: 1},
+		{Lo: 0xd3d, Hi: 0xd3d, Stride: 1},
+		{Lo: 0xd4e, Hi: 0xd4e, Stride: 1},
+		{Lo: 0xd54, Hi: 0xd56, Stride: 1},
+		{Lo: 0xd5f, Hi: 0xd61, Stride: 1},
+		{Lo: 0xd7a, Hi: 0xd7f, Stride: 1},
+		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
+		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
+		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
+		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
+		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
+		{Lo: 0xe01, Hi: 0xe30, Stride: 1},
+		{Lo: 0xe32, Hi: 0xe33, Stride: 1},
+		{Lo: 0xe40, Hi: 0xe46, Stride: 1},
+		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
+		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
+		{Lo: 0xe86, Hi: 0xe8a, Stride: 1},
+		{Lo: 0xe8c, Hi: 0xea3, Stride: 1},
+		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
+		{Lo: 0xea7, Hi: 0xeb0, Stride: 1},
+		{Lo: 0xeb2, Hi: 0xeb3, Stride: 1},
+		{Lo: 0xebd, Hi: 0xebd, Stride: 1},
+		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
+		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
+		{Lo: 0xedc, Hi: 0xedf, Stride: 1},
+		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
+		{Lo: 0xf40, Hi: 0xf47, Stride: 1},
+		{Lo: 0xf49, Hi: 0xf6c, Stride: 1},
+		{Lo: 0xf88, Hi: 0xf8c, Stride: 1},
+	},
+	R32: []unicode.Range32{
+		{Lo: 0x1000, Hi: 0x102a, Stride: 1},
+		{Lo: 0x103f, Hi: 0x103f, Stride: 1},
+		{Lo: 0x1050, Hi: 0x1055, Stride: 1},
+		{Lo: 0x105a, Hi: 0x105d, Stride: 1},
+		{Lo: 0x1061, Hi: 0x1061, Stride: 1},
+		{Lo: 0x1065, Hi: 0x1066, Stride: 1},
+		{Lo: 0x106e, Hi: 0x1070, Stride: 1},
+		{Lo: 0x1075, Hi: 0x1081, Stride: 1},
+		{Lo: 0x108e, Hi: 0x108e, Stride: 1},
+		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
+		{Lo: 0x10c7, Hi: 0x10c7, Stride: 1},
+		{Lo: 0x10cd, Hi: 0x10cd, Stride: 1},
+		{Lo: 0x10d0, Hi: 0x10fa, Stride: 1},
+		{Lo: 0x10fc, Hi: 0x1248, Stride: 1},
+		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
+		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
+		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
+		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
+		{Lo: 0x1260, Hi: 0x1288, Stride: 1},
+		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
+		{Lo: 0x1290, Hi: 0x12b0, Stride: 1},
+		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
+		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
+		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
+		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
+		{Lo: 0x12c8, Hi: 0x12d6, Stride: 1},
+		{Lo: 0x12d8, Hi: 0x1310, Stride: 1},
+		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
+		{Lo: 0x1318, Hi: 0x135a, Stride: 1},
+		{Lo: 0x1380, Hi: 0x138f, Stride: 1},
+		{Lo: 0x13a0, Hi: 0x13f5, Stride: 1},
+		{Lo: 0x13f8, Hi: 0x13fd, Stride: 1},
+		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
+		{Lo: 0x166f, Hi: 0x167f, Stride: 1},
+		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
+		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
+		{Lo: 0x16ee, Hi: 0x16f8, Stride: 1},
+		{Lo: 0x1700, Hi: 0x1711, Stride: 1},
+		{Lo: 0x171f, Hi: 0x1731, Stride: 1},
+		{Lo: 0x1740, Hi: 0x1751, Stride: 1},
+		{Lo: 0x1760, Hi: 0x176c, Stride: 1},
+		{Lo: 0x176e, Hi: 0x1770, Stride: 1},
+		{Lo: 0x1780, Hi: 0x17b3, Stride: 1},
+		{Lo: 0x17d7, Hi: 0x17d7, Stride: 1},
+		{Lo: 0x17dc, Hi: 0x17dc, Stride: 1},
+		{Lo: 0x1820, Hi: 0x1878, Stride: 1},
+		{Lo: 0x1880, Hi: 0x18a8, Stride: 1},
+		{Lo: 0x18aa, Hi: 0x18aa, Stride: 1},
+		{Lo: 0x18b0, Hi: 0x18f5, Stride: 1},
+		{Lo: 0x1900, Hi: 0x191e, Stride: 1},
+		{Lo: 0x1950, Hi: 0x196d, Stride: 1},
+		{Lo: 0x1970, Hi: 0x1974, Stride: 1},
+		{Lo: 0x1980, Hi: 0x19ab, Stride: 1},
+		{Lo: 0x19b0, Hi: 0x19c9, Stride: 1},
+		{Lo: 0x1a00, Hi: 0x1a16, Stride: 1},
+		{Lo: 0x1a20, Hi: 0x1a54, Stride: 1},
+		{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 1},
+		{Lo: 0x1b05, Hi: 0x1b33, Stride: 1},
+		{Lo: 0x1b45, Hi: 0x1b4c, Stride: 1},
+		{Lo: 0x1b83, Hi: 0x1ba0, Stride: 1},
+		{Lo: 0x1bae, Hi: 0x1baf, Stride: 1},
+		{Lo: 0x1bba, Hi: 0x1be5, Stride: 1},
+		{Lo: 0x1c00, Hi: 0x1c23, Stride: 1},
+		{Lo: 0x1c4d, Hi: 0x1c4f, Stride: 1},
+		{Lo: 0x1c5a, Hi: 0x1c7d, Stride: 1},
+		{Lo: 0x1c80, Hi: 0x1c88, Stride: 1},
+		{Lo: 0x1c90, Hi: 0x1cba, Stride: 1},
+		{Lo: 0x1cbd, Hi: 0x1cbf, Stride: 1},
+		{Lo: 0x1ce9, Hi: 0x1cec, Stride: 1},
+		{Lo: 0x1cee, Hi: 0x1cf3, Stride: 1},
+		{Lo: 0x1cf5, Hi: 0x1cf6, Stride: 1},
+		{Lo: 0x1cfa, Hi: 0x1cfa, Stride: 1},
+		{Lo: 0x1d00, Hi: 0x1dbf, Stride: 1},
+		{Lo: 0x1e00, Hi: 0x1f15, Stride: 1},
+		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
+		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
+		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
+		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
+		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
+		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
+		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
+		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
+		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
+		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
+		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
+		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
+		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
+		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
+		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
+		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
+		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
+		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
+		{Lo: 0x2071, Hi: 0x2071, Stride: 1},
+		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
+		{Lo: 0x2090, Hi: 0x209c, Stride: 1},
+		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
+		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
+		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
+		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
+		{Lo: 0x2118, Hi: 0x211d, Stride: 1},
+		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
+		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
+		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
+		{Lo: 0x212a, Hi: 0x2139, Stride: 1},
+		{Lo: 0x213c, Hi: 0x213f, Stride: 1},
+		{Lo: 0x2145, Hi: 0x2149, Stride: 1},
+		{Lo: 0x214e, Hi: 0x214e, Stride: 1},
+		{Lo: 0x2160, Hi: 0x2188, Stride: 1},
+		{Lo: 0x2c00, Hi: 0x2ce4, Stride: 1},
+		{Lo: 0x2ceb, Hi: 0x2cee, Stride: 1},
+		{Lo: 0x2cf2, Hi: 0x2cf3, Stride: 1},
+		{Lo: 0x2d00, Hi: 0x2d25, Stride: 1},
+		{Lo: 0x2d27, Hi: 0x2d27, Stride: 1},
+		{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 1},
+		{Lo: 0x2d30, Hi: 0x2d67, Stride: 1},
+		{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 1},
+		{Lo: 0x2d80, Hi: 0x2d96, Stride: 1},
+		{Lo: 0x2da0, Hi: 0x2da6, Stride: 1},
+		{Lo: 0x2da8, Hi: 0x2dae, Stride: 1},
+		{Lo: 0x2db0, Hi: 0x2db6, Stride: 1},
+		{Lo: 0x2db8, Hi: 0x2dbe, Stride: 1},
+		{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 1},
+		{Lo: 0x2dc8, Hi: 0x2dce, Stride: 1},
+		{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 1},
+		{Lo: 0x2dd8, Hi: 0x2dde, Stride: 1},
+		{Lo: 0x3005, Hi: 0x3007, Stride: 1},
+		{Lo: 0x3021, Hi: 0x3029, Stride: 1},
+		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
+		{Lo: 0x3038, Hi: 0x303c, Stride: 1},
+		{Lo: 0x3041, Hi: 0x3096, Stride: 1},
+		{Lo: 0x309b, Hi: 0x309f, Stride: 1},
+		{Lo: 0x30a1, Hi: 0x30fa, Stride: 1},
+		{Lo: 0x30fc, Hi: 0x30ff, Stride: 1},
+		{Lo: 0x3105, Hi: 0x312f, Stride: 1},
+		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
+		{Lo: 0x31a0, Hi: 0x31bf, Stride: 1},
+		{Lo: 0x31f0, Hi: 0x31ff, Stride: 1},
+		{Lo: 0x3400, Hi: 0x4dbf, Stride: 1},
+		{Lo: 0x4e00, Hi: 0xa48c, Stride: 1},
+		{Lo: 0xa4d0, Hi: 0xa4fd, Stride: 1},
+		{Lo: 0xa500, Hi: 0xa60c, Stride: 1},
+		{Lo: 0xa610, Hi: 0xa61f, Stride: 1},
+		{Lo: 0xa62a, Hi: 0xa62b, Stride: 1},
+		{Lo: 0xa640, Hi: 0xa66e, Stride: 1},
+		{Lo: 0xa67f, Hi: 0xa69d, Stride: 1},
+		{Lo: 0xa6a0, Hi: 0xa6ef, Stride: 1},
+		{Lo: 0xa717, Hi: 0xa71f, Stride: 1},
+		{Lo: 0xa722, Hi: 0xa788, Stride: 1},
+		{Lo: 0xa78b, Hi: 0xa7ca, Stride: 1},
+		{Lo: 0xa7d0, Hi: 0xa7d1, Stride: 1},
+		{Lo: 0xa7d3, Hi: 0xa7d3, Stride: 1},
+		{Lo: 0xa7d5, Hi: 0xa7d9, Stride: 1},
+		{Lo: 0xa7f2, Hi: 0xa801, Stride: 1},
+		{Lo: 0xa803, Hi: 0xa805, Stride: 1},
+		{Lo: 0xa807, Hi: 0xa80a, Stride: 1},
+		{Lo: 0xa80c, Hi: 0xa822, Stride: 1},
+		{Lo: 0xa840, Hi: 0xa873, Stride: 1},
+		{Lo: 0xa882, Hi: 0xa8b3, Stride: 1},
+		{Lo: 0xa8f2, Hi: 0xa8f7, Stride: 1},
+		{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 1},
+		{Lo: 0xa8fd, Hi: 0xa8fe, Stride: 1},
+		{Lo: 0xa90a, Hi: 0xa925, Stride: 1},
+		{Lo: 0xa930, Hi: 0xa946, Stride: 1},
+		{Lo: 0xa960, Hi: 0xa97c, Stride: 1},
+		{Lo: 0xa984, Hi: 0xa9b2, Stride: 1},
+		{Lo: 0xa9cf, Hi: 0xa9cf, Stride: 1},
+		{Lo: 0xa9e0, Hi: 0xa9e4, Stride: 1},
+		{Lo: 0xa9e6, Hi: 0xa9ef, Stride: 1},
+		{Lo: 0xa9fa, Hi: 0xa9fe, Stride: 1},
+		{Lo: 0xaa00, Hi: 0xaa28, Stride: 1},
+		{Lo: 0xaa40, Hi: 0xaa42, Stride: 1},
+		{Lo: 0xaa44, Hi: 0xaa4b, Stride: 1},
+		{Lo: 0xaa60, Hi: 0xaa76, Stride: 1},
+		{Lo: 0xaa7a, Hi: 0xaa7a, Stride: 1},
+		{Lo: 0xaa7e, Hi: 0xaaaf, Stride: 1},
+		{Lo: 0xaab1, Hi: 0xaab1, Stride: 1},
+		{Lo: 0xaab5, Hi: 0xaab6, Stride: 1},
+		{Lo: 0xaab9, Hi: 0xaabd, Stride: 1},
+		{Lo: 0xaac0, Hi: 0xaac0, Stride: 1},
+		{Lo: 0xaac2, Hi: 0xaac2, Stride: 1},
+		{Lo: 0xaadb, Hi: 0xaadd, Stride: 1},
+		{Lo: 0xaae0, Hi: 0xaaea, Stride: 1},
+		{Lo: 0xaaf2, Hi: 0xaaf4, Stride: 1},
+		{Lo: 0xab01, Hi: 0xab06, Stride: 1},
+		{Lo: 0xab09, Hi: 0xab0e, Stride: 1},
+		{Lo: 0xab11, Hi: 0xab16, Stride: 1},
+		{Lo: 0xab20, Hi: 0xab26, Stride: 1},
+		{Lo: 0xab28, Hi: 0xab2e, Stride: 1},
+		{Lo: 0xab30, Hi: 0xab5a, Stride: 1},
+		{Lo: 0xab5c, Hi: 0xab69, Stride: 1},
+		{Lo: 0xab70, Hi: 0xabe2, Stride: 1},
+		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
+		{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 1},
+		{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 1},
+		{Lo: 0xf900, Hi: 0xfa6d, Stride: 1},
+		{Lo: 0xfa70, Hi: 0xfad9, Stride: 1},
+		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
+		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
+		{Lo: 0xfb1d, Hi: 0xfb1d, Stride: 1},
+		{Lo: 0xfb1f, Hi: 0xfb28, Stride: 1},
+		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
+		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
+		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
+		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
+		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
+		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
+		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
+		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
+		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
+		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
+		{Lo: 0xfe70, Hi: 0xfe74, Stride: 1},
+		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
+		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
+		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
+		{Lo: 0xff66, Hi: 0xffbe, Stride: 1},
+		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
+		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
+		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
+		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
+		{Lo: 0x10000, Hi: 0x1000b, Stride: 1},
+		{Lo: 0x1000d, Hi: 0x10026, Stride: 1},
+		{Lo: 0x10028, Hi: 0x1003a, Stride: 1},
+		{Lo: 0x1003c, Hi: 0x1003d, Stride: 1},
+		{Lo: 0x1003f, Hi: 0x1004d, Stride: 1},
+		{Lo: 0x10050, Hi: 0x1005d, Stride: 1},
+		{Lo: 0x10080, Hi: 0x100fa, Stride: 1},
+		{Lo: 0x10140, Hi: 0x10174, Stride: 1},
+		{Lo: 0x10280, Hi: 0x1029c, Stride: 1},
+		{Lo: 0x102a0, Hi: 0x102d0, Stride: 1},
+		{Lo: 0x10300, Hi: 0x1031f, Stride: 1},
+		{Lo: 0x1032d, Hi: 0x1034a, Stride: 1},
+		{Lo: 0x10350, Hi: 0x10375, Stride: 1},
+		{Lo: 0x10380, Hi: 0x1039d, Stride: 1},
+		{Lo: 0x103a0, Hi: 0x103c3, Stride: 1},
+		{Lo: 0x103c8, Hi: 0x103cf, Stride: 1},
+		{Lo: 0x103d1, Hi: 0x103d5, Stride: 1},
+		{Lo: 0x10400, Hi: 0x1049d, Stride: 1},
+		{Lo: 0x104b0, Hi: 0x104d3, Stride: 1},
+		{Lo: 0x104d8, Hi: 0x104fb, Stride: 1},
+		{Lo: 0x10500, Hi: 0x10527, Stride: 1},
+		{Lo: 0x10530, Hi: 0x10563, Stride: 1},
+		{Lo: 0x10570, Hi: 0x1057a, Stride: 1},
+		{Lo: 0x1057c, Hi: 0x1058a, Stride: 1},
+		{Lo: 0x1058c, Hi: 0x10592, Stride: 1},
+		{Lo: 0x10594, Hi: 0x10595, Stride: 1},
+		{Lo: 0x10597, Hi: 0x105a1, Stride: 1},
+		{Lo: 0x105a3, Hi: 0x105b1, Stride: 1},
+		{Lo: 0x105b3, Hi: 0x105b9, Stride: 1},
+		{Lo: 0x105bb, Hi: 0x105bc, Stride: 1},
+		{Lo: 0x10600, Hi: 0x10736, Stride: 1},
+		{Lo: 0x10740, Hi: 0x10755, Stride: 1},
+		{Lo: 0x10760, Hi: 0x10767, Stride: 1},
+		{Lo: 0x10780, Hi: 0x10785, Stride: 1},
+		{Lo: 0x10787, Hi: 0x107b0, Stride: 1},
+		{Lo: 0x107b2, Hi: 0x107ba, Stride: 1},
+		{Lo: 0x10800, Hi: 0x10805, Stride: 1},
+		{Lo: 0x10808, Hi: 0x10808, Stride: 1},
+		{Lo: 0x1080a, Hi: 0x10835, Stride: 1},
+		{Lo: 0x10837, Hi: 0x10838, Stride: 1},
+		{Lo: 0x1083c, Hi: 0x1083c, Stride: 1},
+		{Lo: 0x1083f, Hi: 0x10855, Stride: 1},
+		{Lo: 0x10860, Hi: 0x10876, Stride: 1},
+		{Lo: 0x10880, Hi: 0x1089e, Stride: 1},
+		{Lo: 0x108e0, Hi: 0x108f2, Stride: 1},
+		{Lo: 0x108f4, Hi: 0x108f5, Stride: 1},
+		{Lo: 0x10900, Hi: 0x10915, Stride: 1},
+		{Lo: 0x10920, Hi: 0x10939, Stride: 1},
+		{Lo: 0x10980, Hi: 0x109b7, Stride: 1},
+		{Lo: 0x109be, Hi: 0x109bf, Stride: 1},
+		{Lo: 0x10a00, Hi: 0x10a00, Stride: 1},
+		{Lo: 0x10a10, Hi: 0x10a13, Stride: 1},
+		{Lo: 0x10a15, Hi: 0x10a17, Stride: 1},
+		{Lo: 0x10a19, Hi: 0x10a35, Stride: 1},
+		{Lo: 0x10a60, Hi: 0x10a7c, Stride: 1},
+		{Lo: 0x10a80, Hi: 0x10a9c, Stride: 1},
+		{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 1},
+		{Lo: 0x10ac9, Hi: 0x10ae4, Stride: 1},
+		{Lo: 0x10b00, Hi: 0x10b35, Stride: 1},
+		{Lo: 0x10b40, Hi: 0x10b55, Stride: 1},
+		{Lo: 0x10b60, Hi: 0x10b72, Stride: 1},
+		{Lo: 0x10b80, Hi: 0x10b91, Stride: 1},
+		{Lo: 0x10c00, Hi: 0x10c48, Stride: 1},
+		{Lo: 0x10c80, Hi: 0x10cb2, Stride: 1},
+		{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 1},
+		{Lo: 0x10d00, Hi: 0x10d23, Stride: 1},
+		{Lo: 0x10e80, Hi: 0x10ea9, Stride: 1},
+		{Lo: 0x10eb0, Hi: 0x10eb1, Stride: 1},
+		{Lo: 0x10f00, Hi: 0x10f1c, Stride: 1},
+		{Lo: 0x10f27, Hi: 0x10f27, Stride: 1},
+		{Lo: 0x10f30, Hi: 0x10f45, Stride: 1},
+		{Lo: 0x10f70, Hi: 0x10f81, Stride: 1},
+		{Lo: 0x10fb0, Hi: 0x10fc4, Stride: 1},
+		{Lo: 0x10fe0, Hi: 0x10ff6, Stride: 1},
+		{Lo: 0x11003, Hi: 0x11037, Stride: 1},
+		{Lo: 0x11071, Hi: 0x11072, Stride: 1},
+		{Lo: 0x11075, Hi: 0x11075, Stride: 1},
+		{Lo: 0x11083, Hi: 0x110af, Stride: 1},
+		{Lo: 0x110d0, Hi: 0x110e8, Stride: 1},
+		{Lo: 0x11103, Hi: 0x11126, Stride: 1},
+		{Lo: 0x11144, Hi: 0x11144, Stride: 1},
+		{Lo: 0x11147, Hi: 0x11147, Stride: 1},
+		{Lo: 0x11150, Hi: 0x11172, Stride: 1},
+		{Lo: 0x11176, Hi: 0x11176, Stride: 1},
+		{Lo: 0x11183, Hi: 0x111b2, Stride: 1},
+		{Lo: 0x111c1, Hi: 0x111c4, Stride: 1},
+		{Lo: 0x111da, Hi: 0x111da, Stride: 1},
+		{Lo: 0x111dc, Hi: 0x111dc, Stride: 1},
+		{Lo: 0x11200, Hi: 0x11211, Stride: 1},
+		{Lo: 0x11213, Hi: 0x1122b, Stride: 1},
+		{Lo: 0x1123f, Hi: 0x11240, Stride: 1},
+		{Lo: 0x11280, Hi: 0x11286, Stride: 1},
+		{Lo: 0x11288, Hi: 0x11288, Stride: 1},
+		{Lo: 0x1128a, Hi: 0x1128d, Stride: 1},
+		{Lo: 0x1128f, Hi: 0x1129d, Stride: 1},
+		{Lo: 0x1129f, Hi: 0x112a8, Stride: 1},
+		{Lo: 0x112b0, Hi: 0x112de, Stride: 1},
+		{Lo: 0x11305, Hi: 0x1130c, Stride: 1},
+		{Lo: 0x1130f, Hi: 0x11310, Stride: 1},
+		{Lo: 0x11313, Hi: 0x11328, Stride: 1},
+		{Lo: 0x1132a, Hi: 0x11330, Stride: 1},
+		{Lo: 0x11332, Hi: 0x11333, Stride: 1},
+		{Lo: 0x11335, Hi: 0x11339, Stride: 1},
+		{Lo: 0x1133d, Hi: 0x1133d, Stride: 1},
+		{Lo: 0x11350, Hi: 0x11350, Stride: 1},
+		{Lo: 0x1135d, Hi: 0x11361, Stride: 1},
+		{Lo: 0x11400, Hi: 0x11434, Stride: 1},
+		{Lo: 0x11447, Hi: 0x1144a, Stride: 1},
+		{Lo: 0x1145f, Hi: 0x11461, Stride: 1},
+		{Lo: 0x11480, Hi: 0x114af, Stride: 1},
+		{Lo: 0x114c4, Hi: 0x114c5, Stride: 1},
+		{Lo: 0x114c7, Hi: 0x114c7, Stride: 1},
+		{Lo: 0x11580, Hi: 0x115ae, Stride: 1},
+		{Lo: 0x115d8, Hi: 0x115db, Stride: 1},
+		{Lo: 0x11600, Hi: 0x1162f, Stride: 1},
+		{Lo: 0x11644, Hi: 0x11644, Stride: 1},
+		{Lo: 0x11680, Hi: 0x116aa, Stride: 1},
+		{Lo: 0x116b8, Hi: 0x116b8, Stride: 1},
+		{Lo: 0x11700, Hi: 0x1171a, Stride: 1},
+		{Lo: 0x11740, Hi: 0x11746, Stride: 1},
+		{Lo: 0x11800, Hi: 0x1182b, Stride: 1},
+		{Lo: 0x118a0, Hi: 0x118df, Stride: 1},
+		{Lo: 0x118ff, Hi: 0x11906, Stride: 1},
+		{Lo: 0x11909, Hi: 0x11909, Stride: 1},
+		{Lo: 0x1190c, Hi: 0x11913, Stride: 1},
+		{Lo: 0x11915, Hi: 0x11916, Stride: 1},
+		{Lo: 0x11918, Hi: 0x1192f, Stride: 1},
+		{Lo: 0x1193f, Hi: 0x1193f, Stride: 1},
+		{Lo: 0x11941, Hi: 0x11941, Stride: 1},
+		{Lo: 0x119a0, Hi: 0x119a7, Stride: 1},
+		{Lo: 0x119aa, Hi: 0x119d0, Stride: 1},
+		{Lo: 0x119e1, Hi: 0x119e1, Stride: 1},
+		{Lo: 0x119e3, Hi: 0x119e3, Stride: 1},
+		{Lo: 0x11a00, Hi: 0x11a00, Stride: 1},
+		{Lo: 0x11a0b, Hi: 0x11a32, Stride: 1},
+		{Lo: 0x11a3a, Hi: 0x11a3a, Stride: 1},
+		{Lo: 0x11a50, Hi: 0x11a50, Stride: 1},
+		{Lo: 0x11a5c, Hi: 0x11a89, Stride: 1},
+		{Lo: 0x11a9d, Hi: 0x11a9d, Stride: 1},
+		{Lo: 0x11ab0, Hi: 0x11af8, Stride: 1},
+		{Lo: 0x11c00, Hi: 0x11c08, Stride: 1},
+		{Lo: 0x11c0a, Hi: 0x11c2e, Stride: 1},
+		{Lo: 0x11c40, Hi: 0x11c40, Stride: 1},
+		{Lo: 0x11c72, Hi: 0x11c8f, Stride: 1},
+		{Lo: 0x11d00, Hi: 0x11d06, Stride: 1},
+		{Lo: 0x11d08, Hi: 0x11d09, Stride: 1},
+		{Lo: 0x11d0b, Hi: 0x11d30, Stride: 1},
+		{Lo: 0x11d46, Hi: 0x11d46, Stride: 1},
+		{Lo: 0x11d60, Hi: 0x11d65, Stride: 1},
+		{Lo: 0x11d67, Hi: 0x11d68, Stride: 1},
+		{Lo: 0x11d6a, Hi: 0x11d89, Stride: 1},
+		{Lo: 0x11d98, Hi: 0x11d98, Stride: 1},
+		{Lo: 0x11ee0, Hi: 0x11ef2, Stride: 1},
+		{Lo: 0x11f02, Hi: 0x11f02, Stride: 1},
+		{Lo: 0x11f04, Hi: 0x11f10, Stride: 1},
+		{Lo: 0x11f12, Hi: 0x11f33, Stride: 1},
+		{Lo: 0x11fb0, Hi: 0x11fb0, Stride: 1},
+		{Lo: 0x12000, Hi: 0x12399, Stride: 1},
+		{Lo: 0x12400, Hi: 0x1246e, Stride: 1},
+		{Lo: 0x12480, Hi: 0x12543, Stride: 1},
+		{Lo: 0x12f90, Hi: 0x12ff0, Stride: 1},
+		{Lo: 0x13000, Hi: 0x1342f, Stride: 1},
+		{Lo: 0x13441, Hi: 0x13446, Stride: 1},
+		{Lo: 0x14400, Hi: 0x14646, Stride: 1},
+		{Lo: 0x16800, Hi: 0x16a38, Stride: 1},
+		{Lo: 0x16a40, Hi: 0x16a5e, Stride: 1},
+		{Lo: 0x16a70, Hi: 0x16abe, Stride: 1},
+		{Lo: 0x16ad0, Hi: 0x16aed, Stride: 1},
+		{Lo: 0x16b00, Hi: 0x16b2f, Stride: 1},
+		{Lo: 0x16b40, Hi: 0x16b43, Stride: 1},
+		{Lo: 0x16b63, Hi: 0x16b77, Stride: 1},
+		{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 1},
+		{Lo: 0x16e40, Hi: 0x16e7f, Stride: 1},
+		{Lo: 0x16f00, Hi: 0x16f4a, Stride: 1},
+		{Lo: 0x16f50, Hi: 0x16f50, Stride: 1},
+		{Lo: 0x16f93, Hi: 0x16f9f, Stride: 1},
+		{Lo: 0x16fe0, Hi: 0x16fe1, Stride: 1},
+		{Lo: 0x16fe3, Hi: 0x16fe3, Stride: 1},
+		{Lo: 0x17000, Hi: 0x187f7, Stride: 1},
+		{Lo: 0x18800, Hi: 0x18cd5, Stride: 1},
+		{Lo: 0x18d00, Hi: 0x18d08, Stride: 1},
+		{Lo: 0x1aff0, Hi: 0x1aff3, Stride: 1},
+		{Lo: 0x1aff5, Hi: 0x1affb, Stride: 1},
+		{Lo: 0x1affd, Hi: 0x1affe, Stride: 1},
+		{Lo: 0x1b000, Hi: 0x1b122, Stride: 1},
+		{Lo: 0x1b132, Hi: 0x1b132, Stride: 1},
+		{Lo: 0x1b150, Hi: 0x1b152, Stride: 1},
+		{Lo: 0x1b155, Hi: 0x1b155, Stride: 1},
+		{Lo: 0x1b164, Hi: 0x1b167, Stride: 1},
+		{Lo: 0x1b170, Hi: 0x1b2fb, Stride: 1},
+		{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 1},
+		{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 1},
+		{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 1},
+		{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 1},
+		{Lo: 0x1d400, Hi: 0x1d454, Stride: 1},
+		{Lo: 0x1d456, Hi: 0x1d49c, Stride: 1},
+		{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 1},
+		{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 1},
+		{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 1},
+		{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 1},
+		{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 1},
+		{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 1},
+		{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 1},
+		{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 1},
+		{Lo: 0x1d507, Hi: 0x1d50a, Stride: 1},
+		{Lo: 0x1d50d, Hi: 0x1d514, Stride: 1},
+		{Lo: 0x1d516, Hi: 0x1d51c, Stride: 1},
+		{Lo: 0x1d51e, Hi: 0x1d539, Stride: 1},
+		{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 1},
+		{Lo: 0x1d540, Hi: 0x1d544, Stride: 1},
+		{Lo: 0x1d546, Hi: 0x1d546, Stride: 1},
+		{Lo: 0x1d54a, Hi: 0x1d550, Stride: 1},
+		{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 1},
+		{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 1},
+		{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 1},
+		{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 1},
+		{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 1},
+		{Lo: 0x1d716, Hi: 0x1d734, Stride: 1},
+		{Lo: 0x1d736, Hi: 0x1d74e, Stride: 1},
+		{Lo: 0x1d750, Hi: 0x1d76e, Stride: 1},
+		{Lo: 0x1d770, Hi: 0x1d788, Stride: 1},
+		{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 1},
+		{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 1},
+		{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 1},
+		{Lo: 0x1df00, Hi: 0x1df1e, Stride: 1},
+		{Lo: 0x1df25, Hi: 0x1df2a, Stride: 1},
+		{Lo: 0x1e030, Hi: 0x1e06d, Stride: 1},
+		{Lo: 0x1e100, Hi: 0x1e12c, Stride: 1},
+		{Lo: 0x1e137, Hi: 0x1e13d, Stride: 1},
+		{Lo: 0x1e14e, Hi: 0x1e14e, Stride: 1},
+		{Lo: 0x1e290, Hi: 0x1e2ad, Stride: 1},
+		{Lo: 0x1e2c0, Hi: 0x1e2eb, Stride: 1},
+		{Lo: 0x1e4d0, Hi: 0x1e4eb, Stride: 1},
+		{Lo: 0x1e7e0, Hi: 0x1e7e6, Stride: 1},
+		{Lo: 0x1e7e8, Hi: 0x1e7eb, Stride: 1},
+		{Lo: 0x1e7ed, Hi: 0x1e7ee, Stride: 1},
+		{Lo: 0x1e7f0, Hi: 0x1e7fe, Stride: 1},
+		{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 1},
+		{Lo: 0x1e900, Hi: 0x1e943, Stride: 1},
+		{Lo: 0x1e94b, Hi: 0x1e94b, Stride: 1},
+		{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 1},
+		{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 1},
+		{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 1},
+		{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 1},
+		{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 1},
+		{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 1},
+		{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 1},
+		{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 1},
+		{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 1},
+		{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 1},
+		{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 1},
+		{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 1},
+		{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 1},
+		{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 1},
+		{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 1},
+		{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 1},
+		{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 1},
+		{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 1},
+		{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 1},
+		{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 1},
+		{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 1},
+		{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 1},
+		{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 1},
+		{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 1},
+		{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 1},
+		{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 1},
+		{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 1},
+		{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 1},
+		{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 1},
+		{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 1},
+		{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 1},
+		{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 1},
+		{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 1},
+		{Lo: 0x20000, Hi: 0x2a6df, Stride: 1},
+		{Lo: 0x2a700, Hi: 0x2b739, Stride: 1},
+		{Lo: 0x2b740, Hi: 0x2b81d, Stride: 1},
+		{Lo: 0x2b820, Hi: 0x2cea1, Stride: 1},
+		{Lo: 0x2ceb0, Hi: 0x2ebe0, Stride: 1},
+		{Lo: 0x2ebf0, Hi: 0x2ee5d, Stride: 1},
+		{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 1},
+		{Lo: 0x30000, Hi: 0x3134a, Stride: 1},
+		{Lo: 0x31350, Hi: 0x323af, Stride: 1},
+	},
+}
+
+var idContinueES5OrESNext = &unicode.RangeTable{
+	LatinOffset: 129,
+	R16: []unicode.Range16{
+		{Lo: 0x30, Hi: 0x39, Stride: 1},
+		{Lo: 0x41, Hi: 0x5a, Stride: 1},
+		{Lo: 0x5f, Hi: 0x5f, Stride: 1},
+		{Lo: 0x61, Hi: 0x7a, Stride: 1},
+		{Lo: 0xaa, Hi: 0xaa, Stride: 1},
+		{Lo: 0xb5, Hi: 0xb5, Stride: 1},
+		{Lo: 0xb7, Hi: 0xb7, Stride: 1},
+		{Lo: 0xba, Hi: 0xba, Stride: 1},
+		{Lo: 0xc0, Hi: 0xd6, Stride: 1},
+		{Lo: 0xd8, Hi: 0xf6, Stride: 1},
+		{Lo: 0xf8, Hi: 0x2c1, Stride: 1},
+		{Lo: 0x2c6, Hi: 0x2d1, Stride: 1},
+		{Lo: 0x2e0, Hi: 0x2e4, Stride: 1},
+		{Lo: 0x2ec, Hi: 0x2ec, Stride: 1},
+		{Lo: 0x2ee, Hi: 0x2ee, Stride: 1},
+		{Lo: 0x300, Hi: 0x374, Stride: 1},
+		{Lo: 0x376, Hi: 0x377, Stride: 1},
+		{Lo: 0x37a, Hi: 0x37d, Stride: 1},
+		{Lo: 0x37f, Hi: 0x37f, Stride: 1},
+		{Lo: 0x386, Hi: 0x38a, Stride: 1},
+		{Lo: 0x38c, Hi: 0x38c, Stride: 1},
+		{Lo: 0x38e, Hi: 0x3a1, Stride: 1},
+		{Lo: 0x3a3, Hi: 0x3f5, Stride: 1},
+		{Lo: 0x3f7, Hi: 0x481, Stride: 1},
+		{Lo: 0x483, Hi: 0x487, Stride: 1},
+		{Lo: 0x48a, Hi: 0x52f, Stride: 1},
+		{Lo: 0x531, Hi: 0x556, Stride: 1},
+		{Lo: 0x559, Hi: 0x559, Stride: 1},
+		{Lo: 0x560, Hi: 0x588, Stride: 1},
+		{Lo: 0x591, Hi: 0x5bd, Stride: 1},
+		{Lo: 0x5bf, Hi: 0x5bf, Stride: 1},
+		{Lo: 0x5c1, Hi: 0x5c2, Stride: 1},
+		{Lo: 0x5c4, Hi: 0x5c5, Stride: 1},
+		{Lo: 0x5c7, Hi: 0x5c7, Stride: 1},
+		{Lo: 0x5d0, Hi: 0x5ea, Stride: 1},
+		{Lo: 0x5ef, Hi: 0x5f2, Stride: 1},
+		{Lo: 0x610, Hi: 0x61a, Stride: 1},
+		{Lo: 0x620, Hi: 0x669, Stride: 1},
+		{Lo: 0x66e, Hi: 0x6d3, Stride: 1},
+		{Lo: 0x6d5, Hi: 0x6dc, Stride: 1},
+		{Lo: 0x6df, Hi: 0x6e8, Stride: 1},
+		{Lo: 0x6ea, Hi: 0x6fc, Stride: 1},
+		{Lo: 0x6ff, Hi: 0x6ff, Stride: 1},
+		{Lo: 0x710, Hi: 0x74a, Stride: 1},
+		{Lo: 0x74d, Hi: 0x7b1, Stride: 1},
+		{Lo: 0x7c0, Hi: 0x7f5, Stride: 1},
+		{Lo: 0x7fa, Hi: 0x7fa, Stride: 1},
+		{Lo: 0x7fd, Hi: 0x7fd, Stride: 1},
+		{Lo: 0x800, Hi: 0x82d, Stride: 1},
+		{Lo: 0x840, Hi: 0x85b, Stride: 1},
+		{Lo: 0x860, Hi: 0x86a, Stride: 1},
+		{Lo: 0x870, Hi: 0x887, Stride: 1},
+		{Lo: 0x889, Hi: 0x88e, Stride: 1},
+		{Lo: 0x898, Hi: 0x8e1, Stride: 1},
+		{Lo: 0x8e3, Hi: 0x963, Stride: 1},
+		{Lo: 0x966, Hi: 0x96f, Stride: 1},
+		{Lo: 0x971, Hi: 0x983, Stride: 1},
+		{Lo: 0x985, Hi: 0x98c, Stride: 1},
+		{Lo: 0x98f, Hi: 0x990, Stride: 1},
+		{Lo: 0x993, Hi: 0x9a8, Stride: 1},
+		{Lo: 0x9aa, Hi: 0x9b0, Stride: 1},
+		{Lo: 0x9b2, Hi: 0x9b2, Stride: 1},
+		{Lo: 0x9b6, Hi: 0x9b9, Stride: 1},
+		{Lo: 0x9bc, Hi: 0x9c4, Stride: 1},
+		{Lo: 0x9c7, Hi: 0x9c8, Stride: 1},
+		{Lo: 0x9cb, Hi: 0x9ce, Stride: 1},
+		{Lo: 0x9d7, Hi: 0x9d7, Stride: 1},
+		{Lo: 0x9dc, Hi: 0x9dd, Stride: 1},
+		{Lo: 0x9df, Hi: 0x9e3, Stride: 1},
+		{Lo: 0x9e6, Hi: 0x9f1, Stride: 1},
+		{Lo: 0x9fc, Hi: 0x9fc, Stride: 1},
+		{Lo: 0x9fe, Hi: 0x9fe, Stride: 1},
+		{Lo: 0xa01, Hi: 0xa03, Stride: 1},
+		{Lo: 0xa05, Hi: 0xa0a, Stride: 1},
+		{Lo: 0xa0f, Hi: 0xa10, Stride: 1},
+		{Lo: 0xa13, Hi: 0xa28, Stride: 1},
+		{Lo: 0xa2a, Hi: 0xa30, Stride: 1},
+		{Lo: 0xa32, Hi: 0xa33, Stride: 1},
+		{Lo: 0xa35, Hi: 0xa36, Stride: 1},
+		{Lo: 0xa38, Hi: 0xa39, Stride: 1},
+		{Lo: 0xa3c, Hi: 0xa3c, Stride: 1},
+		{Lo: 0xa3e, Hi: 0xa42, Stride: 1},
+		{Lo: 0xa47, Hi: 0xa48, Stride: 1},
+		{Lo: 0xa4b, Hi: 0xa4d, Stride: 1},
+		{Lo: 0xa51, Hi: 0xa51, Stride: 1},
+		{Lo: 0xa59, Hi: 0xa5c, Stride: 1},
+		{Lo: 0xa5e, Hi: 0xa5e, Stride: 1},
+		{Lo: 0xa66, Hi: 0xa75, Stride: 1},
+		{Lo: 0xa81, Hi: 0xa83, Stride: 1},
+		{Lo: 0xa85, Hi: 0xa8d, Stride: 1},
+		{Lo: 0xa8f, Hi: 0xa91, Stride: 1},
+		{Lo: 0xa93, Hi: 0xaa8, Stride: 1},
+		{Lo: 0xaaa, Hi: 0xab0, Stride: 1},
+		{Lo: 0xab2, Hi: 0xab3, Stride: 1},
+		{Lo: 0xab5, Hi: 0xab9, Stride: 1},
+		{Lo: 0xabc, Hi: 0xac5, Stride: 1},
+		{Lo: 0xac7, Hi: 0xac9, Stride: 1},
+		{Lo: 0xacb, Hi: 0xacd, Stride: 1},
+		{Lo: 0xad0, Hi: 0xad0, Stride: 1},
+		{Lo: 0xae0, Hi: 0xae3, Stride: 1},
+		{Lo: 0xae6, Hi: 0xaef, Stride: 1},
+		{Lo: 0xaf9, Hi: 0xaff, Stride: 1},
+		{Lo: 0xb01, Hi: 0xb03, Stride: 1},
+		{Lo: 0xb05, Hi: 0xb0c, Stride: 1},
+		{Lo: 0xb0f, Hi: 0xb10, Stride: 1},
+		{Lo: 0xb13, Hi: 0xb28, Stride: 1},
+		{Lo: 0xb2a, Hi: 0xb30, Stride: 1},
+		{Lo: 0xb32, Hi: 0xb33, Stride: 1},
+		{Lo: 0xb35, Hi: 0xb39, Stride: 1},
+		{Lo: 0xb3c, Hi: 0xb44, Stride: 1},
+		{Lo: 0xb47, Hi: 0xb48, Stride: 1},
+		{Lo: 0xb4b, Hi: 0xb4d, Stride: 1},
+		{Lo: 0xb55, Hi: 0xb57, Stride: 1},
+		{Lo: 0xb5c, Hi: 0xb5d, Stride: 1},
+		{Lo: 0xb5f, Hi: 0xb63, Stride: 1},
+		{Lo: 0xb66, Hi: 0xb6f, Stride: 1},
+		{Lo: 0xb71, Hi: 0xb71, Stride: 1},
+		{Lo: 0xb82, Hi: 0xb83, Stride: 1},
+		{Lo: 0xb85, Hi: 0xb8a, Stride: 1},
+		{Lo: 0xb8e, Hi: 0xb90, Stride: 1},
+		{Lo: 0xb92, Hi: 0xb95, Stride: 1},
+		{Lo: 0xb99, Hi: 0xb9a, Stride: 1},
+		{Lo: 0xb9c, Hi: 0xb9c, Stride: 1},
+		{Lo: 0xb9e, Hi: 0xb9f, Stride: 1},
+		{Lo: 0xba3, Hi: 0xba4, Stride: 1},
+		{Lo: 0xba8, Hi: 0xbaa, Stride: 1},
+		{Lo: 0xbae, Hi: 0xbb9, Stride: 1},
+		{Lo: 0xbbe, Hi: 0xbc2, Stride: 1},
+		{Lo: 0xbc6, Hi: 0xbc8, Stride: 1},
+		{Lo: 0xbca, Hi: 0xbcd, Stride: 1},
+		{Lo: 0xbd0, Hi: 0xbd0, Stride: 1},
+		{Lo: 0xbd7, Hi: 0xbd7, Stride: 1},
+		{Lo: 0xbe6, Hi: 0xbef, Stride: 1},
+		{Lo: 0xc00, Hi: 0xc0c, Stride: 1},
+		{Lo: 0xc0e, Hi: 0xc10, Stride: 1},
+		{Lo: 0xc12, Hi: 0xc28, Stride: 1},
+		{Lo: 0xc2a, Hi: 0xc39, Stride: 1},
+		{Lo: 0xc3c, Hi: 0xc44, Stride: 1},
+		{Lo: 0xc46, Hi: 0xc48, Stride: 1},
+		{Lo: 0xc4a, Hi: 0xc4d, Stride: 1},
+		{Lo: 0xc55, Hi: 0xc56, Stride: 1},
+		{Lo: 0xc58, Hi: 0xc5a, Stride: 1},
+		{Lo: 0xc5d, Hi: 0xc5d, Stride: 1},
+		{Lo: 0xc60, Hi: 0xc63, Stride: 1},
+		{Lo: 0xc66, Hi: 0xc6f, Stride: 1},
+		{Lo: 0xc80, Hi: 0xc83, Stride: 1},
+		{Lo: 0xc85, Hi: 0xc8c, Stride: 1},
+		{Lo: 0xc8e, Hi: 0xc90, Stride: 1},
+		{Lo: 0xc92, Hi: 0xca8, Stride: 1},
+		{Lo: 0xcaa, Hi: 0xcb3, Stride: 1},
+		{Lo: 0xcb5, Hi: 0xcb9, Stride: 1},
+		{Lo: 0xcbc, Hi: 0xcc4, Stride: 1},
+		{Lo: 0xcc6, Hi: 0xcc8, Stride: 1},
+		{Lo: 0xcca, Hi: 0xccd, Stride: 1},
+		{Lo: 0xcd5, Hi: 0xcd6, Stride: 1},
+		{Lo: 0xcdd, Hi: 0xcde, Stride: 1},
+		{Lo: 0xce0, Hi: 0xce3, Stride: 1},
+		{Lo: 0xce6, Hi: 0xcef, Stride: 1},
+		{Lo: 0xcf1, Hi: 0xcf3, Stride: 1},
+		{Lo: 0xd00, Hi: 0xd0c, Stride: 1},
+		{Lo: 0xd0e, Hi: 0xd10, Stride: 1},
+		{Lo: 0xd12, Hi: 0xd44, Stride: 1},
+		{Lo: 0xd46, Hi: 0xd48, Stride: 1},
+		{Lo: 0xd4a, Hi: 0xd4e, Stride: 1},
+		{Lo: 0xd54, Hi: 0xd57, Stride: 1},
+		{Lo: 0xd5f, Hi: 0xd63, Stride: 1},
+		{Lo: 0xd66, Hi: 0xd6f, Stride: 1},
+		{Lo: 0xd7a, Hi: 0xd7f, Stride: 1},
+		{Lo: 0xd81, Hi: 0xd83, Stride: 1},
+		{Lo: 0xd85, Hi: 0xd96, Stride: 1},
+		{Lo: 0xd9a, Hi: 0xdb1, Stride: 1},
+		{Lo: 0xdb3, Hi: 0xdbb, Stride: 1},
+		{Lo: 0xdbd, Hi: 0xdbd, Stride: 1},
+		{Lo: 0xdc0, Hi: 0xdc6, Stride: 1},
+		{Lo: 0xdca, Hi: 0xdca, Stride: 1},
+		{Lo: 0xdcf, Hi: 0xdd4, Stride: 1},
+		{Lo: 0xdd6, Hi: 0xdd6, Stride: 1},
+		{Lo: 0xdd8, Hi: 0xddf, Stride: 1},
+		{Lo: 0xde6, Hi: 0xdef, Stride: 1},
+		{Lo: 0xdf2, Hi: 0xdf3, Stride: 1},
+		{Lo: 0xe01, Hi: 0xe3a, Stride: 1},
+		{Lo: 0xe40, Hi: 0xe4e, Stride: 1},
+		{Lo: 0xe50, Hi: 0xe59, Stride: 1},
+		{Lo: 0xe81, Hi: 0xe82, Stride: 1},
+		{Lo: 0xe84, Hi: 0xe84, Stride: 1},
+		{Lo: 0xe86, Hi: 0xe8a, Stride: 1},
+		{Lo: 0xe8c, Hi: 0xea3, Stride: 1},
+		{Lo: 0xea5, Hi: 0xea5, Stride: 1},
+		{Lo: 0xea7, Hi: 0xebd, Stride: 1},
+		{Lo: 0xec0, Hi: 0xec4, Stride: 1},
+		{Lo: 0xec6, Hi: 0xec6, Stride: 1},
+		{Lo: 0xec8, Hi: 0xece, Stride: 1},
+		{Lo: 0xed0, Hi: 0xed9, Stride: 1},
+		{Lo: 0xedc, Hi: 0xedf, Stride: 1},
+		{Lo: 0xf00, Hi: 0xf00, Stride: 1},
+		{Lo: 0xf18, Hi: 0xf19, Stride: 1},
+		{Lo: 0xf20, Hi: 0xf29, Stride: 1},
+		{Lo: 0xf35, Hi: 0xf35, Stride: 1},
+		{Lo: 0xf37, Hi: 0xf37, Stride: 1},
+		{Lo: 0xf39, Hi: 0xf39, Stride: 1},
+		{Lo: 0xf3e, Hi: 0xf47, Stride: 1},
+		{Lo: 0xf49, Hi: 0xf6c, Stride: 1},
+		{Lo: 0xf71, Hi: 0xf84, Stride: 1},
+		{Lo: 0xf86, Hi: 0xf97, Stride: 1},
+		{Lo: 0xf99, Hi: 0xfbc, Stride: 1},
+		{Lo: 0xfc6, Hi: 0xfc6, Stride: 1},
+	},
+	R32: []unicode.Range32{
+		{Lo: 0x1000, Hi: 0x1049, Stride: 1},
+		{Lo: 0x1050, Hi: 0x109d, Stride: 1},
+		{Lo: 0x10a0, Hi: 0x10c5, Stride: 1},
+		{Lo: 0x10c7, Hi: 0x10c7, Stride: 1},
+		{Lo: 0x10cd, Hi: 0x10cd, Stride: 1},
+		{Lo: 0x10d0, Hi: 0x10fa, Stride: 1},
+		{Lo: 0x10fc, Hi: 0x1248, Stride: 1},
+		{Lo: 0x124a, Hi: 0x124d, Stride: 1},
+		{Lo: 0x1250, Hi: 0x1256, Stride: 1},
+		{Lo: 0x1258, Hi: 0x1258, Stride: 1},
+		{Lo: 0x125a, Hi: 0x125d, Stride: 1},
+		{Lo: 0x1260, Hi: 0x1288, Stride: 1},
+		{Lo: 0x128a, Hi: 0x128d, Stride: 1},
+		{Lo: 0x1290, Hi: 0x12b0, Stride: 1},
+		{Lo: 0x12b2, Hi: 0x12b5, Stride: 1},
+		{Lo: 0x12b8, Hi: 0x12be, Stride: 1},
+		{Lo: 0x12c0, Hi: 0x12c0, Stride: 1},
+		{Lo: 0x12c2, Hi: 0x12c5, Stride: 1},
+		{Lo: 0x12c8, Hi: 0x12d6, Stride: 1},
+		{Lo: 0x12d8, Hi: 0x1310, Stride: 1},
+		{Lo: 0x1312, Hi: 0x1315, Stride: 1},
+		{Lo: 0x1318, Hi: 0x135a, Stride: 1},
+		{Lo: 0x135d, Hi: 0x135f, Stride: 1},
+		{Lo: 0x1369, Hi: 0x1371, Stride: 1},
+		{Lo: 0x1380, Hi: 0x138f, Stride: 1},
+		{Lo: 0x13a0, Hi: 0x13f5, Stride: 1},
+		{Lo: 0x13f8, Hi: 0x13fd, Stride: 1},
+		{Lo: 0x1401, Hi: 0x166c, Stride: 1},
+		{Lo: 0x166f, Hi: 0x167f, Stride: 1},
+		{Lo: 0x1681, Hi: 0x169a, Stride: 1},
+		{Lo: 0x16a0, Hi: 0x16ea, Stride: 1},
+		{Lo: 0x16ee, Hi: 0x16f8, Stride: 1},
+		{Lo: 0x1700, Hi: 0x1715, Stride: 1},
+		{Lo: 0x171f, Hi: 0x1734, Stride: 1},
+		{Lo: 0x1740, Hi: 0x1753, Stride: 1},
+		{Lo: 0x1760, Hi: 0x176c, Stride: 1},
+		{Lo: 0x176e, Hi: 0x1770, Stride: 1},
+		{Lo: 0x1772, Hi: 0x1773, Stride: 1},
+		{Lo: 0x1780, Hi: 0x17d3, Stride: 1},
+		{Lo: 0x17d7, Hi: 0x17d7, Stride: 1},
+		{Lo: 0x17dc, Hi: 0x17dd, Stride: 1},
+		{Lo: 0x17e0, Hi: 0x17e9, Stride: 1},
+		{Lo: 0x180b, Hi: 0x180d, Stride: 1},
+		{Lo: 0x180f, Hi: 0x1819, Stride: 1},
+		{Lo: 0x1820, Hi: 0x1878, Stride: 1},
+		{Lo: 0x1880, Hi: 0x18aa, Stride: 1},
+		{Lo: 0x18b0, Hi: 0x18f5, Stride: 1},
+		{Lo: 0x1900, Hi: 0x191e, Stride: 1},
+		{Lo: 0x1920, Hi: 0x192b, Stride: 1},
+		{Lo: 0x1930, Hi: 0x193b, Stride: 1},
+		{Lo: 0x1946, Hi: 0x196d, Stride: 1},
+		{Lo: 0x1970, Hi: 0x1974, Stride: 1},
+		{Lo: 0x1980, Hi: 0x19ab, Stride: 1},
+		{Lo: 0x19b0, Hi: 0x19c9, Stride: 1},
+		{Lo: 0x19d0, Hi: 0x19da, Stride: 1},
+		{Lo: 0x1a00, Hi: 0x1a1b, Stride: 1},
+		{Lo: 0x1a20, Hi: 0x1a5e, Stride: 1},
+		{Lo: 0x1a60, Hi: 0x1a7c, Stride: 1},
+		{Lo: 0x1a7f, Hi: 0x1a89, Stride: 1},
+		{Lo: 0x1a90, Hi: 0x1a99, Stride: 1},
+		{Lo: 0x1aa7, Hi: 0x1aa7, Stride: 1},
+		{Lo: 0x1ab0, Hi: 0x1abd, Stride: 1},
+		{Lo: 0x1abf, Hi: 0x1ace, Stride: 1},
+		{Lo: 0x1b00, Hi: 0x1b4c, Stride: 1},
+		{Lo: 0x1b50, Hi: 0x1b59, Stride: 1},
+		{Lo: 0x1b6b, Hi: 0x1b73, Stride: 1},
+		{Lo: 0x1b80, Hi: 0x1bf3, Stride: 1},
+		{Lo: 0x1c00, Hi: 0x1c37, Stride: 1},
+		{Lo: 0x1c40, Hi: 0x1c49, Stride: 1},
+		{Lo: 0x1c4d, Hi: 0x1c7d, Stride: 1},
+		{Lo: 0x1c80, Hi: 0x1c88, Stride: 1},
+		{Lo: 0x1c90, Hi: 0x1cba, Stride: 1},
+		{Lo: 0x1cbd, Hi: 0x1cbf, Stride: 1},
+		{Lo: 0x1cd0, Hi: 0x1cd2, Stride: 1},
+		{Lo: 0x1cd4, Hi: 0x1cfa, Stride: 1},
+		{Lo: 0x1d00, Hi: 0x1f15, Stride: 1},
+		{Lo: 0x1f18, Hi: 0x1f1d, Stride: 1},
+		{Lo: 0x1f20, Hi: 0x1f45, Stride: 1},
+		{Lo: 0x1f48, Hi: 0x1f4d, Stride: 1},
+		{Lo: 0x1f50, Hi: 0x1f57, Stride: 1},
+		{Lo: 0x1f59, Hi: 0x1f59, Stride: 1},
+		{Lo: 0x1f5b, Hi: 0x1f5b, Stride: 1},
+		{Lo: 0x1f5d, Hi: 0x1f5d, Stride: 1},
+		{Lo: 0x1f5f, Hi: 0x1f7d, Stride: 1},
+		{Lo: 0x1f80, Hi: 0x1fb4, Stride: 1},
+		{Lo: 0x1fb6, Hi: 0x1fbc, Stride: 1},
+		{Lo: 0x1fbe, Hi: 0x1fbe, Stride: 1},
+		{Lo: 0x1fc2, Hi: 0x1fc4, Stride: 1},
+		{Lo: 0x1fc6, Hi: 0x1fcc, Stride: 1},
+		{Lo: 0x1fd0, Hi: 0x1fd3, Stride: 1},
+		{Lo: 0x1fd6, Hi: 0x1fdb, Stride: 1},
+		{Lo: 0x1fe0, Hi: 0x1fec, Stride: 1},
+		{Lo: 0x1ff2, Hi: 0x1ff4, Stride: 1},
+		{Lo: 0x1ff6, Hi: 0x1ffc, Stride: 1},
+		{Lo: 0x200c, Hi: 0x200d, Stride: 1},
+		{Lo: 0x203f, Hi: 0x2040, Stride: 1},
+		{Lo: 0x2054, Hi: 0x2054, Stride: 1},
+		{Lo: 0x2071, Hi: 0x2071, Stride: 1},
+		{Lo: 0x207f, Hi: 0x207f, Stride: 1},
+		{Lo: 0x2090, Hi: 0x209c, Stride: 1},
+		{Lo: 0x20d0, Hi: 0x20dc, Stride: 1},
+		{Lo: 0x20e1, Hi: 0x20e1, Stride: 1},
+		{Lo: 0x20e5, Hi: 0x20f0, Stride: 1},
+		{Lo: 0x2102, Hi: 0x2102, Stride: 1},
+		{Lo: 0x2107, Hi: 0x2107, Stride: 1},
+		{Lo: 0x210a, Hi: 0x2113, Stride: 1},
+		{Lo: 0x2115, Hi: 0x2115, Stride: 1},
+		{Lo: 0x2118, Hi: 0x211d, Stride: 1},
+		{Lo: 0x2124, Hi: 0x2124, Stride: 1},
+		{Lo: 0x2126, Hi: 0x2126, Stride: 1},
+		{Lo: 0x2128, Hi: 0x2128, Stride: 1},
+		{Lo: 0x212a, Hi: 0x2139, Stride: 1},
+		{Lo: 0x213c, Hi: 0x213f, Stride: 1},
+		{Lo: 0x2145, Hi: 0x2149, Stride: 1},
+		{Lo: 0x214e, Hi: 0x214e, Stride: 1},
+		{Lo: 0x2160, Hi: 0x2188, Stride: 1},
+		{Lo: 0x2c00, Hi: 0x2ce4, Stride: 1},
+		{Lo: 0x2ceb, Hi: 0x2cf3, Stride: 1},
+		{Lo: 0x2d00, Hi: 0x2d25, Stride: 1},
+		{Lo: 0x2d27, Hi: 0x2d27, Stride: 1},
+		{Lo: 0x2d2d, Hi: 0x2d2d, Stride: 1},
+		{Lo: 0x2d30, Hi: 0x2d67, Stride: 1},
+		{Lo: 0x2d6f, Hi: 0x2d6f, Stride: 1},
+		{Lo: 0x2d7f, Hi: 0x2d96, Stride: 1},
+		{Lo: 0x2da0, Hi: 0x2da6, Stride: 1},
+		{Lo: 0x2da8, Hi: 0x2dae, Stride: 1},
+		{Lo: 0x2db0, Hi: 0x2db6, Stride: 1},
+		{Lo: 0x2db8, Hi: 0x2dbe, Stride: 1},
+		{Lo: 0x2dc0, Hi: 0x2dc6, Stride: 1},
+		{Lo: 0x2dc8, Hi: 0x2dce, Stride: 1},
+		{Lo: 0x2dd0, Hi: 0x2dd6, Stride: 1},
+		{Lo: 0x2dd8, Hi: 0x2dde, Stride: 1},
+		{Lo: 0x2de0, Hi: 0x2dff, Stride: 1},
+		{Lo: 0x3005, Hi: 0x3007, Stride: 1},
+		{Lo: 0x3021, Hi: 0x302f, Stride: 1},
+		{Lo: 0x3031, Hi: 0x3035, Stride: 1},
+		{Lo: 0x3038, Hi: 0x303c, Stride: 1},
+		{Lo: 0x3041, Hi: 0x3096, Stride: 1},
+		{Lo: 0x3099, Hi: 0x309f, Stride: 1},
+		{Lo: 0x30a1, Hi: 0x30ff, Stride: 1},
+		{Lo: 0x3105, Hi: 0x312f, Stride: 1},
+		{Lo: 0x3131, Hi: 0x318e, Stride: 1},
+		{Lo: 0x31a0, Hi: 0x31bf, Stride: 1},
+		{Lo: 0x31f0, Hi: 0x31ff, Stride: 1},
+		{Lo: 0x3400, Hi: 0x4dbf, Stride: 1},
+		{Lo: 0x4e00, Hi: 0xa48c, Stride: 1},
+		{Lo: 0xa4d0, Hi: 0xa4fd, Stride: 1},
+		{Lo: 0xa500, Hi: 0xa60c, Stride: 1},
+		{Lo: 0xa610, Hi: 0xa62b, Stride: 1},
+		{Lo: 0xa640, Hi: 0xa66f, Stride: 1},
+		{Lo: 0xa674, Hi: 0xa67d, Stride: 1},
+		{Lo: 0xa67f, Hi: 0xa6f1, Stride: 1},
+		{Lo: 0xa717, Hi: 0xa71f, Stride: 1},
+		{Lo: 0xa722, Hi: 0xa788, Stride: 1},
+		{Lo: 0xa78b, Hi: 0xa7ca, Stride: 1},
+		{Lo: 0xa7d0, Hi: 0xa7d1, Stride: 1},
+		{Lo: 0xa7d3, Hi: 0xa7d3, Stride: 1},
+		{Lo: 0xa7d5, Hi: 0xa7d9, Stride: 1},
+		{Lo: 0xa7f2, Hi: 0xa827, Stride: 1},
+		{Lo: 0xa82c, Hi: 0xa82c, Stride: 1},
+		{Lo: 0xa840, Hi: 0xa873, Stride: 1},
+		{Lo: 0xa880, Hi: 0xa8c5, Stride: 1},
+		{Lo: 0xa8d0, Hi: 0xa8d9, Stride: 1},
+		{Lo: 0xa8e0, Hi: 0xa8f7, Stride: 1},
+		{Lo: 0xa8fb, Hi: 0xa8fb, Stride: 1},
+		{Lo: 0xa8fd, Hi: 0xa92d, Stride: 1},
+		{Lo: 0xa930, Hi: 0xa953, Stride: 1},
+		{Lo: 0xa960, Hi: 0xa97c, Stride: 1},
+		{Lo: 0xa980, Hi: 0xa9c0, Stride: 1},
+		{Lo: 0xa9cf, Hi: 0xa9d9, Stride: 1},
+		{Lo: 0xa9e0, Hi: 0xa9fe, Stride: 1},
+		{Lo: 0xaa00, Hi: 0xaa36, Stride: 1},
+		{Lo: 0xaa40, Hi: 0xaa4d, Stride: 1},
+		{Lo: 0xaa50, Hi: 0xaa59, Stride: 1},
+		{Lo: 0xaa60, Hi: 0xaa76, Stride: 1},
+		{Lo: 0xaa7a, Hi: 0xaac2, Stride: 1},
+		{Lo: 0xaadb, Hi: 0xaadd, Stride: 1},
+		{Lo: 0xaae0, Hi: 0xaaef, Stride: 1},
+		{Lo: 0xaaf2, Hi: 0xaaf6, Stride: 1},
+		{Lo: 0xab01, Hi: 0xab06, Stride: 1},
+		{Lo: 0xab09, Hi: 0xab0e, Stride: 1},
+		{Lo: 0xab11, Hi: 0xab16, Stride: 1},
+		{Lo: 0xab20, Hi: 0xab26, Stride: 1},
+		{Lo: 0xab28, Hi: 0xab2e, Stride: 1},
+		{Lo: 0xab30, Hi: 0xab5a, Stride: 1},
+		{Lo: 0xab5c, Hi: 0xab69, Stride: 1},
+		{Lo: 0xab70, Hi: 0xabea, Stride: 1},
+		{Lo: 0xabec, Hi: 0xabed, Stride: 1},
+		{Lo: 0xabf0, Hi: 0xabf9, Stride: 1},
+		{Lo: 0xac00, Hi: 0xd7a3, Stride: 1},
+		{Lo: 0xd7b0, Hi: 0xd7c6, Stride: 1},
+		{Lo: 0xd7cb, Hi: 0xd7fb, Stride: 1},
+		{Lo: 0xf900, Hi: 0xfa6d, Stride: 1},
+		{Lo: 0xfa70, Hi: 0xfad9, Stride: 1},
+		{Lo: 0xfb00, Hi: 0xfb06, Stride: 1},
+		{Lo: 0xfb13, Hi: 0xfb17, Stride: 1},
+		{Lo: 0xfb1d, Hi: 0xfb28, Stride: 1},
+		{Lo: 0xfb2a, Hi: 0xfb36, Stride: 1},
+		{Lo: 0xfb38, Hi: 0xfb3c, Stride: 1},
+		{Lo: 0xfb3e, Hi: 0xfb3e, Stride: 1},
+		{Lo: 0xfb40, Hi: 0xfb41, Stride: 1},
+		{Lo: 0xfb43, Hi: 0xfb44, Stride: 1},
+		{Lo: 0xfb46, Hi: 0xfbb1, Stride: 1},
+		{Lo: 0xfbd3, Hi: 0xfd3d, Stride: 1},
+		{Lo: 0xfd50, Hi: 0xfd8f, Stride: 1},
+		{Lo: 0xfd92, Hi: 0xfdc7, Stride: 1},
+		{Lo: 0xfdf0, Hi: 0xfdfb, Stride: 1},
+		{Lo: 0xfe00, Hi: 0xfe0f, Stride: 1},
+		{Lo: 0xfe20, Hi: 0xfe2f, Stride: 1},
+		{Lo: 0xfe33, Hi: 0xfe34, Stride: 1},
+		{Lo: 0xfe4d, Hi: 0xfe4f, Stride: 1},
+		{Lo: 0xfe70, Hi: 0xfe74, Stride: 1},
+		{Lo: 0xfe76, Hi: 0xfefc, Stride: 1},
+		{Lo: 0xff10, Hi: 0xff19, Stride: 1},
+		{Lo: 0xff21, Hi: 0xff3a, Stride: 1},
+		{Lo: 0xff3f, Hi: 0xff3f, Stride: 1},
+		{Lo: 0xff41, Hi: 0xff5a, Stride: 1},
+		{Lo: 0xff65, Hi: 0xffbe, Stride: 1},
+		{Lo: 0xffc2, Hi: 0xffc7, Stride: 1},
+		{Lo: 0xffca, Hi: 0xffcf, Stride: 1},
+		{Lo: 0xffd2, Hi: 0xffd7, Stride: 1},
+		{Lo: 0xffda, Hi: 0xffdc, Stride: 1},
+		{Lo: 0x10000, Hi: 0x1000b, Stride: 1},
+		{Lo: 0x1000d, Hi: 0x10026, Stride: 1},
+		{Lo: 0x10028, Hi: 0x1003a, Stride: 1},
+		{Lo: 0x1003c, Hi: 0x1003d, Stride: 1},
+		{Lo: 0x1003f, Hi: 0x1004d, Stride: 1},
+		{Lo: 0x10050, Hi: 0x1005d, Stride: 1},
+		{Lo: 0x10080, Hi: 0x100fa, Stride: 1},
+		{Lo: 0x10140, Hi: 0x10174, Stride: 1},
+		{Lo: 0x101fd, Hi: 0x101fd, Stride: 1},
+		{Lo: 0x10280, Hi: 0x1029c, Stride: 1},
+		{Lo: 0x102a0, Hi: 0x102d0, Stride: 1},
+		{Lo: 0x102e0, Hi: 0x102e0, Stride: 1},
+		{Lo: 0x10300, Hi: 0x1031f, Stride: 1},
+		{Lo: 0x1032d, Hi: 0x1034a, Stride: 1},
+		{Lo: 0x10350, Hi: 0x1037a, Stride: 1},
+		{Lo: 0x10380, Hi: 0x1039d, Stride: 1},
+		{Lo: 0x103a0, Hi: 0x103c3, Stride: 1},
+		{Lo: 0x103c8, Hi: 0x103cf, Stride: 1},
+		{Lo: 0x103d1, Hi: 0x103d5, Stride: 1},
+		{Lo: 0x10400, Hi: 0x1049d, Stride: 1},
+		{Lo: 0x104a0, Hi: 0x104a9, Stride: 1},
+		{Lo: 0x104b0, Hi: 0x104d3, Stride: 1},
+		{Lo: 0x104d8, Hi: 0x104fb, Stride: 1},
+		{Lo: 0x10500, Hi: 0x10527, Stride: 1},
+		{Lo: 0x10530, Hi: 0x10563, Stride: 1},
+		{Lo: 0x10570, Hi: 0x1057a, Stride: 1},
+		{Lo: 0x1057c, Hi: 0x1058a, Stride: 1},
+		{Lo: 0x1058c, Hi: 0x10592, Stride: 1},
+		{Lo: 0x10594, Hi: 0x10595, Stride: 1},
+		{Lo: 0x10597, Hi: 0x105a1, Stride: 1},
+		{Lo: 0x105a3, Hi: 0x105b1, Stride: 1},
+		{Lo: 0x105b3, Hi: 0x105b9, Stride: 1},
+		{Lo: 0x105bb, Hi: 0x105bc, Stride: 1},
+		{Lo: 0x10600, Hi: 0x10736, Stride: 1},
+		{Lo: 0x10740, Hi: 0x10755, Stride: 1},
+		{Lo: 0x10760, Hi: 0x10767, Stride: 1},
+		{Lo: 0x10780, Hi: 0x10785, Stride: 1},
+		{Lo: 0x10787, Hi: 0x107b0, Stride: 1},
+		{Lo: 0x107b2, Hi: 0x107ba, Stride: 1},
+		{Lo: 0x10800, Hi: 0x10805, Stride: 1},
+		{Lo: 0x10808, Hi: 0x10808, Stride: 1},
+		{Lo: 0x1080a, Hi: 0x10835, Stride: 1},
+		{Lo: 0x10837, Hi: 0x10838, Stride: 1},
+		{Lo: 0x1083c, Hi: 0x1083c, Stride: 1},
+		{Lo: 0x1083f, Hi: 0x10855, Stride: 1},
+		{Lo: 0x10860, Hi: 0x10876, Stride: 1},
+		{Lo: 0x10880, Hi: 0x1089e, Stride: 1},
+		{Lo: 0x108e0, Hi: 0x108f2, Stride: 1},
+		{Lo: 0x108f4, Hi: 0x108f5, Stride: 1},
+		{Lo: 0x10900, Hi: 0x10915, Stride: 1},
+		{Lo: 0x10920, Hi: 0x10939, Stride: 1},
+		{Lo: 0x10980, Hi: 0x109b7, Stride: 1},
+		{Lo: 0x109be, Hi: 0x109bf, Stride: 1},
+		{Lo: 0x10a00, Hi: 0x10a03, Stride: 1},
+		{Lo: 0x10a05, Hi: 0x10a06, Stride: 1},
+		{Lo: 0x10a0c, Hi: 0x10a13, Stride: 1},
+		{Lo: 0x10a15, Hi: 0x10a17, Stride: 1},
+		{Lo: 0x10a19, Hi: 0x10a35, Stride: 1},
+		{Lo: 0x10a38, Hi: 0x10a3a, Stride: 1},
+		{Lo: 0x10a3f, Hi: 0x10a3f, Stride: 1},
+		{Lo: 0x10a60, Hi: 0x10a7c, Stride: 1},
+		{Lo: 0x10a80, Hi: 0x10a9c, Stride: 1},
+		{Lo: 0x10ac0, Hi: 0x10ac7, Stride: 1},
+		{Lo: 0x10ac9, Hi: 0x10ae6, Stride: 1},
+		{Lo: 0x10b00, Hi: 0x10b35, Stride: 1},
+		{Lo: 0x10b40, Hi: 0x10b55, Stride: 1},
+		{Lo: 0x10b60, Hi: 0x10b72, Stride: 1},
+		{Lo: 0x10b80, Hi: 0x10b91, Stride: 1},
+		{Lo: 0x10c00, Hi: 0x10c48, Stride: 1},
+		{Lo: 0x10c80, Hi: 0x10cb2, Stride: 1},
+		{Lo: 0x10cc0, Hi: 0x10cf2, Stride: 1},
+		{Lo: 0x10d00, Hi: 0x10d27, Stride: 1},
+		{Lo: 0x10d30, Hi: 0x10d39, Stride: 1},
+		{Lo: 0x10e80, Hi: 0x10ea9, Stride: 1},
+		{Lo: 0x10eab, Hi: 0x10eac, Stride: 1},
+		{Lo: 0x10eb0, Hi: 0x10eb1, Stride: 1},
+		{Lo: 0x10efd, Hi: 0x10f1c, Stride: 1},
+		{Lo: 0x10f27, Hi: 0x10f27, Stride: 1},
+		{Lo: 0x10f30, Hi: 0x10f50, Stride: 1},
+		{Lo: 0x10f70, Hi: 0x10f85, Stride: 1},
+		{Lo: 0x10fb0, Hi: 0x10fc4, Stride: 1},
+		{Lo: 0x10fe0, Hi: 0x10ff6, Stride: 1},
+		{Lo: 0x11000, Hi: 0x11046, Stride: 1},
+		{Lo: 0x11066, Hi: 0x11075, Stride: 1},
+		{Lo: 0x1107f, Hi: 0x110ba, Stride: 1},
+		{Lo: 0x110c2, Hi: 0x110c2, Stride: 1},
+		{Lo: 0x110d0, Hi: 0x110e8, Stride: 1},
+		{Lo: 0x110f0, Hi: 0x110f9, Stride: 1},
+		{Lo: 0x11100, Hi: 0x11134, Stride: 1},
+		{Lo: 0x11136, Hi: 0x1113f, Stride: 1},
+		{Lo: 0x11144, Hi: 0x11147, Stride: 1},
+		{Lo: 0x11150, Hi: 0x11173, Stride: 1},
+		{Lo: 0x11176, Hi: 0x11176, Stride: 1},
+		{Lo: 0x11180, Hi: 0x111c4, Stride: 1},
+		{Lo: 0x111c9, Hi: 0x111cc, Stride: 1},
+		{Lo: 0x111ce, Hi: 0x111da, Stride: 1},
+		{Lo: 0x111dc, Hi: 0x111dc, Stride: 1},
+		{Lo: 0x11200, Hi: 0x11211, Stride: 1},
+		{Lo: 0x11213, Hi: 0x11237, Stride: 1},
+		{Lo: 0x1123e, Hi: 0x11241, Stride: 1},
+		{Lo: 0x11280, Hi: 0x11286, Stride: 1},
+		{Lo: 0x11288, Hi: 0x11288, Stride: 1},
+		{Lo: 0x1128a, Hi: 0x1128d, Stride: 1},
+		{Lo: 0x1128f, Hi: 0x1129d, Stride: 1},
+		{Lo: 0x1129f, Hi: 0x112a8, Stride: 1},
+		{Lo: 0x112b0, Hi: 0x112ea, Stride: 1},
+		{Lo: 0x112f0, Hi: 0x112f9, Stride: 1},
+		{Lo: 0x11300, Hi: 0x11303, Stride: 1},
+		{Lo: 0x11305, Hi: 0x1130c, Stride: 1},
+		{Lo: 0x1130f, Hi: 0x11310, Stride: 1},
+		{Lo: 0x11313, Hi: 0x11328, Stride: 1},
+		{Lo: 0x1132a, Hi: 0x11330, Stride: 1},
+		{Lo: 0x11332, Hi: 0x11333, Stride: 1},
+		{Lo: 0x11335, Hi: 0x11339, Stride: 1},
+		{Lo: 0x1133b, Hi: 0x11344, Stride: 1},
+		{Lo: 0x11347, Hi: 0x11348, Stride: 1},
+		{Lo: 0x1134b, Hi: 0x1134d, Stride: 1},
+		{Lo: 0x11350, Hi: 0x11350, Stride: 1},
+		{Lo: 0x11357, Hi: 0x11357, Stride: 1},
+		{Lo: 0x1135d, Hi: 0x11363, Stride: 1},
+		{Lo: 0x11366, Hi: 0x1136c, Stride: 1},
+		{Lo: 0x11370, Hi: 0x11374, Stride: 1},
+		{Lo: 0x11400, Hi: 0x1144a, Stride: 1},
+		{Lo: 0x11450, Hi: 0x11459, Stride: 1},
+		{Lo: 0x1145e, Hi: 0x11461, Stride: 1},
+		{Lo: 0x11480, Hi: 0x114c5, Stride: 1},
+		{Lo: 0x114c7, Hi: 0x114c7, Stride: 1},
+		{Lo: 0x114d0, Hi: 0x114d9, Stride: 1},
+		{Lo: 0x11580, Hi: 0x115b5, Stride: 1},
+		{Lo: 0x115b8, Hi: 0x115c0, Stride: 1},
+		{Lo: 0x115d8, Hi: 0x115dd, Stride: 1},
+		{Lo: 0x11600, Hi: 0x11640, Stride: 1},
+		{Lo: 0x11644, Hi: 0x11644, Stride: 1},
+		{Lo: 0x11650, Hi: 0x11659, Stride: 1},
+		{Lo: 0x11680, Hi: 0x116b8, Stride: 1},
+		{Lo: 0x116c0, Hi: 0x116c9, Stride: 1},
+		{Lo: 0x11700, Hi: 0x1171a, Stride: 1},
+		{Lo: 0x1171d, Hi: 0x1172b, Stride: 1},
+		{Lo: 0x11730, Hi: 0x11739, Stride: 1},
+		{Lo: 0x11740, Hi: 0x11746, Stride: 1},
+		{Lo: 0x11800, Hi: 0x1183a, Stride: 1},
+		{Lo: 0x118a0, Hi: 0x118e9, Stride: 1},
+		{Lo: 0x118ff, Hi: 0x11906, Stride: 1},
+		{Lo: 0x11909, Hi: 0x11909, Stride: 1},
+		{Lo: 0x1190c, Hi: 0x11913, Stride: 1},
+		{Lo: 0x11915, Hi: 0x11916, Stride: 1},
+		{Lo: 0x11918, Hi: 0x11935, Stride: 1},
+		{Lo: 0x11937, Hi: 0x11938, Stride: 1},
+		{Lo: 0x1193b, Hi: 0x11943, Stride: 1},
+		{Lo: 0x11950, Hi: 0x11959, Stride: 1},
+		{Lo: 0x119a0, Hi: 0x119a7, Stride: 1},
+		{Lo: 0x119aa, Hi: 0x119d7, Stride: 1},
+		{Lo: 0x119da, Hi: 0x119e1, Stride: 1},
+		{Lo: 0x119e3, Hi: 0x119e4, Stride: 1},
+		{Lo: 0x11a00, Hi: 0x11a3e, Stride: 1},
+		{Lo: 0x11a47, Hi: 0x11a47, Stride: 1},
+		{Lo: 0x11a50, Hi: 0x11a99, Stride: 1},
+		{Lo: 0x11a9d, Hi: 0x11a9d, Stride: 1},
+		{Lo: 0x11ab0, Hi: 0x11af8, Stride: 1},
+		{Lo: 0x11c00, Hi: 0x11c08, Stride: 1},
+		{Lo: 0x11c0a, Hi: 0x11c36, Stride: 1},
+		{Lo: 0x11c38, Hi: 0x11c40, Stride: 1},
+		{Lo: 0x11c50, Hi: 0x11c59, Stride: 1},
+		{Lo: 0x11c72, Hi: 0x11c8f, Stride: 1},
+		{Lo: 0x11c92, Hi: 0x11ca7, Stride: 1},
+		{Lo: 0x11ca9, Hi: 0x11cb6, Stride: 1},
+		{Lo: 0x11d00, Hi: 0x11d06, Stride: 1},
+		{Lo: 0x11d08, Hi: 0x11d09, Stride: 1},
+		{Lo: 0x11d0b, Hi: 0x11d36, Stride: 1},
+		{Lo: 0x11d3a, Hi: 0x11d3a, Stride: 1},
+		{Lo: 0x11d3c, Hi: 0x11d3d, Stride: 1},
+		{Lo: 0x11d3f, Hi: 0x11d47, Stride: 1},
+		{Lo: 0x11d50, Hi: 0x11d59, Stride: 1},
+		{Lo: 0x11d60, Hi: 0x11d65, Stride: 1},
+		{Lo: 0x11d67, Hi: 0x11d68, Stride: 1},
+		{Lo: 0x11d6a, Hi: 0x11d8e, Stride: 1},
+		{Lo: 0x11d90, Hi: 0x11d91, Stride: 1},
+		{Lo: 0x11d93, Hi: 0x11d98, Stride: 1},
+		{Lo: 0x11da0, Hi: 0x11da9, Stride: 1},
+		{Lo: 0x11ee0, Hi: 0x11ef6, Stride: 1},
+		{Lo: 0x11f00, Hi: 0x11f10, Stride: 1},
+		{Lo: 0x11f12, Hi: 0x11f3a, Stride: 1},
+		{Lo: 0x11f3e, Hi: 0x11f42, Stride: 1},
+		{Lo: 0x11f50, Hi: 0x11f59, Stride: 1},
+		{Lo: 0x11fb0, Hi: 0x11fb0, Stride: 1},
+		{Lo: 0x12000, Hi: 0x12399, Stride: 1},
+		{Lo: 0x12400, Hi: 0x1246e, Stride: 1},
+		{Lo: 0x12480, Hi: 0x12543, Stride: 1},
+		{Lo: 0x12f90, Hi: 0x12ff0, Stride: 1},
+		{Lo: 0x13000, Hi: 0x1342f, Stride: 1},
+		{Lo: 0x13440, Hi: 0x13455, Stride: 1},
+		{Lo: 0x14400, Hi: 0x14646, Stride: 1},
+		{Lo: 0x16800, Hi: 0x16a38, Stride: 1},
+		{Lo: 0x16a40, Hi: 0x16a5e, Stride: 1},
+		{Lo: 0x16a60, Hi: 0x16a69, Stride: 1},
+		{Lo: 0x16a70, Hi: 0x16abe, Stride: 1},
+		{Lo: 0x16ac0, Hi: 0x16ac9, Stride: 1},
+		{Lo: 0x16ad0, Hi: 0x16aed, Stride: 1},
+		{Lo: 0x16af0, Hi: 0x16af4, Stride: 1},
+		{Lo: 0x16b00, Hi: 0x16b36, Stride: 1},
+		{Lo: 0x16b40, Hi: 0x16b43, Stride: 1},
+		{Lo: 0x16b50, Hi: 0x16b59, Stride: 1},
+		{Lo: 0x16b63, Hi: 0x16b77, Stride: 1},
+		{Lo: 0x16b7d, Hi: 0x16b8f, Stride: 1},
+		{Lo: 0x16e40, Hi: 0x16e7f, Stride: 1},
+		{Lo: 0x16f00, Hi: 0x16f4a, Stride: 1},
+		{Lo: 0x16f4f, Hi: 0x16f87, Stride: 1},
+		{Lo: 0x16f8f, Hi: 0x16f9f, Stride: 1},
+		{Lo: 0x16fe0, Hi: 0x16fe1, Stride: 1},
+		{Lo: 0x16fe3, Hi: 0x16fe4, Stride: 1},
+		{Lo: 0x16ff0, Hi: 0x16ff1, Stride: 1},
+		{Lo: 0x17000, Hi: 0x187f7, Stride: 1},
+		{Lo: 0x18800, Hi: 0x18cd5, Stride: 1},
+		{Lo: 0x18d00, Hi: 0x18d08, Stride: 1},
+		{Lo: 0x1aff0, Hi: 0x1aff3, Stride: 1},
+		{Lo: 0x1aff5, Hi: 0x1affb, Stride: 1},
+		{Lo: 0x1affd, Hi: 0x1affe, Stride: 1},
+		{Lo: 0x1b000, Hi: 0x1b122, Stride: 1},
+		{Lo: 0x1b132, Hi: 0x1b132, Stride: 1},
+		{Lo: 0x1b150, Hi: 0x1b152, Stride: 1},
+		{Lo: 0x1b155, Hi: 0x1b155, Stride: 1},
+		{Lo: 0x1b164, Hi: 0x1b167, Stride: 1},
+		{Lo: 0x1b170, Hi: 0x1b2fb, Stride: 1},
+		{Lo: 0x1bc00, Hi: 0x1bc6a, Stride: 1},
+		{Lo: 0x1bc70, Hi: 0x1bc7c, Stride: 1},
+		{Lo: 0x1bc80, Hi: 0x1bc88, Stride: 1},
+		{Lo: 0x1bc90, Hi: 0x1bc99, Stride: 1},
+		{Lo: 0x1bc9d, Hi: 0x1bc9e, Stride: 1},
+		{Lo: 0x1cf00, Hi: 0x1cf2d, Stride: 1},
+		{Lo: 0x1cf30, Hi: 0x1cf46, Stride: 1},
+		{Lo: 0x1d165, Hi: 0x1d169, Stride: 1},
+		{Lo: 0x1d16d, Hi: 0x1d172, Stride: 1},
+		{Lo: 0x1d17b, Hi: 0x1d182, Stride: 1},
+		{Lo: 0x1d185, Hi: 0x1d18b, Stride: 1},
+		{Lo: 0x1d1aa, Hi: 0x1d1ad, Stride: 1},
+		{Lo: 0x1d242, Hi: 0x1d244, Stride: 1},
+		{Lo: 0x1d400, Hi: 0x1d454, Stride: 1},
+		{Lo: 0x1d456, Hi: 0x1d49c, Stride: 1},
+		{Lo: 0x1d49e, Hi: 0x1d49f, Stride: 1},
+		{Lo: 0x1d4a2, Hi: 0x1d4a2, Stride: 1},
+		{Lo: 0x1d4a5, Hi: 0x1d4a6, Stride: 1},
+		{Lo: 0x1d4a9, Hi: 0x1d4ac, Stride: 1},
+		{Lo: 0x1d4ae, Hi: 0x1d4b9, Stride: 1},
+		{Lo: 0x1d4bb, Hi: 0x1d4bb, Stride: 1},
+		{Lo: 0x1d4bd, Hi: 0x1d4c3, Stride: 1},
+		{Lo: 0x1d4c5, Hi: 0x1d505, Stride: 1},
+		{Lo: 0x1d507, Hi: 0x1d50a, Stride: 1},
+		{Lo: 0x1d50d, Hi: 0x1d514, Stride: 1},
+		{Lo: 0x1d516, Hi: 0x1d51c, Stride: 1},
+		{Lo: 0x1d51e, Hi: 0x1d539, Stride: 1},
+		{Lo: 0x1d53b, Hi: 0x1d53e, Stride: 1},
+		{Lo: 0x1d540, Hi: 0x1d544, Stride: 1},
+		{Lo: 0x1d546, Hi: 0x1d546, Stride: 1},
+		{Lo: 0x1d54a, Hi: 0x1d550, Stride: 1},
+		{Lo: 0x1d552, Hi: 0x1d6a5, Stride: 1},
+		{Lo: 0x1d6a8, Hi: 0x1d6c0, Stride: 1},
+		{Lo: 0x1d6c2, Hi: 0x1d6da, Stride: 1},
+		{Lo: 0x1d6dc, Hi: 0x1d6fa, Stride: 1},
+		{Lo: 0x1d6fc, Hi: 0x1d714, Stride: 1},
+		{Lo: 0x1d716, Hi: 0x1d734, Stride: 1},
+		{Lo: 0x1d736, Hi: 0x1d74e, Stride: 1},
+		{Lo: 0x1d750, Hi: 0x1d76e, Stride: 1},
+		{Lo: 0x1d770, Hi: 0x1d788, Stride: 1},
+		{Lo: 0x1d78a, Hi: 0x1d7a8, Stride: 1},
+		{Lo: 0x1d7aa, Hi: 0x1d7c2, Stride: 1},
+		{Lo: 0x1d7c4, Hi: 0x1d7cb, Stride: 1},
+		{Lo: 0x1d7ce, Hi: 0x1d7ff, Stride: 1},
+		{Lo: 0x1da00, Hi: 0x1da36, Stride: 1},
+		{Lo: 0x1da3b, Hi: 0x1da6c, Stride: 1},
+		{Lo: 0x1da75, Hi: 0x1da75, Stride: 1},
+		{Lo: 0x1da84, Hi: 0x1da84, Stride: 1},
+		{Lo: 0x1da9b, Hi: 0x1da9f, Stride: 1},
+		{Lo: 0x1daa1, Hi: 0x1daaf, Stride: 1},
+		{Lo: 0x1df00, Hi: 0x1df1e, Stride: 1},
+		{Lo: 0x1df25, Hi: 0x1df2a, Stride: 1},
+		{Lo: 0x1e000, Hi: 0x1e006, Stride: 1},
+		{Lo: 0x1e008, Hi: 0x1e018, Stride: 1},
+		{Lo: 0x1e01b, Hi: 0x1e021, Stride: 1},
+		{Lo: 0x1e023, Hi: 0x1e024, Stride: 1},
+		{Lo: 0x1e026, Hi: 0x1e02a, Stride: 1},
+		{Lo: 0x1e030, Hi: 0x1e06d, Stride: 1},
+		{Lo: 0x1e08f, Hi: 0x1e08f, Stride: 1},
+		{Lo: 0x1e100, Hi: 0x1e12c, Stride: 1},
+		{Lo: 0x1e130, Hi: 0x1e13d, Stride: 1},
+		{Lo: 0x1e140, Hi: 0x1e149, Stride: 1},
+		{Lo: 0x1e14e, Hi: 0x1e14e, Stride: 1},
+		{Lo: 0x1e290, Hi: 0x1e2ae, Stride: 1},
+		{Lo: 0x1e2c0, Hi: 0x1e2f9, Stride: 1},
+		{Lo: 0x1e4d0, Hi: 0x1e4f9, Stride: 1},
+		{Lo: 0x1e7e0, Hi: 0x1e7e6, Stride: 1},
+		{Lo: 0x1e7e8, Hi: 0x1e7eb, Stride: 1},
+		{Lo: 0x1e7ed, Hi: 0x1e7ee, Stride: 1},
+		{Lo: 0x1e7f0, Hi: 0x1e7fe, Stride: 1},
+		{Lo: 0x1e800, Hi: 0x1e8c4, Stride: 1},
+		{Lo: 0x1e8d0, Hi: 0x1e8d6, Stride: 1},
+		{Lo: 0x1e900, Hi: 0x1e94b, Stride: 1},
+		{Lo: 0x1e950, Hi: 0x1e959, Stride: 1},
+		{Lo: 0x1ee00, Hi: 0x1ee03, Stride: 1},
+		{Lo: 0x1ee05, Hi: 0x1ee1f, Stride: 1},
+		{Lo: 0x1ee21, Hi: 0x1ee22, Stride: 1},
+		{Lo: 0x1ee24, Hi: 0x1ee24, Stride: 1},
+		{Lo: 0x1ee27, Hi: 0x1ee27, Stride: 1},
+		{Lo: 0x1ee29, Hi: 0x1ee32, Stride: 1},
+		{Lo: 0x1ee34, Hi: 0x1ee37, Stride: 1},
+		{Lo: 0x1ee39, Hi: 0x1ee39, Stride: 1},
+		{Lo: 0x1ee3b, Hi: 0x1ee3b, Stride: 1},
+		{Lo: 0x1ee42, Hi: 0x1ee42, Stride: 1},
+		{Lo: 0x1ee47, Hi: 0x1ee47, Stride: 1},
+		{Lo: 0x1ee49, Hi: 0x1ee49, Stride: 1},
+		{Lo: 0x1ee4b, Hi: 0x1ee4b, Stride: 1},
+		{Lo: 0x1ee4d, Hi: 0x1ee4f, Stride: 1},
+		{Lo: 0x1ee51, Hi: 0x1ee52, Stride: 1},
+		{Lo: 0x1ee54, Hi: 0x1ee54, Stride: 1},
+		{Lo: 0x1ee57, Hi: 0x1ee57, Stride: 1},
+		{Lo: 0x1ee59, Hi: 0x1ee59, Stride: 1},
+		{Lo: 0x1ee5b, Hi: 0x1ee5b, Stride: 1},
+		{Lo: 0x1ee5d, Hi: 0x1ee5d, Stride: 1},
+		{Lo: 0x1ee5f, Hi: 0x1ee5f, Stride: 1},
+		{Lo: 0x1ee61, Hi: 0x1ee62, Stride: 1},
+		{Lo: 0x1ee64, Hi: 0x1ee64, Stride: 1},
+		{Lo: 0x1ee67, Hi: 0x1ee6a, Stride: 1},
+		{Lo: 0x1ee6c, Hi: 0x1ee72, Stride: 1},
+		{Lo: 0x1ee74, Hi: 0x1ee77, Stride: 1},
+		{Lo: 0x1ee79, Hi: 0x1ee7c, Stride: 1},
+		{Lo: 0x1ee7e, Hi: 0x1ee7e, Stride: 1},
+		{Lo: 0x1ee80, Hi: 0x1ee89, Stride: 1},
+		{Lo: 0x1ee8b, Hi: 0x1ee9b, Stride: 1},
+		{Lo: 0x1eea1, Hi: 0x1eea3, Stride: 1},
+		{Lo: 0x1eea5, Hi: 0x1eea9, Stride: 1},
+		{Lo: 0x1eeab, Hi: 0x1eebb, Stride: 1},
+		{Lo: 0x1fbf0, Hi: 0x1fbf9, Stride: 1},
+		{Lo: 0x20000, Hi: 0x2a6df, Stride: 1},
+		{Lo: 0x2a700, Hi: 0x2b739, Stride: 1},
+		{Lo: 0x2b740, Hi: 0x2b81d, Stride: 1},
+		{Lo: 0x2b820, Hi: 0x2cea1, Stride: 1},
+		{Lo: 0x2ceb0, Hi: 0x2ebe0, Stride: 1},
+		{Lo: 0x2ebf0, Hi: 0x2ee5d, Stride: 1},
+		{Lo: 0x2f800, Hi: 0x2fa1d, Stride: 1},
+		{Lo: 0x30000, Hi: 0x3134a, Stride: 1},
+		{Lo: 0x31350, Hi: 0x323af, Stride: 1},
+		{Lo: 0xe0100, Hi: 0xe01ef, Stride: 1},
+	},
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_lexer/js_lexer.go b/source/vendor/github.com/evanw/esbuild/internal/js_lexer/js_lexer.go
new file mode 100644
index 0000000..3776f97
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_lexer/js_lexer.go
@@ -0,0 +1,2665 @@
+package js_lexer
+
+// The lexer converts a source file to a stream of tokens. Unlike many
+// compilers, esbuild does not run the lexer to completion before the parser is
+// started. Instead, the lexer is called repeatedly by the parser as the parser
+// parses the file. This is because many tokens are context-sensitive and need
+// high-level information from the parser. Examples are regular expression
+// literals and JSX elements.
+//
+// For efficiency, the text associated with textual tokens is stored in two
+// separate ways depending on the token. Identifiers use UTF-8 encoding which
+// allows them to be slices of the input file without allocating extra memory.
+// Strings use UTF-16 encoding so they can represent unicode surrogates
+// accurately.
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// T is the type of a lexer token. Each token kind the lexer can produce
+// is one of the T* constants declared below.
+type T uint8
+
+// If you add a new token, remember to add it to "tokenToString" too
+//
+// The declaration order is significant: IsAssign below relies on the
+// "Assignments" group forming one contiguous range of values.
+const (
+	TEndOfFile T = iota
+	TSyntaxError
+
+	// "#!/usr/bin/env node"
+	THashbang
+
+	// Literals
+	TNoSubstitutionTemplateLiteral // Contents are in lexer.StringLiteral ([]uint16)
+	TNumericLiteral                // Contents are in lexer.Number (float64)
+	TStringLiteral                 // Contents are in lexer.StringLiteral ([]uint16)
+	TBigIntegerLiteral             // Contents are in lexer.Identifier (string)
+
+	// Pseudo-literals
+	TTemplateHead   // Contents are in lexer.StringLiteral ([]uint16)
+	TTemplateMiddle // Contents are in lexer.StringLiteral ([]uint16)
+	TTemplateTail   // Contents are in lexer.StringLiteral ([]uint16)
+
+	// Punctuation
+	TAmpersand
+	TAmpersandAmpersand
+	TAsterisk
+	TAsteriskAsterisk
+	TAt
+	TBar
+	TBarBar
+	TCaret
+	TCloseBrace
+	TCloseBracket
+	TCloseParen
+	TColon
+	TComma
+	TDot
+	TDotDotDot
+	TEqualsEquals
+	TEqualsEqualsEquals
+	TEqualsGreaterThan
+	TExclamation
+	TExclamationEquals
+	TExclamationEqualsEquals
+	TGreaterThan
+	TGreaterThanEquals
+	TGreaterThanGreaterThan
+	TGreaterThanGreaterThanGreaterThan
+	TLessThan
+	TLessThanEquals
+	TLessThanLessThan
+	TMinus
+	TMinusMinus
+	TOpenBrace
+	TOpenBracket
+	TOpenParen
+	TPercent
+	TPlus
+	TPlusPlus
+	TQuestion
+	TQuestionDot
+	TQuestionQuestion
+	TSemicolon
+	TSlash
+	TTilde
+
+	// Assignments (keep in sync with IsAssign() below)
+	TAmpersandAmpersandEquals
+	TAmpersandEquals
+	TAsteriskAsteriskEquals
+	TAsteriskEquals
+	TBarBarEquals
+	TBarEquals
+	TCaretEquals
+	TEquals
+	TGreaterThanGreaterThanEquals
+	TGreaterThanGreaterThanGreaterThanEquals
+	TLessThanLessThanEquals
+	TMinusEquals
+	TPercentEquals
+	TPlusEquals
+	TQuestionQuestionEquals
+	TSlashEquals
+
+	// Class-private fields and methods
+	TPrivateIdentifier
+
+	// Identifiers
+	TIdentifier     // Contents are in lexer.Identifier (string)
+	TEscapedKeyword // A keyword that has been escaped as an identifier
+
+	// Reserved words
+	TBreak
+	TCase
+	TCatch
+	TClass
+	TConst
+	TContinue
+	TDebugger
+	TDefault
+	TDelete
+	TDo
+	TElse
+	TEnum
+	TExport
+	TExtends
+	TFalse
+	TFinally
+	TFor
+	TFunction
+	TIf
+	TImport
+	TIn
+	TInstanceof
+	TNew
+	TNull
+	TReturn
+	TSuper
+	TSwitch
+	TThis
+	TThrow
+	TTrue
+	TTry
+	TTypeof
+	TVar
+	TVoid
+	TWhile
+	TWith
+)
+
+// IsAssign reports whether t is an assignment operator token. It relies
+// on the assignment tokens forming a contiguous run in the constant
+// block above, from TAmpersandAmpersandEquals through TSlashEquals (the
+// "Assignments" group is annotated to stay in sync with this check).
+func (t T) IsAssign() bool {
+	return t >= TAmpersandAmpersandEquals && t <= TSlashEquals
+}
+
+// Keywords maps each JavaScript reserved word to its token type.
+var Keywords = map[string]T{
+	// Reserved words
+	"break":      TBreak,
+	"case":       TCase,
+	"catch":      TCatch,
+	"class":      TClass,
+	"const":      TConst,
+	"continue":   TContinue,
+	"debugger":   TDebugger,
+	"default":    TDefault,
+	"delete":     TDelete,
+	"do":         TDo,
+	"else":       TElse,
+	"enum":       TEnum,
+	"export":     TExport,
+	"extends":    TExtends,
+	"false":      TFalse,
+	"finally":    TFinally,
+	"for":        TFor,
+	"function":   TFunction,
+	"if":         TIf,
+	"import":     TImport,
+	"in":         TIn,
+	"instanceof": TInstanceof,
+	"new":        TNew,
+	"null":       TNull,
+	"return":     TReturn,
+	"super":      TSuper,
+	"switch":     TSwitch,
+	"this":       TThis,
+	"throw":      TThrow,
+	"true":       TTrue,
+	"try":        TTry,
+	"typeof":     TTypeof,
+	"var":        TVar,
+	"void":       TVoid,
+	"while":      TWhile,
+	"with":       TWith,
+}
+
+// StrictModeReservedWords is the set of identifiers that are reserved
+// words only in strict mode code (they remain valid identifiers in
+// sloppy mode).
+var StrictModeReservedWords = map[string]bool{
+	"implements": true,
+	"interface":  true,
+	"let":        true,
+	"package":    true,
+	"private":    true,
+	"protected":  true,
+	"public":     true,
+	"static":     true,
+	"yield":      true,
+}
+
+// This represents a string that is maybe a substring of the current file's
+// "source.Contents" string. The point of doing this is that if it is a
+// substring (the common case), then we can represent it more efficiently.
+//
+// For compactness and performance, the JS AST represents identifiers as a
+// symbol reference instead of as a string. However, we need to track the
+// string between the first pass and the second pass because the string is only
+// resolved to a symbol in the second pass. To avoid allocating extra memory
+// to store the string, we instead use an index+length slice of the original JS
+// source code. That index is what "Start" represents here. The length is just
+// "len(String)".
+//
+// Set "Start" to invalid (the zero value) if "String" is not a substring of
+// "source.Contents". This is the case for escaped identifiers. For example,
+// the identifier "fo\u006f" would be "MaybeSubstring{String: "foo"}". It's
+// critical that any code changing the "String" also set "Start" to the zero
+// value, which is best done by just overwriting the whole "MaybeSubstring".
+//
+// The substring range used to be recovered automatically from the string but
+// that relied on the Go "unsafe" package which can hypothetically break under
+// certain Go compiler optimization passes, so it has been removed and replaced
+// with this more error-prone approach that doesn't use "unsafe".
+type MaybeSubstring struct {
+	// String is the string value, which is usually (but not always) a
+	// substring of the current file's "source.Contents".
+	String string
+	// Start is the byte offset of String within "source.Contents", or the
+	// invalid zero value when String is not a substring of the source
+	// (e.g. for escaped identifiers); see the comment above this type.
+	Start  ast.Index32
+}
+
+// Lexer holds all state for scanning a single source file. Per the file
+// comment above, the lexer is driven incrementally by the parser rather
+// than run to completion; the exported fields expose the current token
+// and its associated data after each call to Next.
+type Lexer struct {
+	LegalCommentsBeforeToken     []logger.Range
+	CommentsBeforeToken          []logger.Range
+	AllComments                  []logger.Range
+	Identifier                   MaybeSubstring
+	log                          logger.Log
+	source                       logger.Source
+	JSXFactoryPragmaComment      logger.Span
+	JSXFragmentPragmaComment     logger.Span
+	JSXRuntimePragmaComment      logger.Span
+	JSXImportSourcePragmaComment logger.Span
+	SourceMappingURL             logger.Span
+	BadArrowInTSXSuggestion      string
+
+	// Escape sequences in string literals are decoded lazily because they are
+	// not interpreted inside tagged templates, and tagged templates can contain
+	// invalid escape sequences. If the decoded array is nil, the encoded value
+	// should be passed to "tryToDecodeEscapeSequences" first.
+	decodedStringLiteralOrNil []uint16
+	encodedStringLiteralText  string
+
+	errorSuffix string
+	tracker     logger.LineColumnTracker
+
+	encodedStringLiteralStart int
+
+	// Number holds the value of a TNumericLiteral token (see the token
+	// constant comments). NOTE(review): "current"/"start"/"end" appear to
+	// be byte offsets of the scan position and current token, and
+	// "codePoint" the most recently read code point — confirm against the
+	// step/Next implementations, which are outside this chunk.
+	Number                          float64
+	current                         int
+	start                           int
+	end                             int
+	ApproximateNewlineCount         int
+	CouldBeBadArrowInTSX            int
+	BadArrowInTSXRange              logger.Range
+	LegacyOctalLoc                  logger.Loc
+	AwaitKeywordLoc                 logger.Loc
+	FnOrArrowStartLoc               logger.Loc
+	PreviousBackslashQuoteInJSX     logger.Range
+	LegacyHTMLCommentRange          logger.Range
+	codePoint                       rune
+	prevErrorLoc                    logger.Loc
+	json                            JSONFlavor
+	Token                           T
+	ts                              config.TSOptions
+	HasNewlineBefore                bool
+	HasCommentBefore                CommentBefore
+	IsLegacyOctalLiteral            bool
+	PrevTokenWasAwaitKeyword        bool
+	rescanCloseBraceAsTemplateToken bool
+	forGlobalName                   bool
+
+	// The log is disabled during speculative scans that may backtrack
+	IsLogDisabled bool
+}
+
// CommentBefore is a bit set recording which kinds of special annotation
// comments appeared immediately before the current token (presumably
// "pure", "key", and "no side effects" annotations, per the flag names).
type CommentBefore uint8

const (
	PureCommentBefore CommentBefore = 1 << iota
	KeyCommentBefore
	NoSideEffectsCommentBefore
)
+
// LexerPanic is the value passed to panic() on lexer errors (see
// SyntaxError, Unexpected, ExpectedString) so the parser can unwind and
// recover from a bad input file.
type LexerPanic struct{}
+
+func NewLexer(log logger.Log, source logger.Source, ts config.TSOptions) Lexer {
+	lexer := Lexer{
+		log:               log,
+		source:            source,
+		tracker:           logger.MakeLineColumnTracker(&source),
+		prevErrorLoc:      logger.Loc{Start: -1},
+		FnOrArrowStartLoc: logger.Loc{Start: -1},
+		ts:                ts,
+		json:              NotJSON,
+	}
+	lexer.step()
+	lexer.Next()
+	return lexer
+}
+
+func NewLexerGlobalName(log logger.Log, source logger.Source) Lexer {
+	lexer := Lexer{
+		log:               log,
+		source:            source,
+		tracker:           logger.MakeLineColumnTracker(&source),
+		prevErrorLoc:      logger.Loc{Start: -1},
+		FnOrArrowStartLoc: logger.Loc{Start: -1},
+		forGlobalName:     true,
+		json:              NotJSON,
+	}
+	lexer.step()
+	lexer.Next()
+	return lexer
+}
+
// JSONFlavor selects which dialect of JSON (if any) the lexer accepts.
type JSONFlavor uint8

const (
	// Specification: https://json.org/
	JSON JSONFlavor = iota

	// TypeScript's JSON superset is not documented but appears to allow:
	// - Comments: https://github.com/microsoft/TypeScript/issues/4987
	// - Trailing commas
	// - Full JS number syntax
	TSConfigJSON

	// This is used by the JavaScript lexer
	NotJSON
)
+
+func NewLexerJSON(log logger.Log, source logger.Source, json JSONFlavor, errorSuffix string) Lexer {
+	lexer := Lexer{
+		log:               log,
+		source:            source,
+		tracker:           logger.MakeLineColumnTracker(&source),
+		prevErrorLoc:      logger.Loc{Start: -1},
+		FnOrArrowStartLoc: logger.Loc{Start: -1},
+		errorSuffix:       errorSuffix,
+		json:              json,
+	}
+	lexer.step()
+	lexer.Next()
+	return lexer
+}
+
+func (lexer *Lexer) Loc() logger.Loc {
+	return logger.Loc{Start: int32(lexer.start)}
+}
+
+func (lexer *Lexer) Range() logger.Range {
+	return logger.Range{Loc: logger.Loc{Start: int32(lexer.start)}, Len: int32(lexer.end - lexer.start)}
+}
+
+func (lexer *Lexer) Raw() string {
+	return lexer.source.Contents[lexer.start:lexer.end]
+}
+
+func (lexer *Lexer) rawIdentifier() MaybeSubstring {
+	return MaybeSubstring{lexer.Raw(), ast.MakeIndex32(uint32(lexer.start))}
+}
+
+func (lexer *Lexer) StringLiteral() []uint16 {
+	if lexer.decodedStringLiteralOrNil == nil {
+		// Lazily decode escape sequences if needed
+		if decoded, ok, end := lexer.tryToDecodeEscapeSequences(lexer.encodedStringLiteralStart, lexer.encodedStringLiteralText, true /* reportErrors */); !ok {
+			lexer.end = end
+			lexer.SyntaxError()
+		} else {
+			lexer.decodedStringLiteralOrNil = decoded
+		}
+	}
+	return lexer.decodedStringLiteralOrNil
+}
+
// CookedAndRawTemplateContents returns the cooked (escape-decoded) and raw
// text of the current template literal token. Per the spec, <CR> and <CR><LF>
// in the raw text are normalized to <LF>. The cooked value is nil when the
// escape sequences are invalid, which becomes "undefined" for tagged
// templates.
func (lexer *Lexer) CookedAndRawTemplateContents() ([]uint16, string) {
	var raw string

	switch lexer.Token {
	case TNoSubstitutionTemplateLiteral, TTemplateTail:
		// "`x`" or "}x`"
		raw = lexer.source.Contents[lexer.start+1 : lexer.end-1]

	case TTemplateHead, TTemplateMiddle:
		// "`x${" or "}x${"
		raw = lexer.source.Contents[lexer.start+1 : lexer.end-2]
	}

	if strings.IndexByte(raw, '\r') != -1 {
		// From the specification:
		//
		// 11.8.6.1 Static Semantics: TV and TRV
		//
		// TV excludes the code units of LineContinuation while TRV includes
		// them. <CR><LF> and <CR> LineTerminatorSequences are normalized to
		// <LF> for both TV and TRV. An explicit EscapeSequence is needed to
		// include a <CR> or <CR><LF> sequence.

		// Rewrite the bytes in place, compacting each "\r\n" or "\r" to "\n".
		bytes := []byte(raw)
		end := 0
		i := 0

		for i < len(bytes) {
			c := bytes[i]
			i++

			if c == '\r' {
				// Convert '\r\n' into '\n'
				if i < len(bytes) && bytes[i] == '\n' {
					i++
				}

				// Convert '\r' into '\n'
				c = '\n'
			}

			bytes[end] = c
			end++
		}

		raw = string(bytes[:end])
	}

	// This will return nil on failure, which will become "undefined" for the tag
	cooked, _, _ := lexer.tryToDecodeEscapeSequences(lexer.start+1, raw, false /* reportErrors */)
	return cooked, raw
}
+
+func (lexer *Lexer) IsIdentifierOrKeyword() bool {
+	return lexer.Token >= TIdentifier
+}
+
+func (lexer *Lexer) IsContextualKeyword(text string) bool {
+	return lexer.Token == TIdentifier && lexer.Raw() == text
+}
+
+func (lexer *Lexer) ExpectContextualKeyword(text string) {
+	if !lexer.IsContextualKeyword(text) {
+		lexer.ExpectedString(fmt.Sprintf("%q", text))
+	}
+	lexer.Next()
+}
+
+func (lexer *Lexer) SyntaxError() {
+	loc := logger.Loc{Start: int32(lexer.end)}
+	message := "Unexpected end of file"
+	if lexer.end < len(lexer.source.Contents) {
+		c, _ := utf8.DecodeRuneInString(lexer.source.Contents[lexer.end:])
+		if c < 0x20 {
+			message = fmt.Sprintf("Syntax error \"\\x%02X\"", c)
+		} else if c >= 0x80 {
+			message = fmt.Sprintf("Syntax error \"\\u{%x}\"", c)
+		} else if c != '"' {
+			message = fmt.Sprintf("Syntax error \"%c\"", c)
+		} else {
+			message = "Syntax error '\"'"
+		}
+	}
+	lexer.addRangeError(logger.Range{Loc: loc}, message)
+	panic(LexerPanic{})
+}
+
// ExpectedString reports an "Expected X but found Y" error at the current
// token and aborts lexing by panicking with LexerPanic. When the previous
// token was an "await" keyword used outside an async function, a friendlier
// diagnostic pointing at the enclosing function is produced instead.
func (lexer *Lexer) ExpectedString(text string) {
	// Provide a friendly error message about "await" without "async"
	if lexer.PrevTokenWasAwaitKeyword {
		var notes []logger.MsgData
		if lexer.FnOrArrowStartLoc.Start != -1 {
			note := lexer.tracker.MsgData(logger.Range{Loc: lexer.FnOrArrowStartLoc},
				"Consider adding the \"async\" keyword here:")
			note.Location.Suggestion = "async"
			notes = []logger.MsgData{note}
		}
		lexer.AddRangeErrorWithNotes(RangeOfIdentifier(lexer.source, lexer.AwaitKeywordLoc),
			"\"await\" can only be used inside an \"async\" function",
			notes)
		panic(LexerPanic{})
	}

	found := fmt.Sprintf("%q", lexer.Raw())
	if lexer.start == len(lexer.source.Contents) {
		found = "end of file"
	}

	// If the expected text is quoted, offer its unquoted form as a suggestion
	suggestion := ""
	if strings.HasPrefix(text, "\"") && strings.HasSuffix(text, "\"") {
		suggestion = text[1 : len(text)-1]
	}

	lexer.addRangeErrorWithSuggestion(lexer.Range(), fmt.Sprintf("Expected %s%s but found %s", text, lexer.errorSuffix, found), suggestion)
	panic(LexerPanic{})
}
+
+func (lexer *Lexer) Expected(token T) {
+	if text, ok := tokenToString[token]; ok {
+		lexer.ExpectedString(text)
+	} else {
+		lexer.Unexpected()
+	}
+}
+
+func (lexer *Lexer) Unexpected() {
+	found := fmt.Sprintf("%q", lexer.Raw())
+	if lexer.start == len(lexer.source.Contents) {
+		found = "end of file"
+	}
+	lexer.addRangeError(lexer.Range(), fmt.Sprintf("Unexpected %s%s", found, lexer.errorSuffix))
+	panic(LexerPanic{})
+}
+
+func (lexer *Lexer) Expect(token T) {
+	if lexer.Token != token {
+		lexer.Expected(token)
+	}
+	lexer.Next()
+}
+
+func (lexer *Lexer) ExpectOrInsertSemicolon() {
+	if lexer.Token == TSemicolon || (!lexer.HasNewlineBefore &&
+		lexer.Token != TCloseBrace && lexer.Token != TEndOfFile) {
+		lexer.Expect(TSemicolon)
+	}
+}
+
// This parses a single "<" token. If that is the first part of a longer token,
// this function splits off the first "<" and leaves the remainder of the
// current token as another, smaller token. For example, "<<=" becomes "<=".
func (lexer *Lexer) ExpectLessThan(isInsideJSXElement bool) {
	switch lexer.Token {
	case TLessThan:
		if isInsideJSXElement {
			lexer.NextInsideJSXElement()
		} else {
			lexer.Next()
		}

	case TLessThanEquals:
		// "<=" minus "<" leaves "="; shift the token start past the "<"
		lexer.Token = TEquals
		lexer.start++
		// The "=" may itself join with following characters ("=>", "==", "===")
		lexer.maybeExpandEquals()

	case TLessThanLessThan:
		lexer.Token = TLessThan
		lexer.start++

	case TLessThanLessThanEquals:
		lexer.Token = TLessThanEquals
		lexer.start++

	default:
		lexer.Expected(TLessThan)
	}
}
+
// This parses a single ">" token. If that is the first part of a longer token,
// this function splits off the first ">" and leaves the remainder of the
// current token as another, smaller token. For example, ">>=" becomes ">=".
func (lexer *Lexer) ExpectGreaterThan(isInsideJSXElement bool) {
	switch lexer.Token {
	case TGreaterThan:
		if isInsideJSXElement {
			lexer.NextInsideJSXElement()
		} else {
			lexer.Next()
		}

	case TGreaterThanEquals:
		// ">=" minus ">" leaves "="; shift the token start past the ">"
		lexer.Token = TEquals
		lexer.start++
		// The "=" may itself join with following characters ("=>", "==", "===")
		lexer.maybeExpandEquals()

	case TGreaterThanGreaterThan:
		lexer.Token = TGreaterThan
		lexer.start++

	case TGreaterThanGreaterThanEquals:
		lexer.Token = TGreaterThanEquals
		lexer.start++

	case TGreaterThanGreaterThanGreaterThan:
		lexer.Token = TGreaterThanGreaterThan
		lexer.start++

	case TGreaterThanGreaterThanGreaterThanEquals:
		lexer.Token = TGreaterThanGreaterThanEquals
		lexer.start++

	default:
		lexer.Expected(TGreaterThan)
	}
}
+
+func (lexer *Lexer) maybeExpandEquals() {
+	switch lexer.codePoint {
+	case '>':
+		// "=" + ">" = "=>"
+		lexer.Token = TEqualsGreaterThan
+		lexer.step()
+
+	case '=':
+		// "=" + "=" = "=="
+		lexer.Token = TEqualsEquals
+		lexer.step()
+
+		if lexer.Token == '=' {
+			// "=" + "==" = "==="
+			lexer.Token = TEqualsEqualsEquals
+			lexer.step()
+		}
+	}
+}
+
// RangeOfIdentifier returns the source range of the identifier starting at
// "loc", including a leading "#" for private names and any backslash escape
// sequences such as "\u{10000}". If the text at "loc" doesn't start like an
// identifier, the range of the string at that location is returned instead
// (this can happen when the identifier was originally a string literal).
func RangeOfIdentifier(source logger.Source, loc logger.Loc) logger.Range {
	text := source.Contents[loc.Start:]
	if len(text) == 0 {
		return logger.Range{Loc: loc, Len: 0}
	}

	i := 0
	c, _ := utf8.DecodeRuneInString(text[i:])

	// Handle private names
	if c == '#' {
		i++
		c, _ = utf8.DecodeRuneInString(text[i:])
	}

	if js_ast.IsIdentifierStart(c) || c == '\\' {
		// Search for the end of the identifier
		for i < len(text) {
			c2, width2 := utf8.DecodeRuneInString(text[i:])
			if c2 == '\\' {
				i += width2

				// Skip over bracketed unicode escapes such as "\u{10000}"
				if i+2 < len(text) && text[i] == 'u' && text[i+1] == '{' {
					i += 2
					for i < len(text) {
						if text[i] == '}' {
							i++
							break
						}
						i++
					}
				}
			} else if !js_ast.IsIdentifierContinue(c2) {
				// First non-identifier character: the identifier ends here
				return logger.Range{Loc: loc, Len: int32(i)}
			} else {
				i += width2
			}
		}
	}

	// When minifying, this identifier may have originally been a string
	return source.RangeOfString(loc)
}
+
// KeyOrValue selects which part of an import assertion/attribute entry a
// range should cover (see RangeOfImportAssertOrWith).
type KeyOrValue uint8

const (
	KeyRange KeyOrValue = iota
	ValueRange
	KeyAndValueRange
)
+
+func RangeOfImportAssertOrWith(source logger.Source, assertOrWith ast.AssertOrWithEntry, which KeyOrValue) logger.Range {
+	if which == KeyRange {
+		return RangeOfIdentifier(source, assertOrWith.KeyLoc)
+	}
+	if which == ValueRange {
+		return source.RangeOfString(assertOrWith.ValueLoc)
+	}
+	loc := RangeOfIdentifier(source, assertOrWith.KeyLoc).Loc
+	return logger.Range{Loc: loc, Len: source.RangeOfString(assertOrWith.ValueLoc).End() - loc.Start}
+}
+
+func (lexer *Lexer) ExpectJSXElementChild(token T) {
+	if lexer.Token != token {
+		lexer.Expected(token)
+	}
+	lexer.NextJSXElementChild()
+}
+
// NextJSXElementChild scans the next token while positioned among the
// children of a JSX element. Raw text runs until "{", "<", or end of file and
// becomes a TStringLiteral; text containing entities, newlines, or non-ASCII
// characters takes a slow path that fixes whitespace and decodes JSX
// entities. Stray "}" and ">" characters are diagnosed with an escape
// suggestion.
func (lexer *Lexer) NextJSXElementChild() {
	lexer.HasNewlineBefore = false
	originalStart := lexer.end

	for {
		lexer.start = lexer.end
		lexer.Token = 0

		switch lexer.codePoint {
		case -1: // This indicates the end of the file
			lexer.Token = TEndOfFile

		case '{':
			lexer.step()
			lexer.Token = TOpenBrace

		case '<':
			lexer.step()
			lexer.Token = TLessThan

		default:
			needsFixing := false

		stringLiteral:
			for {
				switch lexer.codePoint {
				case -1, '{', '<':
					// Stop when the string ends
					break stringLiteral

				case '&', '\r', '\n', '\u2028', '\u2029':
					// This needs fixing if it has an entity or if it's a multi-line string
					needsFixing = true
					lexer.step()

				case '}', '>':
					// These technically aren't valid JSX: https://facebook.github.io/jsx/
					//
					//   JSXTextCharacter :
					//     * SourceCharacter but not one of {, <, > or }
					//
					var replacement string
					if lexer.codePoint == '}' {
						replacement = "{'}'}"
					} else {
						replacement = "{'>'}"
					}
					msg := logger.Msg{
						Kind: logger.Error,
						Data: lexer.tracker.MsgData(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}, Len: 1},
							fmt.Sprintf("The character \"%c\" is not valid inside a JSX element", lexer.codePoint)),
					}

					// Attempt to provide a better error message if this looks like an arrow function
					if lexer.CouldBeBadArrowInTSX > 0 && lexer.codePoint == '>' && lexer.source.Contents[lexer.end-1] == '=' {
						msg.Notes = []logger.MsgData{lexer.tracker.MsgData(lexer.BadArrowInTSXRange,
							"TypeScript's TSX syntax interprets arrow functions with a single generic type parameter as an opening JSX element. "+
								"If you want it to be interpreted as an arrow function instead, you need to add a trailing comma after the type parameter to disambiguate:")}
						msg.Notes[0].Location.Suggestion = lexer.BadArrowInTSXSuggestion
					} else {
						msg.Notes = []logger.MsgData{{Text: fmt.Sprintf("Did you mean to escape it as %q instead?", replacement)}}
						msg.Data.Location.Suggestion = replacement
						if !lexer.ts.Parse {
							// TypeScript treats this as an error but Babel doesn't treat this
							// as an error yet, so allow this in JS for now. Babel version 8
							// was supposed to be released in 2021 but was never released. If
							// it's released in the future, this can be changed to an error too.
							//
							// More context:
							// * TypeScript change: https://github.com/microsoft/TypeScript/issues/36341
							// * Babel 8 change: https://github.com/babel/babel/issues/11042
							// * Babel 8 release: https://github.com/babel/babel/issues/10746
							//
							msg.Kind = logger.Warning
						}
					}

					lexer.log.AddMsg(msg)
					lexer.step()

				default:
					// Non-ASCII strings need the slow path
					if lexer.codePoint >= 0x80 {
						needsFixing = true
					}
					lexer.step()
				}
			}

			lexer.Token = TStringLiteral
			text := lexer.source.Contents[originalStart:lexer.end]

			if needsFixing {
				// Slow path
				lexer.decodedStringLiteralOrNil = fixWhitespaceAndDecodeJSXEntities(text)
			} else {
				// Fast path: pure ASCII text widens byte-for-byte to UTF-16
				n := len(text)
				copy := make([]uint16, n)
				for i := 0; i < n; i++ {
					copy[i] = uint16(text[i])
				}
				lexer.decodedStringLiteralOrNil = copy
			}
		}

		break
	}
}
+
+func (lexer *Lexer) ExpectInsideJSXElement(token T) {
+	if lexer.Token != token {
+		lexer.Expected(token)
+	}
+	lexer.NextInsideJSXElement()
+}
+
// NextInsideJSXElement scans the next token using JSX-element rules: only a
// reduced punctuator set ('.', ':', '=', braces, angle brackets, '/') exists,
// identifiers may contain '-', and string attribute values may contain HTML
// entities (decoded on a slow path).
func (lexer *Lexer) NextInsideJSXElement() {
	lexer.HasNewlineBefore = false

	for {
		lexer.start = lexer.end
		lexer.Token = 0

		switch lexer.codePoint {
		case -1: // This indicates the end of the file
			lexer.Token = TEndOfFile

		case '\r', '\n', '\u2028', '\u2029':
			lexer.step()
			lexer.HasNewlineBefore = true
			continue

		case '\t', ' ':
			lexer.step()
			continue

		case '.':
			lexer.step()
			lexer.Token = TDot

		case ':':
			lexer.step()
			lexer.Token = TColon

		case '=':
			lexer.step()
			lexer.Token = TEquals

		case '{':
			lexer.step()
			lexer.Token = TOpenBrace

		case '}':
			lexer.step()
			lexer.Token = TCloseBrace

		case '<':
			lexer.step()
			lexer.Token = TLessThan

		case '>':
			lexer.step()
			lexer.Token = TGreaterThan

		case '/':
			// '/' or '//' or '/* ... */'
			lexer.step()
			switch lexer.codePoint {
			case '/':
			singleLineComment:
				for {
					lexer.step()
					switch lexer.codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break singleLineComment

					case -1: // This indicates the end of the file
						break singleLineComment
					}
				}
				continue

			case '*':
				lexer.step()
				startRange := lexer.Range()
			multiLineComment:
				for {
					switch lexer.codePoint {
					case '*':
						lexer.step()
						if lexer.codePoint == '/' {
							lexer.step()
							break multiLineComment
						}

					case '\r', '\n', '\u2028', '\u2029':
						lexer.step()
						lexer.HasNewlineBefore = true

					case -1: // This indicates the end of the file
						lexer.start = lexer.end
						lexer.AddRangeErrorWithNotes(logger.Range{Loc: lexer.Loc()}, "Expected \"*/\" to terminate multi-line comment",
							[]logger.MsgData{lexer.tracker.MsgData(startRange, "The multi-line comment starts here:")})
						panic(LexerPanic{})

					default:
						lexer.step()
					}
				}
				continue

			default:
				lexer.Token = TSlash
			}

		case '\'', '"':
			// String attribute value; track a trailing backslash-before-quote
			// so the parser can suggest a fix for "\"" style mistakes
			var backslash logger.Range
			quote := lexer.codePoint
			needsDecode := false
			lexer.step()

		stringLiteral:
			for {
				switch lexer.codePoint {
				case -1: // This indicates the end of the file
					lexer.SyntaxError()

				case '&':
					needsDecode = true
					lexer.step()

				case '\\':
					backslash = logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}, Len: 1}
					lexer.step()
					continue

				case quote:
					if backslash.Len > 0 {
						backslash.Len++
						lexer.PreviousBackslashQuoteInJSX = backslash
					}
					lexer.step()
					break stringLiteral

				default:
					// Non-ASCII strings need the slow path
					if lexer.codePoint >= 0x80 {
						needsDecode = true
					}
					lexer.step()
				}
				backslash = logger.Range{}
			}

			lexer.Token = TStringLiteral
			text := lexer.source.Contents[lexer.start+1 : lexer.end-1]

			if needsDecode {
				// Slow path
				lexer.decodedStringLiteralOrNil = decodeJSXEntities([]uint16{}, text)
			} else {
				// Fast path: pure ASCII text widens byte-for-byte to UTF-16
				n := len(text)
				copy := make([]uint16, n)
				for i := 0; i < n; i++ {
					copy[i] = uint16(text[i])
				}
				lexer.decodedStringLiteralOrNil = copy
			}

		default:
			// Check for unusual whitespace characters
			if js_ast.IsWhitespace(lexer.codePoint) {
				lexer.step()
				continue
			}

			if js_ast.IsIdentifierStart(lexer.codePoint) {
				lexer.step()
				// JSX identifiers may also contain '-' (e.g. "data-foo")
				for js_ast.IsIdentifierContinue(lexer.codePoint) || lexer.codePoint == '-' {
					lexer.step()
				}

				lexer.Identifier = lexer.rawIdentifier()
				lexer.Token = TIdentifier
				break
			}

			lexer.end = lexer.current
			lexer.Token = TSyntaxError
		}

		return
	}
}
+
// Next scans the next JavaScript/TypeScript token. On return, Token, the
// start/end offsets, Identifier (for identifier-like tokens), and
// HasNewlineBefore are up to date. Whitespace and comments are skipped via
// "continue"; every other case sets a token and returns. In JSON modes some
// constructs (comments, single quotes, bare "-") are additionally diagnosed.
func (lexer *Lexer) Next() {
	lexer.HasNewlineBefore = lexer.end == 0
	lexer.HasCommentBefore = 0
	lexer.PrevTokenWasAwaitKeyword = false
	lexer.LegalCommentsBeforeToken = lexer.LegalCommentsBeforeToken[:0]
	lexer.CommentsBeforeToken = lexer.CommentsBeforeToken[:0]

	for {
		lexer.start = lexer.end
		lexer.Token = 0

		switch lexer.codePoint {
		case -1: // This indicates the end of the file
			lexer.Token = TEndOfFile

		case '#':
			if lexer.start == 0 && strings.HasPrefix(lexer.source.Contents, "#!") {
				// "#!/usr/bin/env node"
				lexer.Token = THashbang
			hashbang:
				for {
					lexer.step()
					switch lexer.codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break hashbang

					case -1: // This indicates the end of the file
						break hashbang
					}
				}
				lexer.Identifier = lexer.rawIdentifier()
			} else {
				// "#foo"
				lexer.step()
				if lexer.codePoint == '\\' {
					lexer.Identifier, _ = lexer.scanIdentifierWithEscapes(privateIdentifier)
				} else {
					if !js_ast.IsIdentifierStart(lexer.codePoint) {
						lexer.SyntaxError()
					}
					lexer.step()
					for js_ast.IsIdentifierContinue(lexer.codePoint) {
						lexer.step()
					}
					if lexer.codePoint == '\\' {
						lexer.Identifier, _ = lexer.scanIdentifierWithEscapes(privateIdentifier)
					} else {
						lexer.Identifier = lexer.rawIdentifier()
					}
				}
				lexer.Token = TPrivateIdentifier
			}

		case '\r', '\n', '\u2028', '\u2029':
			lexer.step()
			lexer.HasNewlineBefore = true
			continue

		case '\t', ' ':
			lexer.step()
			continue

		case '(':
			lexer.step()
			lexer.Token = TOpenParen

		case ')':
			lexer.step()
			lexer.Token = TCloseParen

		case '[':
			lexer.step()
			lexer.Token = TOpenBracket

		case ']':
			lexer.step()
			lexer.Token = TCloseBracket

		case '{':
			lexer.step()
			lexer.Token = TOpenBrace

		case '}':
			lexer.step()
			lexer.Token = TCloseBrace

		case ',':
			lexer.step()
			lexer.Token = TComma

		case ':':
			lexer.step()
			lexer.Token = TColon

		case ';':
			lexer.step()
			lexer.Token = TSemicolon

		case '@':
			lexer.step()
			lexer.Token = TAt

		case '~':
			lexer.step()
			lexer.Token = TTilde

		case '?':
			// '?' or '?.' or '??' or '??='
			lexer.step()
			switch lexer.codePoint {
			case '?':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TQuestionQuestionEquals
				default:
					lexer.Token = TQuestionQuestion
				}
			case '.':
				lexer.Token = TQuestion
				current := lexer.current
				contents := lexer.source.Contents

				// Lookahead to disambiguate with 'a?.1:b'
				if current < len(contents) {
					c := contents[current]
					if c < '0' || c > '9' {
						lexer.step()
						lexer.Token = TQuestionDot
					}
				}
			default:
				lexer.Token = TQuestion
			}

		case '%':
			// '%' or '%='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TPercentEquals
			default:
				lexer.Token = TPercent
			}

		case '&':
			// '&' or '&=' or '&&' or '&&='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TAmpersandEquals
			case '&':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TAmpersandAmpersandEquals
				default:
					lexer.Token = TAmpersandAmpersand
				}
			default:
				lexer.Token = TAmpersand
			}

		case '|':
			// '|' or '|=' or '||' or '||='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TBarEquals
			case '|':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TBarBarEquals
				default:
					lexer.Token = TBarBar
				}
			default:
				lexer.Token = TBar
			}

		case '^':
			// '^' or '^='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TCaretEquals
			default:
				lexer.Token = TCaret
			}

		case '+':
			// '+' or '+=' or '++'
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TPlusEquals
			case '+':
				lexer.step()
				lexer.Token = TPlusPlus
			default:
				lexer.Token = TPlus
			}

		case '-':
			// '-' or '-=' or '--' or '-->'
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TMinusEquals
			case '-':
				lexer.step()

				// Handle legacy HTML-style comments
				if lexer.codePoint == '>' && lexer.HasNewlineBefore {
					lexer.step()
					lexer.LegacyHTMLCommentRange = lexer.Range()
					lexer.log.AddID(logger.MsgID_JS_HTMLCommentInJS, logger.Warning, &lexer.tracker, lexer.Range(),
						"Treating \"-->\" as the start of a legacy HTML single-line comment")
				singleLineHTMLCloseComment:
					for {
						switch lexer.codePoint {
						case '\r', '\n', '\u2028', '\u2029':
							break singleLineHTMLCloseComment

						case -1: // This indicates the end of the file
							break singleLineHTMLCloseComment
						}
						lexer.step()
					}
					continue
				}

				lexer.Token = TMinusMinus
			default:
				lexer.Token = TMinus
				// Strict JSON only allows "-" as part of a number literal
				if lexer.json == JSON && lexer.codePoint != '.' && (lexer.codePoint < '0' || lexer.codePoint > '9') {
					lexer.Unexpected()
				}
			}

		case '*':
			// '*' or '*=' or '**' or '**='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TAsteriskEquals

			case '*':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TAsteriskAsteriskEquals

				default:
					lexer.Token = TAsteriskAsterisk
				}

			default:
				lexer.Token = TAsterisk
			}

		case '/':
			// '/' or '/=' or '//' or '/* ... */'
			lexer.step()
			if lexer.forGlobalName {
				lexer.Token = TSlash
				break
			}
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TSlashEquals

			case '/':
			singleLineComment:
				for {
					lexer.step()
					switch lexer.codePoint {
					case '\r', '\n', '\u2028', '\u2029':
						break singleLineComment

					case -1: // This indicates the end of the file
						break singleLineComment
					}
				}
				if lexer.json == JSON {
					lexer.addRangeError(lexer.Range(), "JSON does not support comments")
				}
				lexer.scanCommentText()
				continue

			case '*':
				lexer.step()
				startRange := lexer.Range()
			multiLineComment:
				for {
					switch lexer.codePoint {
					case '*':
						lexer.step()
						if lexer.codePoint == '/' {
							lexer.step()
							break multiLineComment
						}

					case '\r', '\n', '\u2028', '\u2029':
						lexer.step()
						lexer.HasNewlineBefore = true

					case -1: // This indicates the end of the file
						lexer.start = lexer.end
						lexer.AddRangeErrorWithNotes(logger.Range{Loc: lexer.Loc()}, "Expected \"*/\" to terminate multi-line comment",
							[]logger.MsgData{lexer.tracker.MsgData(startRange, "The multi-line comment starts here:")})
						panic(LexerPanic{})

					default:
						lexer.step()
					}
				}
				if lexer.json == JSON {
					lexer.addRangeError(lexer.Range(), "JSON does not support comments")
				}
				lexer.scanCommentText()
				continue

			default:
				lexer.Token = TSlash
			}

		case '=':
			// '=' or '=>' or '==' or '==='
			lexer.step()
			switch lexer.codePoint {
			case '>':
				lexer.step()
				lexer.Token = TEqualsGreaterThan
			case '=':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TEqualsEqualsEquals
				default:
					lexer.Token = TEqualsEquals
				}
			default:
				lexer.Token = TEquals
			}

		case '<':
			// '<' or '<<' or '<=' or '<<=' or '<!--'
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TLessThanEquals
			case '<':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TLessThanLessThanEquals
				default:
					lexer.Token = TLessThanLessThan
				}

				// Handle legacy HTML-style comments
			case '!':
				if strings.HasPrefix(lexer.source.Contents[lexer.start:], "<!--") {
					lexer.step()
					lexer.step()
					lexer.step()
					lexer.LegacyHTMLCommentRange = lexer.Range()
					lexer.log.AddID(logger.MsgID_JS_HTMLCommentInJS, logger.Warning, &lexer.tracker, lexer.Range(),
						"Treating \"<!--\" as the start of a legacy HTML single-line comment")
				singleLineHTMLOpenComment:
					for {
						switch lexer.codePoint {
						case '\r', '\n', '\u2028', '\u2029':
							break singleLineHTMLOpenComment

						case -1: // This indicates the end of the file
							break singleLineHTMLOpenComment
						}
						lexer.step()
					}
					continue
				}

				lexer.Token = TLessThan

			default:
				lexer.Token = TLessThan
			}

		case '>':
			// '>' or '>>' or '>>>' or '>=' or '>>=' or '>>>='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				lexer.Token = TGreaterThanEquals
			case '>':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TGreaterThanGreaterThanEquals
				case '>':
					lexer.step()
					switch lexer.codePoint {
					case '=':
						lexer.step()
						lexer.Token = TGreaterThanGreaterThanGreaterThanEquals
					default:
						lexer.Token = TGreaterThanGreaterThanGreaterThan
					}
				default:
					lexer.Token = TGreaterThanGreaterThan
				}
			default:
				lexer.Token = TGreaterThan
			}

		case '!':
			// '!' or '!=' or '!=='
			lexer.step()
			switch lexer.codePoint {
			case '=':
				lexer.step()
				switch lexer.codePoint {
				case '=':
					lexer.step()
					lexer.Token = TExclamationEqualsEquals
				default:
					lexer.Token = TExclamationEquals
				}
			default:
				lexer.Token = TExclamation
			}

		case '\'', '"', '`':
			quote := lexer.codePoint
			needsSlowPath := false
			suffixLen := 1

			if quote != '`' {
				lexer.Token = TStringLiteral
			} else if lexer.rescanCloseBraceAsTemplateToken {
				lexer.Token = TTemplateTail
			} else {
				lexer.Token = TNoSubstitutionTemplateLiteral
			}
			lexer.step()

		stringLiteral:
			for {
				switch lexer.codePoint {
				case '\\':
					needsSlowPath = true
					lexer.step()

					// Handle Windows CRLF
					if lexer.codePoint == '\r' && lexer.json != JSON {
						lexer.step()
						if lexer.codePoint == '\n' {
							lexer.step()
						}
						continue
					}

				case -1: // This indicates the end of the file
					lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}}, "Unterminated string literal")
					panic(LexerPanic{})

				case '\r':
					if quote != '`' {
						lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}}, "Unterminated string literal")
						panic(LexerPanic{})
					}

					// Template literals require newline normalization
					needsSlowPath = true

				case '\n':
					if quote != '`' {
						lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}}, "Unterminated string literal")
						panic(LexerPanic{})
					}

				case '$':
					if quote == '`' {
						lexer.step()
						if lexer.codePoint == '{' {
							suffixLen = 2
							lexer.step()
							if lexer.rescanCloseBraceAsTemplateToken {
								lexer.Token = TTemplateMiddle
							} else {
								lexer.Token = TTemplateHead
							}
							break stringLiteral
						}
						continue stringLiteral
					}

				case quote:
					lexer.step()
					break stringLiteral

				default:
					// Non-ASCII strings need the slow path
					if lexer.codePoint >= 0x80 {
						needsSlowPath = true
					} else if lexer.json == JSON && lexer.codePoint < 0x20 {
						lexer.SyntaxError()
					}
				}
				lexer.step()
			}

			text := lexer.source.Contents[lexer.start+1 : lexer.end-suffixLen]

			if needsSlowPath {
				// Slow path: stash the encoded text for lazy decoding
				// by StringLiteral (tagged templates may contain invalid
				// escape sequences, so don't decode eagerly)
				lexer.decodedStringLiteralOrNil = nil
				lexer.encodedStringLiteralStart = lexer.start + 1
				lexer.encodedStringLiteralText = text
			} else {
				// Fast path
				n := len(text)
				copy := make([]uint16, n)
				for i := 0; i < n; i++ {
					copy[i] = uint16(text[i])
				}
				lexer.decodedStringLiteralOrNil = copy
			}

			if quote == '\'' && (lexer.json == JSON || lexer.json == TSConfigJSON) {
				lexer.addRangeError(lexer.Range(), "JSON strings must use double quotes")
			}

		// Note: This case is hot in profiles
		case '_', '$',
			'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
			'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
			'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M',
			'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z':
			// This is a fast path for long ASCII identifiers. Doing this in a loop
			// first instead of doing "step()" and "js_ast.IsIdentifierContinue()" like we
			// do after this is noticeably faster in the common case of ASCII-only
			// text. For example, doing this sped up end-to-end consuming of a large
			// TypeScript type declaration file from 97ms to 79ms (around 20% faster).
			contents := lexer.source.Contents
			n := len(contents)
			i := lexer.current
			for i < n {
				c := contents[i]
				if (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && (c < '0' || c > '9') && c != '_' && c != '$' {
					break
				}
				i++
			}
			lexer.current = i

			// Now do the slow path for any remaining non-ASCII identifier characters
			lexer.step()
			if lexer.codePoint >= 0x80 {
				for js_ast.IsIdentifierContinue(lexer.codePoint) {
					lexer.step()
				}
			}

			// If there's a slash, then we're in the extra-slow (and extra-rare) case
			// where the identifier has embedded escapes
			if lexer.codePoint == '\\' {
				lexer.Identifier, lexer.Token = lexer.scanIdentifierWithEscapes(normalIdentifier)
				break
			}

			// Otherwise (if there was no escape) we can slice the code verbatim
			lexer.Identifier = lexer.rawIdentifier()
			lexer.Token = Keywords[lexer.Raw()]
			if lexer.Token == 0 {
				lexer.Token = TIdentifier
			}

		case '\\':
			lexer.Identifier, lexer.Token = lexer.scanIdentifierWithEscapes(normalIdentifier)

		case '.', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
			lexer.parseNumericLiteralOrDot()

		default:
			// Check for unusual whitespace characters
			if js_ast.IsWhitespace(lexer.codePoint) {
				lexer.step()
				continue
			}

			if js_ast.IsIdentifierStart(lexer.codePoint) {
				lexer.step()
				for js_ast.IsIdentifierContinue(lexer.codePoint) {
					lexer.step()
				}
				if lexer.codePoint == '\\' {
					lexer.Identifier, lexer.Token = lexer.scanIdentifierWithEscapes(normalIdentifier)
				} else {
					lexer.Token = TIdentifier
					lexer.Identifier = lexer.rawIdentifier()
				}
				break
			}

			lexer.end = lexer.current
			lexer.Token = TSyntaxError
		}

		return
	}
}
+
// identifierKind distinguishes the two identifier forms that may contain
// "\u" escape sequences: ordinary identifiers and private names ("#foo").
type identifierKind uint8

const (
	// normalIdentifier is a plain identifier such as "foo".
	normalIdentifier identifierKind = iota
	// privateIdentifier is a class private name such as "#foo" (the leading
	// "#" is included in the scanned text).
	privateIdentifier
)
+
// scanIdentifierWithEscapes scans an identifier containing at least one
// "\u" escape sequence and returns the decoded identifier text along with
// the token type to use for it (TIdentifier, or TEscapedKeyword when the
// decoded text spells a keyword). This is an edge case that doesn't really
// exist in the wild, so it doesn't need to be as fast as possible.
func (lexer *Lexer) scanIdentifierWithEscapes(kind identifierKind) (MaybeSubstring, T) {
	// First pass: scan over the identifier to see how long it is
	for {
		// Scan a unicode escape sequence. There is at least one because that's
		// what caused us to get on this slow path in the first place.
		if lexer.codePoint == '\\' {
			lexer.step()
			if lexer.codePoint != 'u' {
				lexer.SyntaxError()
			}
			lexer.step()
			if lexer.codePoint == '{' {
				// Variable-length "\u{...}" escape: any number of hex digits
				lexer.step()
				for lexer.codePoint != '}' {
					switch lexer.codePoint {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
						'a', 'b', 'c', 'd', 'e', 'f',
						'A', 'B', 'C', 'D', 'E', 'F':
						lexer.step()
					default:
						lexer.SyntaxError()
					}
				}
				lexer.step()
			} else {
				// Fixed-length "\uXXXX" escape: exactly four hex digits
				for j := 0; j < 4; j++ {
					switch lexer.codePoint {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
						'a', 'b', 'c', 'd', 'e', 'f',
						'A', 'B', 'C', 'D', 'E', 'F':
						lexer.step()
					default:
						lexer.SyntaxError()
					}
				}
			}
			continue
		}

		// Stop when we reach the end of the identifier
		if !js_ast.IsIdentifierContinue(lexer.codePoint) {
			break
		}
		lexer.step()
	}

	// Second pass: re-use our existing escape sequence parser
	decoded, ok, end := lexer.tryToDecodeEscapeSequences(lexer.start, lexer.Raw(), true /* reportErrors */)
	if !ok {
		lexer.end = end
		lexer.SyntaxError()
	}
	text := string(helpers.UTF16ToString(decoded))

	// Even though it was escaped, it must still be a valid identifier
	identifier := text
	if kind == privateIdentifier {
		identifier = identifier[1:] // Skip over the "#"
	}
	if !js_ast.IsIdentifier(identifier) {
		lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(lexer.start)}, Len: int32(lexer.end - lexer.start)},
			fmt.Sprintf("Invalid identifier: %q", text))
	}

	// Escaped keywords are not allowed to work as actual keywords, but they are
	// allowed wherever we allow identifiers or keywords. For example:
	//
	//   // This is an error (equivalent to "var var;")
	//   var \u0076\u0061\u0072;
	//
	//   // This is an error (equivalent to "var foo;" except for this rule)
	//   \u0076\u0061\u0072 foo;
	//
	//   // This is an fine (equivalent to "foo.var;")
	//   foo.\u0076\u0061\u0072;
	//
	if Keywords[text] != 0 {
		return MaybeSubstring{String: text}, TEscapedKeyword
	} else {
		return MaybeSubstring{String: text}, TIdentifier
	}
}
+
// parseNumericLiteralOrDot lexes a token that starts with "." or a digit.
// It sets lexer.Token to TDot, TDotDotDot, TNumericLiteral (with the value
// stored in lexer.Number), or TBigIntegerLiteral (with the digit text
// stored in lexer.Identifier to avoid precision loss).
func (lexer *Lexer) parseNumericLiteralOrDot() {
	// Number or dot
	first := lexer.codePoint
	lexer.step()

	// Dot without a digit after it
	if first == '.' && (lexer.codePoint < '0' || lexer.codePoint > '9') {
		// "..."
		if lexer.codePoint == '.' &&
			lexer.current < len(lexer.source.Contents) &&
			lexer.source.Contents[lexer.current] == '.' {
			lexer.step()
			lexer.step()
			lexer.Token = TDotDotDot
			return
		}

		// "."
		lexer.Token = TDot
		return
	}

	underscoreCount := 0
	lastUnderscoreEnd := 0
	hasDotOrExponent := first == '.'
	isMissingDigitAfterDot := false
	base := 0.0 // 0 means a base-10 (possibly floating-point) literal
	lexer.IsLegacyOctalLiteral = false

	// Assume this is a number, but potentially change to a bigint later
	lexer.Token = TNumericLiteral

	// Check for binary, octal, or hexadecimal literal
	if first == '0' {
		switch lexer.codePoint {
		case 'b', 'B':
			base = 2

		case 'o', 'O':
			base = 8

		case 'x', 'X':
			base = 16

		case '0', '1', '2', '3', '4', '5', '6', '7', '_':
			// Pre-ES5 "012"-style octal literal (no "0o" prefix)
			base = 8
			lexer.IsLegacyOctalLiteral = true

		case '8', '9':
			// Looks like a legacy octal literal but contains 8/9, so it may
			// turn out to be a base-10 literal after all (handled below)
			lexer.IsLegacyOctalLiteral = true
		}
	}

	if base != 0 {
		// Integer literal
		isFirst := true
		isInvalidLegacyOctalLiteral := false
		lexer.Number = 0
		if !lexer.IsLegacyOctalLiteral {
			// Skip over the "b"/"o"/"x" base prefix character
			lexer.step()
		}

	integerLiteral:
		for {
			switch lexer.codePoint {
			case '_':
				// Cannot have multiple underscores in a row
				if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
					lexer.SyntaxError()
				}

				// The first digit must exist
				if isFirst || lexer.IsLegacyOctalLiteral {
					lexer.SyntaxError()
				}

				lastUnderscoreEnd = lexer.end
				underscoreCount++

			case '0', '1':
				lexer.Number = lexer.Number*base + float64(lexer.codePoint-'0')

			case '2', '3', '4', '5', '6', '7':
				if base == 2 {
					lexer.SyntaxError()
				}
				lexer.Number = lexer.Number*base + float64(lexer.codePoint-'0')

			case '8', '9':
				if lexer.IsLegacyOctalLiteral {
					isInvalidLegacyOctalLiteral = true
				} else if base < 10 {
					lexer.SyntaxError()
				}
				lexer.Number = lexer.Number*base + float64(lexer.codePoint-'0')

			case 'A', 'B', 'C', 'D', 'E', 'F':
				if base != 16 {
					lexer.SyntaxError()
				}
				lexer.Number = lexer.Number*base + float64(lexer.codePoint+10-'A')

			case 'a', 'b', 'c', 'd', 'e', 'f':
				if base != 16 {
					lexer.SyntaxError()
				}
				lexer.Number = lexer.Number*base + float64(lexer.codePoint+10-'a')

			default:
				// The first digit must exist
				if isFirst {
					lexer.SyntaxError()
				}

				break integerLiteral
			}

			lexer.step()
			isFirst = false
		}

		isBigIntegerLiteral := lexer.codePoint == 'n' && !hasDotOrExponent

		// Slow path: do we need to re-scan the input as text?
		if isBigIntegerLiteral || isInvalidLegacyOctalLiteral {
			text := lexer.rawIdentifier()

			// Can't use a leading zero for bigint literals
			if isBigIntegerLiteral && lexer.IsLegacyOctalLiteral {
				lexer.SyntaxError()
			}

			// Filter out underscores
			if underscoreCount > 0 {
				bytes := make([]byte, 0, len(text.String)-underscoreCount)
				for i := 0; i < len(text.String); i++ {
					c := text.String[i]
					if c != '_' {
						bytes = append(bytes, c)
					}
				}
				text = MaybeSubstring{String: string(bytes)}
			}

			// Store bigints as text to avoid precision loss
			if isBigIntegerLiteral {
				lexer.Identifier = text
			} else if isInvalidLegacyOctalLiteral {
				// Legacy octal literals may turn out to be a base 10 literal after all
				value, _ := strconv.ParseFloat(text.String, 64)
				lexer.Number = value
			}
		}
	} else {
		// Floating-point literal
		isInvalidLegacyOctalLiteral := first == '0' && (lexer.codePoint == '8' || lexer.codePoint == '9')

		// Initial digits
		for {
			if lexer.codePoint < '0' || lexer.codePoint > '9' {
				if lexer.codePoint != '_' {
					break
				}

				// Cannot have multiple underscores in a row
				if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
					lexer.SyntaxError()
				}

				// The specification forbids underscores in this case
				if isInvalidLegacyOctalLiteral {
					lexer.SyntaxError()
				}

				lastUnderscoreEnd = lexer.end
				underscoreCount++
			}
			lexer.step()
		}

		// Fractional digits
		if first != '.' && lexer.codePoint == '.' {
			// An underscore must not come last
			if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
				lexer.end--
				lexer.SyntaxError()
			}

			hasDotOrExponent = true
			lexer.step()
			if lexer.codePoint == '_' {
				lexer.SyntaxError()
			}
			isMissingDigitAfterDot = true
			for {
				if lexer.codePoint >= '0' && lexer.codePoint <= '9' {
					isMissingDigitAfterDot = false
				} else {
					if lexer.codePoint != '_' {
						break
					}

					// Cannot have multiple underscores in a row
					if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
						lexer.SyntaxError()
					}

					lastUnderscoreEnd = lexer.end
					underscoreCount++
				}
				lexer.step()
			}
		}

		// Exponent
		if lexer.codePoint == 'e' || lexer.codePoint == 'E' {
			// An underscore must not come last
			if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
				lexer.end--
				lexer.SyntaxError()
			}

			hasDotOrExponent = true
			lexer.step()
			if lexer.codePoint == '+' || lexer.codePoint == '-' {
				lexer.step()
			}
			if lexer.codePoint < '0' || lexer.codePoint > '9' {
				lexer.SyntaxError()
			}
			for {
				if lexer.codePoint < '0' || lexer.codePoint > '9' {
					if lexer.codePoint != '_' {
						break
					}

					// Cannot have multiple underscores in a row
					if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
						lexer.SyntaxError()
					}

					lastUnderscoreEnd = lexer.end
					underscoreCount++
				}
				lexer.step()
			}
		}

		// Take a slice of the text to parse
		text := lexer.rawIdentifier()

		// Filter out underscores
		if underscoreCount > 0 {
			bytes := make([]byte, 0, len(text.String)-underscoreCount)
			for i := 0; i < len(text.String); i++ {
				c := text.String[i]
				if c != '_' {
					bytes = append(bytes, c)
				}
			}
			text = MaybeSubstring{String: string(bytes)}
		}

		if lexer.codePoint == 'n' && !hasDotOrExponent {
			// The only bigint literal that can start with 0 is "0n"
			if len(text.String) > 1 && first == '0' {
				lexer.SyntaxError()
			}

			// Store bigints as text to avoid precision loss
			lexer.Identifier = text
		} else if !hasDotOrExponent && lexer.end-lexer.start < 10 {
			// Parse a 32-bit integer (very fast path)
			var number uint32 = 0
			for _, c := range text.String {
				number = number*10 + uint32(c-'0')
			}
			lexer.Number = float64(number)
		} else {
			// Parse a double-precision floating-point number
			value, _ := strconv.ParseFloat(text.String, 64)
			lexer.Number = value
		}
	}

	// An underscore must not come last
	if lastUnderscoreEnd > 0 && lexer.end == lastUnderscoreEnd+1 {
		lexer.end--
		lexer.SyntaxError()
	}

	// Handle bigint literals after the underscore-at-end check above
	if lexer.codePoint == 'n' && !hasDotOrExponent {
		lexer.Token = TBigIntegerLiteral
		lexer.step()
	}

	// Identifiers can't occur immediately after numbers
	if js_ast.IsIdentifierStart(lexer.codePoint) {
		lexer.SyntaxError()
	}

	// None of these are allowed in JSON
	if lexer.json == JSON && (first == '.' || base != 0 || underscoreCount > 0 || isMissingDigitAfterDot) {
		lexer.Unexpected()
	}
}
+
// ScanRegExp scans the body and flags of a regular expression literal. The
// parser calls this after deciding that a "/" token actually begins a
// regular expression. Duplicate flags are reported with a note pointing at
// the first occurrence; newlines or EOF inside the body are fatal errors.
func (lexer *Lexer) ScanRegExp() {
	// Advance over one code point of the body, stepping over a "\\" escape
	// first so an escaped "/" or "]" isn't treated as a delimiter
	validateAndStep := func() {
		if lexer.codePoint == '\\' {
			lexer.step()
		}

		switch lexer.codePoint {
		case -1, // This indicates the end of the file
			'\r', '\n', 0x2028, 0x2029: // Newlines aren't allowed in regular expressions
			lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}}, "Unterminated regular expression")
			panic(LexerPanic{})

		default:
			lexer.step()
		}
	}

	for {
		switch lexer.codePoint {
		case '/':
			// End of the body; now scan the flags
			lexer.step()
			bits := uint32(0)
			for js_ast.IsIdentifierContinue(lexer.codePoint) {
				switch lexer.codePoint {
				case 'd', 'g', 'i', 'm', 's', 'u', 'v', 'y':
					// Each valid flag maps to its own bit so duplicates can be detected
					bit := uint32(1) << uint32(lexer.codePoint-'a')
					if (bit & bits) != 0 {
						// Reject duplicate flags
						r1 := logger.Range{Loc: logger.Loc{Start: int32(lexer.start)}, Len: 1}
						r2 := logger.Range{Loc: logger.Loc{Start: int32(lexer.end)}, Len: 1}
						// Walk forward to locate the first occurrence of this flag
						for r1.Loc.Start < r2.Loc.Start && lexer.source.Contents[r1.Loc.Start] != byte(lexer.codePoint) {
							r1.Loc.Start++
						}
						lexer.log.AddErrorWithNotes(&lexer.tracker, r2,
							fmt.Sprintf("Duplicate flag \"%c\" in regular expression", lexer.codePoint),
							[]logger.MsgData{lexer.tracker.MsgData(r1,
								fmt.Sprintf("The first \"%c\" was here:", lexer.codePoint))})
					} else {
						bits |= bit
					}
					lexer.step()

				default:
					lexer.SyntaxError()
				}
			}
			return

		case '[':
			// A "/" inside a character class does not terminate the literal
			lexer.step()
			for lexer.codePoint != ']' {
				validateAndStep()
			}
			lexer.step()

		default:
			validateAndStep()
		}
	}
}
+
// decodeJSXEntities appends the UTF-16 encoding of text to decoded while
// expanding character entity references: numeric forms such as "&#65;" and
// "&#x41;", plus the named entities in the jsxEntity table. Text that only
// resembles an entity (no ";", unknown name, bad number) is passed through
// verbatim.
func decodeJSXEntities(decoded []uint16, text string) []uint16 {
	i := 0

	for i < len(text) {
		c, width := utf8.DecodeRuneInString(text[i:])
		i += width

		if c == '&' {
			// A candidate entity runs up to the next ";", which must not be
			// immediately adjacent (hence "length > 0")
			length := strings.IndexByte(text[i:], ';')
			if length > 0 {
				entity := text[i : i+length]
				if entity[0] == '#' {
					// Numeric character reference: decimal, or hex with an "x" prefix
					number := entity[1:]
					base := 10
					if len(number) > 1 && number[0] == 'x' {
						number = number[1:]
						base = 16
					}
					// Only consume the entity if the number parses; otherwise
					// the "&" is emitted as-is and scanning resumes after it
					if value, err := strconv.ParseInt(number, base, 32); err == nil {
						c = rune(value)
						i += length + 1
					}
				} else if value, ok := jsxEntity[entity]; ok {
					// Named entity from the lookup table
					c = value
					i += length + 1
				}
			}
		}

		// Encode as UTF-16, using a surrogate pair for code points outside
		// the basic multilingual plane
		if c <= 0xFFFF {
			decoded = append(decoded, uint16(c))
		} else {
			c -= 0x10000
			decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF)))
		}
	}

	return decoded
}
+
// fixWhitespaceAndDecodeJSXEntities converts JSX child text to UTF-16 using
// JSX whitespace rules: each line is trimmed of leading/trailing whitespace,
// lines that are entirely whitespace are dropped, and the remaining lines
// are joined with single spaces. Character entities are decoded via
// decodeJSXEntities.
func fixWhitespaceAndDecodeJSXEntities(text string) []uint16 {
	// Offset just past the last non-whitespace rune seen on the current line
	// (-1 while the current line is still all whitespace)
	afterLastNonWhitespace := -1
	decoded := []uint16{}
	i := 0

	// Offset of the first non-whitespace rune on the current line; starts at
	// 0 (not -1) so the first line is only trimmed at its end
	firstNonWhitespace := 0

	// Split into lines
	for i < len(text) {
		c, width := utf8.DecodeRuneInString(text[i:])

		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			// Newline: flush the completed line if it had any content
			if firstNonWhitespace != -1 && afterLastNonWhitespace != -1 {
				// Join onto the result with a single space separator
				if len(decoded) > 0 {
					decoded = append(decoded, ' ')
				}

				// Trim whitespace off the start and end of lines in the middle
				decoded = decodeJSXEntities(decoded, text[firstNonWhitespace:afterLastNonWhitespace])
			}

			// Reset for the next line
			firstNonWhitespace = -1

		case '\t', ' ':
			// Whitespace

		default:
			// Check for unusual whitespace characters
			if !js_ast.IsWhitespace(c) {
				afterLastNonWhitespace = i + width
				if firstNonWhitespace == -1 {
					firstNonWhitespace = i
				}
			}
		}

		i += width
	}

	// Flush the final line (it has no trailing newline)
	if firstNonWhitespace != -1 {
		if len(decoded) > 0 {
			decoded = append(decoded, ' ')
		}

		// Trim whitespace off the start of the last line
		decoded = decodeJSXEntities(decoded, text[firstNonWhitespace:])
	}

	return decoded
}
+
// tryToDecodeEscapeSequences decodes the backslash escape sequences in
// "text" (raw source text beginning at source offset "start") into UTF-16
// code units. In JSON mode, escapes that JSON does not allow are rejected.
// If this fails, this returns "nil, false, end" where "end" is the value to
// store to "lexer.end" before calling "lexer.SyntaxError()" if relevant.
func (lexer *Lexer) tryToDecodeEscapeSequences(start int, text string, reportErrors bool) ([]uint16, bool, int) {
	decoded := []uint16{}
	i := 0

	for i < len(text) {
		c, width := utf8.DecodeRuneInString(text[i:])
		i += width

		switch c {
		case '\r':
			// From the specification:
			//
			// 11.8.6.1 Static Semantics: TV and TRV
			//
			// TV excludes the code units of LineContinuation while TRV includes
			// them. <CR><LF> and <CR> LineTerminatorSequences are normalized to
			// <LF> for both TV and TRV. An explicit EscapeSequence is needed to
			// include a <CR> or <CR><LF> sequence.

			// Convert '\r\n' into '\n'
			if i < len(text) && text[i] == '\n' {
				i++
			}

			// Convert '\r' into '\n'
			decoded = append(decoded, '\n')
			continue

		case '\\':
			c2, width2 := utf8.DecodeRuneInString(text[i:])
			i += width2

			switch c2 {
			case 'b':
				decoded = append(decoded, '\b')
				continue

			case 'f':
				decoded = append(decoded, '\f')
				continue

			case 'n':
				decoded = append(decoded, '\n')
				continue

			case 'r':
				decoded = append(decoded, '\r')
				continue

			case 't':
				decoded = append(decoded, '\t')
				continue

			case 'v':
				// "\v" is not a valid JSON escape
				if lexer.json == JSON {
					return nil, false, start + i - width2
				}

				decoded = append(decoded, '\v')
				continue

			case '0', '1', '2', '3', '4', '5', '6', '7':
				octalStart := i - 2
				if lexer.json == JSON {
					return nil, false, start + i - width2
				}

				// 1-3 digit octal
				isBad := false
				value := c2 - '0'
				c3, width3 := utf8.DecodeRuneInString(text[i:])
				switch c3 {
				case '0', '1', '2', '3', '4', '5', '6', '7':
					value = value*8 + c3 - '0'
					i += width3
					c4, width4 := utf8.DecodeRuneInString(text[i:])
					switch c4 {
					case '0', '1', '2', '3', '4', '5', '6', '7':
						// Only take a third digit if the result stays < 256
						temp := value*8 + c4 - '0'
						if temp < 256 {
							value = temp
							i += width4
						}
					case '8', '9':
						isBad = true
					}
				case '8', '9':
					isBad = true
				}
				c = value

				// Forbid the use of octal literals other than "\0"
				if isBad || text[octalStart:i] != "\\0" {
					lexer.LegacyOctalLoc = logger.Loc{Start: int32(start + octalStart)}
				}

			case '8', '9':
				c = c2

				// Forbid the invalid octal literals "\8" and "\9"
				lexer.LegacyOctalLoc = logger.Loc{Start: int32(start + i - 2)}

			case 'x':
				if lexer.json == JSON {
					return nil, false, start + i - width2
				}

				// 2-digit hexadecimal
				value := '\000'
				for j := 0; j < 2; j++ {
					c3, width3 := utf8.DecodeRuneInString(text[i:])
					i += width3
					switch c3 {
					case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
						value = value*16 | (c3 - '0')
					case 'a', 'b', 'c', 'd', 'e', 'f':
						value = value*16 | (c3 + 10 - 'a')
					case 'A', 'B', 'C', 'D', 'E', 'F':
						value = value*16 | (c3 + 10 - 'A')
					default:
						return nil, false, start + i - width3
					}
				}
				c = value

			case 'u':
				// Unicode
				value := '\000'

				// Check the first character
				c3, width3 := utf8.DecodeRuneInString(text[i:])
				i += width3

				if c3 == '{' {
					if lexer.json == JSON {
						return nil, false, start + i - width2
					}

					// Variable-length "\u{...}" escape
					hexStart := i - width - width2 - width3
					isFirst := true
					isOutOfRange := false
				variableLength:
					for {
						c3, width3 = utf8.DecodeRuneInString(text[i:])
						i += width3

						switch c3 {
						case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
							value = value*16 | (c3 - '0')
						case 'a', 'b', 'c', 'd', 'e', 'f':
							value = value*16 | (c3 + 10 - 'a')
						case 'A', 'B', 'C', 'D', 'E', 'F':
							value = value*16 | (c3 + 10 - 'A')
						case '}':
							// At least one hex digit is required
							if isFirst {
								return nil, false, start + i - width3
							}
							break variableLength
						default:
							return nil, false, start + i - width3
						}

						if value > utf8.MaxRune {
							isOutOfRange = true
						}

						isFirst = false
					}

					if isOutOfRange && reportErrors {
						lexer.addRangeError(logger.Range{Loc: logger.Loc{Start: int32(start + hexStart)}, Len: int32(i - hexStart)},
							"Unicode escape sequence is out of range")
						panic(LexerPanic{})
					}
				} else {
					// Fixed-length "\uXXXX" escape: exactly four hex digits
					for j := 0; j < 4; j++ {
						switch c3 {
						case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
							value = value*16 | (c3 - '0')
						case 'a', 'b', 'c', 'd', 'e', 'f':
							value = value*16 | (c3 + 10 - 'a')
						case 'A', 'B', 'C', 'D', 'E', 'F':
							value = value*16 | (c3 + 10 - 'A')
						default:
							return nil, false, start + i - width3
						}

						if j < 3 {
							c3, width3 = utf8.DecodeRuneInString(text[i:])
							i += width3
						}
					}
				}
				c = value

			case '\r':
				if lexer.json == JSON {
					return nil, false, start + i - width2
				}

				// Ignore line continuations. A line continuation is not an escaped newline.
				if i < len(text) && text[i] == '\n' {
					// Make sure Windows CRLF counts as a single newline
					i++
				}
				continue

			case '\n', '\u2028', '\u2029':
				if lexer.json == JSON {
					return nil, false, start + i - width2
				}

				// Ignore line continuations. A line continuation is not an escaped newline.
				continue

			default:
				if lexer.json == JSON {
					// JSON only allows "\"", "\\", and "\/" beyond the cases above
					switch c2 {
					case '"', '\\', '/':

					default:
						return nil, false, start + i - width2
					}
				}

				c = c2
			}
		}

		// Encode as UTF-16, using a surrogate pair for code points outside
		// the basic multilingual plane
		if c <= 0xFFFF {
			decoded = append(decoded, uint16(c))
		} else {
			c -= 0x10000
			decoded = append(decoded, uint16(0xD800+((c>>10)&0x3FF)), uint16(0xDC00+(c&0x3FF)))
		}
	}

	return decoded, true, 0
}
+
// RescanCloseBraceAsTemplateToken re-lexes the current "}" token as the
// continuation of a template literal (e.g. TTemplateMiddle). The parser
// calls this when the "}" closes a "${...}" substitution inside a template.
func (lexer *Lexer) RescanCloseBraceAsTemplateToken() {
	if lexer.Token != TCloseBrace {
		lexer.Expected(TCloseBrace)
	}

	lexer.rescanCloseBraceAsTemplateToken = true
	// Pretend the "}" was a "`" and rewind so Next() lexes the remainder of
	// the template literal starting from the "}" position
	lexer.codePoint = '`'
	lexer.current = lexer.end
	lexer.end -= 1
	lexer.Next()
	lexer.rescanCloseBraceAsTemplateToken = false
}
+
+func (lexer *Lexer) step() {
+	codePoint, width := utf8.DecodeRuneInString(lexer.source.Contents[lexer.current:])
+
+	// Use -1 to indicate the end of the file
+	if width == 0 {
+		codePoint = -1
+	}
+
+	// Track the approximate number of newlines in the file so we can preallocate
+	// the line offset table in the printer for source maps. The line offset table
+	// is the #1 highest allocation in the heap profile, so this is worth doing.
+	// This count is approximate because it handles "\n" and "\r\n" (the common
+	// cases) but not "\r" or "\u2028" or "\u2029". Getting this wrong is harmless
+	// because it's only a preallocation. The array will just grow if it's too small.
+	if codePoint == '\n' {
+		lexer.ApproximateNewlineCount++
+	}
+
+	lexer.codePoint = codePoint
+	lexer.end = lexer.current
+	lexer.current += width
+}
+
+func (lexer *Lexer) addRangeError(r logger.Range, text string) {
+	// Don't report multiple errors in the same spot
+	if r.Loc == lexer.prevErrorLoc {
+		return
+	}
+	lexer.prevErrorLoc = r.Loc
+
+	if !lexer.IsLogDisabled {
+		lexer.log.AddError(&lexer.tracker, r, text)
+	}
+}
+
+func (lexer *Lexer) addRangeErrorWithSuggestion(r logger.Range, text string, suggestion string) {
+	// Don't report multiple errors in the same spot
+	if r.Loc == lexer.prevErrorLoc {
+		return
+	}
+	lexer.prevErrorLoc = r.Loc
+
+	if !lexer.IsLogDisabled {
+		data := lexer.tracker.MsgData(r, text)
+		data.Location.Suggestion = suggestion
+		lexer.log.AddMsg(logger.Msg{Kind: logger.Error, Data: data})
+	}
+}
+
+func (lexer *Lexer) AddRangeErrorWithNotes(r logger.Range, text string, notes []logger.MsgData) {
+	// Don't report multiple errors in the same spot
+	if r.Loc == lexer.prevErrorLoc {
+		return
+	}
+	lexer.prevErrorLoc = r.Loc
+
+	if !lexer.IsLogDisabled {
+		lexer.log.AddErrorWithNotes(&lexer.tracker, r, text, notes)
+	}
+}
+
+func hasPrefixWithWordBoundary(text string, prefix string) bool {
+	t := len(text)
+	p := len(prefix)
+	if t >= p && text[0:p] == prefix {
+		if t == p {
+			return true
+		}
+		c, _ := utf8.DecodeRuneInString(text[p:])
+		if !js_ast.IsIdentifierContinue(c) {
+			return true
+		}
+	}
+	return false
+}
+
// pragmaArg describes how a pragma comment's argument is separated from the
// pragma name when scanning with scanForPragmaArg.
type pragmaArg uint8

const (
	// pragmaNoSpaceFirst means the argument follows the pragma immediately.
	pragmaNoSpaceFirst pragmaArg = iota
	// pragmaSkipSpaceFirst means one or more whitespace characters must
	// separate the pragma name from its argument.
	pragmaSkipSpaceFirst
)
+
// scanForPragmaArg extracts the argument that follows a pragma (such as
// "jsx" or " sourceMappingURL=") inside a comment. "start" is the source
// offset of "text", and "text" must begin with "pragma". The argument is
// the run of non-whitespace characters after the pragma, preceded by
// mandatory whitespace when kind is pragmaSkipSpaceFirst. It returns the
// argument's text and source range, or ok=false if no argument is present.
func scanForPragmaArg(kind pragmaArg, start int, pragma string, text string) (logger.Span, bool) {
	// Skip past the pragma name itself
	text = text[len(pragma):]
	start += len(pragma)

	if text == "" {
		return logger.Span{}, false
	}

	// One or more whitespace characters
	c, width := utf8.DecodeRuneInString(text)
	if kind == pragmaSkipSpaceFirst {
		if !js_ast.IsWhitespace(c) {
			return logger.Span{}, false
		}
		for js_ast.IsWhitespace(c) {
			text = text[width:]
			start += width
			if text == "" {
				return logger.Span{}, false
			}
			c, width = utf8.DecodeRuneInString(text)
		}
	}

	// One or more non-whitespace characters
	i := 0
	for !js_ast.IsWhitespace(c) {
		i += width
		if i >= len(text) {
			break
		}
		c, width = utf8.DecodeRuneInString(text[i:])
		if js_ast.IsWhitespace(c) {
			break
		}
	}

	return logger.Span{
		Text: text[:i],
		Range: logger.Range{
			Loc: logger.Loc{Start: int32(start)},
			Len: int32(i),
		},
	}, true
}
+
// isUpperASCII reports whether c is an ASCII uppercase letter ('A'-'Z').
func isUpperASCII(c byte) bool {
	return 'A' <= c && c <= 'Z'
}
+
// isLetterASCII reports whether c is an ASCII letter ('a'-'z' or 'A'-'Z').
func isLetterASCII(c byte) bool {
	folded := c | 0x20 // fold ASCII uppercase into lowercase
	return 'a' <= folded && folded <= 'z'
}
+
// scanCommentText processes the comment token that was just scanned
// (spanning lexer.start to lexer.end). It records the comment's range,
// detects annotations ("@__PURE__", "@__KEY__", "@__NO_SIDE_EFFECTS__",
// "@license"/"@preserve", the JSX pragmas, and "sourceMappingURL="), and
// classifies the comment for legal-comment and general preservation.
func (lexer *Lexer) scanCommentText() {
	text := lexer.source.Contents[lexer.start:lexer.end]
	// "/*!" and "//!" comments are legal comments by convention
	hasLegalAnnotation := len(text) > 2 && text[2] == '!'
	isMultiLineComment := text[1] == '*'
	omitFromGeneralCommentPreservation := false

	// Save the original comment text so we can subtract comments from the
	// character frequency analysis used by symbol minification
	lexer.AllComments = append(lexer.AllComments, lexer.Range())

	// Omit the trailing "*/" from the checks below
	endOfCommentText := len(text)
	if isMultiLineComment {
		endOfCommentText -= 2
	}

	for i, n := 0, len(text); i < n; i++ {
		switch text[i] {
		case '#':
			rest := text[i+1 : endOfCommentText]
			if hasPrefixWithWordBoundary(rest, "__PURE__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= PureCommentBefore
			} else if hasPrefixWithWordBoundary(rest, "__KEY__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= KeyCommentBefore
			} else if hasPrefixWithWordBoundary(rest, "__NO_SIDE_EFFECTS__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= NoSideEffectsCommentBefore
			} else if i == 2 && strings.HasPrefix(rest, " sourceMappingURL=") {
				// "//# sourceMappingURL=..." must start right after the "//"
				if arg, ok := scanForPragmaArg(pragmaNoSpaceFirst, lexer.start+i+1, " sourceMappingURL=", rest); ok {
					omitFromGeneralCommentPreservation = true
					lexer.SourceMappingURL = arg
				}
			}

		case '@':
			rest := text[i+1 : endOfCommentText]
			if hasPrefixWithWordBoundary(rest, "__PURE__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= PureCommentBefore
			} else if hasPrefixWithWordBoundary(rest, "__KEY__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= KeyCommentBefore
			} else if hasPrefixWithWordBoundary(rest, "__NO_SIDE_EFFECTS__") {
				omitFromGeneralCommentPreservation = true
				lexer.HasCommentBefore |= NoSideEffectsCommentBefore
			} else if hasPrefixWithWordBoundary(rest, "preserve") || hasPrefixWithWordBoundary(rest, "license") {
				hasLegalAnnotation = true
			} else if hasPrefixWithWordBoundary(rest, "jsx") {
				if arg, ok := scanForPragmaArg(pragmaSkipSpaceFirst, lexer.start+i+1, "jsx", rest); ok {
					lexer.JSXFactoryPragmaComment = arg
				}
			} else if hasPrefixWithWordBoundary(rest, "jsxFrag") {
				if arg, ok := scanForPragmaArg(pragmaSkipSpaceFirst, lexer.start+i+1, "jsxFrag", rest); ok {
					lexer.JSXFragmentPragmaComment = arg
				}
			} else if hasPrefixWithWordBoundary(rest, "jsxRuntime") {
				if arg, ok := scanForPragmaArg(pragmaSkipSpaceFirst, lexer.start+i+1, "jsxRuntime", rest); ok {
					lexer.JSXRuntimePragmaComment = arg
				}
			} else if hasPrefixWithWordBoundary(rest, "jsxImportSource") {
				if arg, ok := scanForPragmaArg(pragmaSkipSpaceFirst, lexer.start+i+1, "jsxImportSource", rest); ok {
					lexer.JSXImportSourcePragmaComment = arg
				}
			} else if i == 2 && strings.HasPrefix(rest, " sourceMappingURL=") {
				if arg, ok := scanForPragmaArg(pragmaNoSpaceFirst, lexer.start+i+1, " sourceMappingURL=", rest); ok {
					omitFromGeneralCommentPreservation = true
					lexer.SourceMappingURL = arg
				}
			}
		}
	}

	if hasLegalAnnotation {
		lexer.LegalCommentsBeforeToken = append(lexer.LegalCommentsBeforeToken, lexer.Range())
	}

	if !omitFromGeneralCommentPreservation {
		lexer.CommentsBeforeToken = append(lexer.CommentsBeforeToken, lexer.Range())
	}
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_lexer/tables.go b/source/vendor/github.com/evanw/esbuild/internal/js_lexer/tables.go
new file mode 100644
index 0000000..c96b6b5
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_lexer/tables.go
@@ -0,0 +1,382 @@
+package js_lexer
+
+// tokenToString maps each lexer token kind T to a short human-readable
+// description for use in diagnostic messages.
+var tokenToString = map[T]string{
+	TEndOfFile:   "end of file",
+	TSyntaxError: "syntax error",
+	THashbang:    "hashbang comment",
+
+	// Literals
+	TNoSubstitutionTemplateLiteral: "template literal",
+	TNumericLiteral:                "number",
+	TStringLiteral:                 "string",
+	TBigIntegerLiteral:             "bigint",
+
+	// Pseudo-literals
+	TTemplateHead:   "template literal",
+	TTemplateMiddle: "template literal",
+	TTemplateTail:   "template literal",
+
+	// Punctuation
+	TAmpersand:                         "\"&\"",
+	TAmpersandAmpersand:                "\"&&\"",
+	TAsterisk:                          "\"*\"",
+	TAsteriskAsterisk:                  "\"**\"",
+	TAt:                                "\"@\"",
+	TBar:                               "\"|\"",
+	TBarBar:                            "\"||\"",
+	TCaret:                             "\"^\"",
+	TCloseBrace:                        "\"}\"",
+	TCloseBracket:                      "\"]\"",
+	TCloseParen:                        "\")\"",
+	TColon:                             "\":\"",
+	TComma:                             "\",\"",
+	TDot:                               "\".\"",
+	TDotDotDot:                         "\"...\"",
+	TEqualsEquals:                      "\"==\"",
+	TEqualsEqualsEquals:                "\"===\"",
+	TEqualsGreaterThan:                 "\"=>\"",
+	TExclamation:                       "\"!\"",
+	TExclamationEquals:                 "\"!=\"",
+	TExclamationEqualsEquals:           "\"!==\"",
+	TGreaterThan:                       "\">\"",
+	TGreaterThanEquals:                 "\">=\"",
+	TGreaterThanGreaterThan:            "\">>\"",
+	TGreaterThanGreaterThanGreaterThan: "\">>>\"",
+	TLessThan:                          "\"<\"",
+	TLessThanEquals:                    "\"<=\"",
+	TLessThanLessThan:                  "\"<<\"",
+	TMinus:                             "\"-\"",
+	TMinusMinus:                        "\"--\"",
+	TOpenBrace:                         "\"{\"",
+	TOpenBracket:                       "\"[\"",
+	TOpenParen:                         "\"(\"",
+	TPercent:                           "\"%\"",
+	TPlus:                              "\"+\"",
+	TPlusPlus:                          "\"++\"",
+	TQuestion:                          "\"?\"",
+	TQuestionDot:                       "\"?.\"",
+	TQuestionQuestion:                  "\"??\"",
+	TSemicolon:                         "\";\"",
+	TSlash:                             "\"/\"",
+	TTilde:                             "\"~\"",
+
+	// Assignments
+	TAmpersandAmpersandEquals:                "\"&&=\"",
+	TAmpersandEquals:                         "\"&=\"",
+	TAsteriskAsteriskEquals:                  "\"**=\"",
+	TAsteriskEquals:                          "\"*=\"",
+	TBarBarEquals:                            "\"||=\"",
+	TBarEquals:                               "\"|=\"",
+	TCaretEquals:                             "\"^=\"",
+	TEquals:                                  "\"=\"",
+	TGreaterThanGreaterThanEquals:            "\">>=\"",
+	TGreaterThanGreaterThanGreaterThanEquals: "\">>>=\"",
+	TLessThanLessThanEquals:                  "\"<<=\"",
+	TMinusEquals:                             "\"-=\"",
+	TPercentEquals:                           "\"%=\"",
+	TPlusEquals:                              "\"+=\"",
+	TQuestionQuestionEquals:                  "\"??=\"",
+	TSlashEquals:                             "\"/=\"",
+
+	// Class-private fields and methods
+	TPrivateIdentifier: "private identifier",
+
+	// Identifiers
+	TIdentifier:     "identifier",
+	TEscapedKeyword: "escaped keyword",
+
+	// Reserved words
+	TBreak:      "\"break\"",
+	TCase:       "\"case\"",
+	TCatch:      "\"catch\"",
+	TClass:      "\"class\"",
+	TConst:      "\"const\"",
+	TContinue:   "\"continue\"",
+	TDebugger:   "\"debugger\"",
+	TDefault:    "\"default\"",
+	TDelete:     "\"delete\"",
+	TDo:         "\"do\"",
+	TElse:       "\"else\"",
+	TEnum:       "\"enum\"",
+	TExport:     "\"export\"",
+	TExtends:    "\"extends\"",
+	TFalse:      "\"false\"",
+	TFinally:    "\"finally\"",
+	TFor:        "\"for\"",
+	TFunction:   "\"function\"",
+	TIf:         "\"if\"",
+	TImport:     "\"import\"",
+	TIn:         "\"in\"",
+	TInstanceof: "\"instanceof\"",
+	TNew:        "\"new\"",
+	TNull:       "\"null\"",
+	TReturn:     "\"return\"",
+	TSuper:      "\"super\"",
+	TSwitch:     "\"switch\"",
+	TThis:       "\"this\"",
+	TThrow:      "\"throw\"",
+	TTrue:       "\"true\"",
+	TTry:        "\"try\"",
+	TTypeof:     "\"typeof\"",
+	TVar:        "\"var\"",
+	TVoid:       "\"void\"",
+	TWhile:      "\"while\"",
+	TWith:       "\"with\"",
+}
+
+// This is from https://github.com/microsoft/TypeScript/blob/master/src/compiler/transformers/jsx.ts
+//
+// jsxEntity maps HTML entity names (without the surrounding "&" and ";") to
+// the Unicode code point they represent, for decoding entities in JSX text.
+var jsxEntity = map[string]rune{
+	"quot":     0x0022,
+	"amp":      0x0026,
+	"apos":     0x0027,
+	"lt":       0x003C,
+	"gt":       0x003E,
+	"nbsp":     0x00A0,
+	"iexcl":    0x00A1,
+	"cent":     0x00A2,
+	"pound":    0x00A3,
+	"curren":   0x00A4,
+	"yen":      0x00A5,
+	"brvbar":   0x00A6,
+	"sect":     0x00A7,
+	"uml":      0x00A8,
+	"copy":     0x00A9,
+	"ordf":     0x00AA,
+	"laquo":    0x00AB,
+	"not":      0x00AC,
+	"shy":      0x00AD,
+	"reg":      0x00AE,
+	"macr":     0x00AF,
+	"deg":      0x00B0,
+	"plusmn":   0x00B1,
+	"sup2":     0x00B2,
+	"sup3":     0x00B3,
+	"acute":    0x00B4,
+	"micro":    0x00B5,
+	"para":     0x00B6,
+	"middot":   0x00B7,
+	"cedil":    0x00B8,
+	"sup1":     0x00B9,
+	"ordm":     0x00BA,
+	"raquo":    0x00BB,
+	"frac14":   0x00BC,
+	"frac12":   0x00BD,
+	"frac34":   0x00BE,
+	"iquest":   0x00BF,
+	"Agrave":   0x00C0,
+	"Aacute":   0x00C1,
+	"Acirc":    0x00C2,
+	"Atilde":   0x00C3,
+	"Auml":     0x00C4,
+	"Aring":    0x00C5,
+	"AElig":    0x00C6,
+	"Ccedil":   0x00C7,
+	"Egrave":   0x00C8,
+	"Eacute":   0x00C9,
+	"Ecirc":    0x00CA,
+	"Euml":     0x00CB,
+	"Igrave":   0x00CC,
+	"Iacute":   0x00CD,
+	"Icirc":    0x00CE,
+	"Iuml":     0x00CF,
+	"ETH":      0x00D0,
+	"Ntilde":   0x00D1,
+	"Ograve":   0x00D2,
+	"Oacute":   0x00D3,
+	"Ocirc":    0x00D4,
+	"Otilde":   0x00D5,
+	"Ouml":     0x00D6,
+	"times":    0x00D7,
+	"Oslash":   0x00D8,
+	"Ugrave":   0x00D9,
+	"Uacute":   0x00DA,
+	"Ucirc":    0x00DB,
+	"Uuml":     0x00DC,
+	"Yacute":   0x00DD,
+	"THORN":    0x00DE,
+	"szlig":    0x00DF,
+	"agrave":   0x00E0,
+	"aacute":   0x00E1,
+	"acirc":    0x00E2,
+	"atilde":   0x00E3,
+	"auml":     0x00E4,
+	"aring":    0x00E5,
+	"aelig":    0x00E6,
+	"ccedil":   0x00E7,
+	"egrave":   0x00E8,
+	"eacute":   0x00E9,
+	"ecirc":    0x00EA,
+	"euml":     0x00EB,
+	"igrave":   0x00EC,
+	"iacute":   0x00ED,
+	"icirc":    0x00EE,
+	"iuml":     0x00EF,
+	"eth":      0x00F0,
+	"ntilde":   0x00F1,
+	"ograve":   0x00F2,
+	"oacute":   0x00F3,
+	"ocirc":    0x00F4,
+	"otilde":   0x00F5,
+	"ouml":     0x00F6,
+	"divide":   0x00F7,
+	"oslash":   0x00F8,
+	"ugrave":   0x00F9,
+	"uacute":   0x00FA,
+	"ucirc":    0x00FB,
+	"uuml":     0x00FC,
+	"yacute":   0x00FD,
+	"thorn":    0x00FE,
+	"yuml":     0x00FF,
+	"OElig":    0x0152,
+	"oelig":    0x0153,
+	"Scaron":   0x0160,
+	"scaron":   0x0161,
+	"Yuml":     0x0178,
+	"fnof":     0x0192,
+	"circ":     0x02C6,
+	"tilde":    0x02DC,
+	"Alpha":    0x0391,
+	"Beta":     0x0392,
+	"Gamma":    0x0393,
+	"Delta":    0x0394,
+	"Epsilon":  0x0395,
+	"Zeta":     0x0396,
+	"Eta":      0x0397,
+	"Theta":    0x0398,
+	"Iota":     0x0399,
+	"Kappa":    0x039A,
+	"Lambda":   0x039B,
+	"Mu":       0x039C,
+	"Nu":       0x039D,
+	"Xi":       0x039E,
+	"Omicron":  0x039F,
+	"Pi":       0x03A0,
+	"Rho":      0x03A1,
+	"Sigma":    0x03A3,
+	"Tau":      0x03A4,
+	"Upsilon":  0x03A5,
+	"Phi":      0x03A6,
+	"Chi":      0x03A7,
+	"Psi":      0x03A8,
+	"Omega":    0x03A9,
+	"alpha":    0x03B1,
+	"beta":     0x03B2,
+	"gamma":    0x03B3,
+	"delta":    0x03B4,
+	"epsilon":  0x03B5,
+	"zeta":     0x03B6,
+	"eta":      0x03B7,
+	"theta":    0x03B8,
+	"iota":     0x03B9,
+	"kappa":    0x03BA,
+	"lambda":   0x03BB,
+	"mu":       0x03BC,
+	"nu":       0x03BD,
+	"xi":       0x03BE,
+	"omicron":  0x03BF,
+	"pi":       0x03C0,
+	"rho":      0x03C1,
+	"sigmaf":   0x03C2,
+	"sigma":    0x03C3,
+	"tau":      0x03C4,
+	"upsilon":  0x03C5,
+	"phi":      0x03C6,
+	"chi":      0x03C7,
+	"psi":      0x03C8,
+	"omega":    0x03C9,
+	"thetasym": 0x03D1,
+	"upsih":    0x03D2,
+	"piv":      0x03D6,
+	"ensp":     0x2002,
+	"emsp":     0x2003,
+	"thinsp":   0x2009,
+	"zwnj":     0x200C,
+	"zwj":      0x200D,
+	"lrm":      0x200E,
+	"rlm":      0x200F,
+	"ndash":    0x2013,
+	"mdash":    0x2014,
+	"lsquo":    0x2018,
+	"rsquo":    0x2019,
+	"sbquo":    0x201A,
+	"ldquo":    0x201C,
+	"rdquo":    0x201D,
+	"bdquo":    0x201E,
+	"dagger":   0x2020,
+	"Dagger":   0x2021,
+	"bull":     0x2022,
+	"hellip":   0x2026,
+	"permil":   0x2030,
+	"prime":    0x2032,
+	"Prime":    0x2033,
+	"lsaquo":   0x2039,
+	"rsaquo":   0x203A,
+	"oline":    0x203E,
+	"frasl":    0x2044,
+	"euro":     0x20AC,
+	"image":    0x2111,
+	"weierp":   0x2118,
+	"real":     0x211C,
+	"trade":    0x2122,
+	"alefsym":  0x2135,
+	"larr":     0x2190,
+	"uarr":     0x2191,
+	"rarr":     0x2192,
+	"darr":     0x2193,
+	"harr":     0x2194,
+	"crarr":    0x21B5,
+	"lArr":     0x21D0,
+	"uArr":     0x21D1,
+	"rArr":     0x21D2,
+	"dArr":     0x21D3,
+	"hArr":     0x21D4,
+	"forall":   0x2200,
+	"part":     0x2202,
+	"exist":    0x2203,
+	"empty":    0x2205,
+	"nabla":    0x2207,
+	"isin":     0x2208,
+	"notin":    0x2209,
+	"ni":       0x220B,
+	"prod":     0x220F,
+	"sum":      0x2211,
+	"minus":    0x2212,
+	"lowast":   0x2217,
+	"radic":    0x221A,
+	"prop":     0x221D,
+	"infin":    0x221E,
+	"ang":      0x2220,
+	"and":      0x2227,
+	"or":       0x2228,
+	"cap":      0x2229,
+	"cup":      0x222A,
+	"int":      0x222B,
+	"there4":   0x2234,
+	"sim":      0x223C,
+	"cong":     0x2245,
+	"asymp":    0x2248,
+	"ne":       0x2260,
+	"equiv":    0x2261,
+	"le":       0x2264,
+	"ge":       0x2265,
+	"sub":      0x2282,
+	"sup":      0x2283,
+	"nsub":     0x2284,
+	"sube":     0x2286,
+	"supe":     0x2287,
+	"oplus":    0x2295,
+	"otimes":   0x2297,
+	"perp":     0x22A5,
+	"sdot":     0x22C5,
+	"lceil":    0x2308,
+	"rceil":    0x2309,
+	"lfloor":   0x230A,
+	"rfloor":   0x230B,
+	"lang":     0x2329,
+	"rang":     0x232A,
+	"loz":      0x25CA,
+	"spades":   0x2660,
+	"clubs":    0x2663,
+	"hearts":   0x2665,
+	"diams":    0x2666,
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/global_name_parser.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/global_name_parser.go
new file mode 100644
index 0000000..0064990
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/global_name_parser.go
@@ -0,0 +1,49 @@
+package js_parser
+
+import (
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// ParseGlobalName parses a dotted global name expression such as
+// "window.foo" or "globalThis['bar'].baz" into its ordered parts (e.g.
+// ["window", "foo"]). The grammar accepted is: an identifier followed by any
+// number of "." property accesses or string-literal index accesses. Returns
+// ok == false if the input does not match this grammar.
+func ParseGlobalName(log logger.Log, source logger.Source) (result []string, ok bool) {
+	ok = true
+	defer func() {
+		// The lexer reports errors by panicking with LexerPanic. Convert that
+		// into ok == false here; re-panic on anything else so real bugs are
+		// not swallowed.
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			ok = false
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	lexer := js_lexer.NewLexerGlobalName(log, source)
+
+	// Start off with an identifier
+	result = append(result, lexer.Identifier.String)
+	lexer.Expect(js_lexer.TIdentifier)
+
+	// Follow with dot or index expressions
+	for lexer.Token != js_lexer.TEndOfFile {
+		switch lexer.Token {
+		case js_lexer.TDot:
+			// After a ".", keywords are allowed as property names (e.g. "a.if")
+			lexer.Next()
+			if !lexer.IsIdentifierOrKeyword() {
+				lexer.Expect(js_lexer.TIdentifier)
+			}
+			result = append(result, lexer.Identifier.String)
+			lexer.Next()
+
+		case js_lexer.TOpenBracket:
+			// Index access must be a string literal: a["b"]
+			lexer.Next()
+			result = append(result, helpers.UTF16ToString(lexer.StringLiteral()))
+			lexer.Expect(js_lexer.TStringLiteral)
+			lexer.Expect(js_lexer.TCloseBracket)
+
+		default:
+			// Anything else is a syntax error; Expect panics via the lexer
+			lexer.Expect(js_lexer.TDot)
+		}
+	}
+
+	return
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go
new file mode 100644
index 0000000..45daca9
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser.go
@@ -0,0 +1,18021 @@
+package js_parser
+
+import (
+	"fmt"
+	"math"
+	"regexp"
+	"sort"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/renamer"
+	"github.com/evanw/esbuild/internal/runtime"
+)
+
+// This parser does two passes:
+//
+// 1. Parse the source into an AST, create the scope tree, and declare symbols.
+//
+//  2. Visit each node in the AST, bind identifiers to declared symbols, do
+//     constant folding, substitute compile-time variable definitions, and
+//     lower certain syntactic constructs as appropriate given the language
+//     target.
+//
+// So many things have been put in so few passes because we want to minimize
+// the number of full-tree passes to improve performance. However, we need
+// to have at least two separate passes to handle variable hoisting. See the
+// comment about scopesInOrder below for more information.
+type parser struct {
+	// Core state shared by the parse and visit passes. Many of these fields
+	// are only populated or consumed during one of the two passes (see the
+	// comment on the type above).
+	options                    Options
+	log                        logger.Log
+	source                     logger.Source
+	tracker                    logger.LineColumnTracker
+	fnOrArrowDataParse         fnOrArrowDataParse
+	fnOnlyDataVisit            fnOnlyDataVisit
+	allocatedNames             []string
+	currentScope               *js_ast.Scope
+	scopesForCurrentPart       []*js_ast.Scope
+	symbols                    []ast.Symbol
+	astHelpers                 js_ast.HelperContext
+	tsUseCounts                []uint32
+	injectedDefineSymbols      []ast.Ref
+	injectedSymbolSources      map[ast.Ref]injectedSymbolSource
+	injectedDotNames           map[string][]injectedDotName
+	dropLabelsMap              map[string]struct{}
+	exprComments               map[logger.Loc][]string
+	mangledProps               map[string]ast.Ref
+	reservedProps              map[string]bool
+	symbolUses                 map[ast.Ref]js_ast.SymbolUse
+	importSymbolPropertyUses   map[ast.Ref]map[string]js_ast.SymbolUse
+	symbolCallUses             map[ast.Ref]js_ast.SymbolCallUse
+	declaredSymbols            []js_ast.DeclaredSymbol
+	globPatternImports         []globPatternImport
+	runtimeImports             map[string]ast.LocRef
+	duplicateCaseChecker       duplicateCaseChecker
+	unrepresentableIdentifiers map[string]bool
+	legacyOctalLiterals        map[js_ast.E]logger.Range
+	scopesInOrderForEnum       map[logger.Loc][]scopeOrder
+	binaryExprStack            []binaryExprVisitor
+
+	// For strict mode handling
+	hoistedRefForSloppyModeBlockFn map[ast.Ref]ast.Ref
+
+	// For lowering private methods
+	privateGetters map[ast.Ref]ast.Ref
+	privateSetters map[ast.Ref]ast.Ref
+
+	// These are for TypeScript
+	//
+	// We build up enough information about the TypeScript namespace hierarchy to
+	// be able to resolve scope lookups and property accesses for TypeScript enum
+	// and namespace features. Each JavaScript scope object inside a namespace
+	// has a reference to a map of exported namespace members from sibling scopes.
+	//
+	// In addition, there is a map from each relevant symbol reference to the data
+	// associated with that namespace or namespace member: "refToTSNamespaceMemberData".
+	// This gives enough info to be able to resolve queries into the namespace.
+	//
+	// When visiting expressions, namespace metadata is associated with the most
+	// recently visited node. If namespace metadata is present, "tsNamespaceTarget"
+	// will be set to the most recently visited node (as a way to mark that this
+	// node has metadata) and "tsNamespaceMemberData" will be set to the metadata.
+	refToTSNamespaceMemberData map[ast.Ref]js_ast.TSNamespaceMemberData
+	tsNamespaceTarget          js_ast.E
+	tsNamespaceMemberData      js_ast.TSNamespaceMemberData
+	emittedNamespaceVars       map[ast.Ref]bool
+	isExportedInsideNamespace  map[ast.Ref]ast.Ref
+	localTypeNames             map[string]bool
+	tsEnums                    map[ast.Ref]map[string]js_ast.TSEnumValue
+	constValues                map[ast.Ref]js_ast.ConstValue
+	propDerivedCtorValue       js_ast.E
+	propMethodDecoratorScope   *js_ast.Scope
+
+	// This is the reference to the generated function argument for the namespace,
+	// which is different than the reference to the namespace itself:
+	//
+	//   namespace ns {
+	//   }
+	//
+	// The code above is transformed into something like this:
+	//
+	//   var ns1;
+	//   (function(ns2) {
+	//   })(ns1 || (ns1 = {}));
+	//
+	// This variable is "ns2" not "ns1". It is only used during the second
+	// "visit" pass.
+	enclosingNamespaceArgRef *ast.Ref
+
+	// Imports (both ES6 and CommonJS) are tracked at the top level
+	importRecords               []ast.ImportRecord
+	importRecordsForCurrentPart []uint32
+	exportStarImportRecords     []uint32
+
+	// These are for handling ES6 imports and exports
+	importItemsForNamespace map[ast.Ref]namespaceImportItems
+	isImportItem            map[ast.Ref]bool
+	namedImports            map[ast.Ref]js_ast.NamedImport
+	namedExports            map[string]js_ast.NamedExport
+	topLevelSymbolToParts   map[ast.Ref][]uint32
+	importNamespaceCCMap    map[importNamespaceCall]bool
+
+	// The parser does two passes and we need to pass the scope tree information
+	// from the first pass to the second pass. That's done by tracking the calls
+	// to pushScopeForParsePass() and popScope() during the first pass in
+	// scopesInOrder.
+	//
+	// Then, when the second pass calls pushScopeForVisitPass() and popScope(),
+	// we consume entries from scopesInOrder and make sure they are in the same
+	// order. This way the second pass can efficiently use the same scope tree
+	// as the first pass without having to attach the scope tree to the AST.
+	//
+	// We need to split this into two passes because the pass that declares the
+	// symbols must be separate from the pass that binds identifiers to declared
+	// symbols to handle declaring a hoisted "var" symbol in a nested scope and
+	// binding a name to it in a parent or sibling scope.
+	scopesInOrder []scopeOrder
+
+	// These propagate the name from the parent context into an anonymous child
+	// expression. For example:
+	//
+	//   let foo = function() {}
+	//   assert.strictEqual(foo.name, 'foo')
+	//
+	nameToKeep      string
+	nameToKeepIsFor js_ast.E
+
+	// These properties are for the visit pass, which runs after the parse pass.
+	// The visit pass binds identifiers to declared symbols, does constant
+	// folding, substitutes compile-time variable definitions, and lowers certain
+	// syntactic constructs as appropriate.
+	stmtExprValue                        js_ast.E
+	callTarget                           js_ast.E
+	dotOrIndexTarget                     js_ast.E
+	templateTag                          js_ast.E
+	deleteTarget                         js_ast.E
+	loopBody                             js_ast.S
+	suspiciousLogicalOperatorInsideArrow js_ast.E
+	moduleScope                          *js_ast.Scope
+
+	// This is internal-only data used for the implementation of Yarn PnP
+	manifestForYarnPnP     js_ast.Expr
+	stringLocalsForYarnPnP map[ast.Ref]stringLocalForYarnPnP
+
+	// This helps recognize the "await import()" pattern. When this is present,
+	// warnings about non-string import paths will be omitted inside try blocks.
+	awaitTarget js_ast.E
+
+	// This helps recognize the "import().catch()" pattern. We also try to avoid
+	// warning about this just like the "try { await import() }" pattern.
+	thenCatchChain thenCatchChain
+
+	// When bundling, hoisted top-level local variables declared with "var" in
+	// nested scopes are moved up to be declared in the top-level scope instead.
+	// The old "var" statements are turned into regular assignments instead. This
+	// makes it easier to quickly scan the top-level statements for "var" locals
+	// with the guarantee that all will be found.
+	relocatedTopLevelVars []ast.LocRef
+
+	// We need to lower private names such as "#foo" if they are used in a brand
+	// check such as "#foo in x" even if the private name syntax would otherwise
+	// be supported. This is because private names are a newly-added feature.
+	//
+	// However, this parser operates in only two passes for speed. The first pass
+	// parses things and declares variables, and the second pass lowers things and
+	// resolves references to declared variables. So the existence of a "#foo in x"
+	// expression for a specific "#foo" cannot be used to decide to lower "#foo"
+	// because it's too late by that point. There may be another expression such
+	// as "x.#foo" before that point and that must be lowered as well even though
+	// it has already been visited.
+	//
+	// Instead what we do is track just the names of fields used in private brand
+	// checks during the first pass. This tracks the names themselves, not symbol
+	// references. Then, during the second pass when we are about to enter into
+	// a class, we conservatively decide to lower all private names in that class
+	// which are used in a brand check anywhere in the file.
+	lowerAllOfThesePrivateNames map[string]bool
+
+	// Temporary variables used for lowering
+	tempLetsToDeclare         []ast.Ref
+	tempRefsToDeclare         []tempRef
+	topLevelTempRefsToDeclare []tempRef
+
+	// The lexer that supplies tokens for the parse pass
+	lexer js_lexer.Lexer
+
+	// Private field access in a decorator lowers all private fields in that class
+	parseExperimentalDecoratorNesting int
+
+	// Temporary variables used for lowering
+	tempRefCount         int
+	topLevelTempRefCount int
+
+	// We need to scan over the source contents to recover the line and column offsets
+	jsxSourceLoc    int
+	jsxSourceLine   int
+	jsxSourceColumn int
+
+	// References to special names such as "exports", "require", and "module"
+	exportsRef    ast.Ref
+	requireRef    ast.Ref
+	moduleRef     ast.Ref
+	importMetaRef ast.Ref
+	promiseRef    ast.Ref
+	regExpRef     ast.Ref
+	superCtorRef  ast.Ref
+
+	// Imports from "react/jsx-runtime" and "react", respectively.
+	// (Or whatever was specified in the "importSource" option)
+	jsxRuntimeImports map[string]ast.LocRef
+	jsxLegacyImports  map[string]ast.LocRef
+
+	// For lowering private methods
+	weakMapRef ast.Ref
+	weakSetRef ast.Ref
+
+	// Source ranges of notable syntax, used for diagnostics and for deciding
+	// the module format
+	esmImportStatementKeyword logger.Range
+	esmImportMeta             logger.Range
+	esmExportKeyword          logger.Range
+	enclosingClassKeyword     logger.Range
+	topLevelAwaitKeyword      logger.Range
+	liveTopLevelAwaitKeyword  logger.Range
+
+	latestArrowArgLoc      logger.Loc
+	forbidSuffixAfterAsLoc logger.Loc
+	firstJSXElementLoc     logger.Loc
+
+	fnOrArrowDataVisit fnOrArrowDataVisit
+
+	// ArrowFunction is a special case in the grammar. Although it appears to be
+	// a PrimaryExpression, it's actually an AssignmentExpression. This means if
+	// a AssignmentExpression ends up producing an ArrowFunction then nothing can
+	// come after it other than the comma operator, since the comma operator is
+	// the only thing above AssignmentExpression under the Expression rule:
+	//
+	//   AssignmentExpression:
+	//     ArrowFunction
+	//     ConditionalExpression
+	//     LeftHandSideExpression = AssignmentExpression
+	//     LeftHandSideExpression AssignmentOperator AssignmentExpression
+	//
+	//   Expression:
+	//     AssignmentExpression
+	//     Expression , AssignmentExpression
+	//
+	afterArrowBodyLoc logger.Loc
+
+	// Setting this to true disables warnings about code that is very likely to
+	// be a bug. This is used to ignore issues inside "node_modules" directories.
+	// This has caught real issues in the past. However, it's not esbuild's job
+	// to find bugs in other libraries, and these warnings are problematic for
+	// people using these libraries with esbuild. The only fix is to either
+	// disable all esbuild warnings and not get warnings about your own code, or
+	// to try to get the warning fixed in the affected library. This is
+	// especially annoying if the warning is a false positive as was the case in
+	// https://github.com/firebase/firebase-js-sdk/issues/3814. So these warnings
+	// are now disabled for code inside "node_modules" directories.
+	suppressWarningsAboutWeirdCode bool
+
+	// A file is considered to be an ECMAScript module if it has any of the
+	// features of one (e.g. the "export" keyword), otherwise it's considered
+	// a CommonJS module.
+	//
+	// However, we have a single exception: a file where the only ESM feature
+	// is the "import" keyword is allowed to have CommonJS exports. This feature
+	// is necessary to be able to synchronously import ESM code into CommonJS,
+	// which we need to enable in a few important cases. Some examples are:
+	// our runtime code, injected files (the "inject" feature is ESM-only),
+	// and certain automatically-generated virtual modules from plugins.
+	isFileConsideredToHaveESMExports bool // Use only for export-related stuff
+	isFileConsideredESM              bool // Use for all other stuff
+
+	// Inside a TypeScript namespace, an "export declare" statement can be used
+	// to cause a namespace to be emitted even though it has no other observable
+	// effect. This flag is used to implement this feature.
+	//
+	// Specifically, namespaces should be generated for all of the following
+	// namespaces below except for "f", which should not be generated:
+	//
+	//   namespace a { export declare const a }
+	//   namespace b { export declare let [[b]] }
+	//   namespace c { export declare function c() }
+	//   namespace d { export declare class d {} }
+	//   namespace e { export declare enum e {} }
+	//   namespace f { export declare namespace f {} }
+	//
+	// The TypeScript compiler compiles this into the following code (notice "f"
+	// is missing):
+	//
+	//   var a; (function (a_1) {})(a || (a = {}));
+	//   var b; (function (b_1) {})(b || (b = {}));
+	//   var c; (function (c_1) {})(c || (c = {}));
+	//   var d; (function (d_1) {})(d || (d = {}));
+	//   var e; (function (e_1) {})(e || (e = {}));
+	//
+	// Note that this should not be implemented by declaring symbols for "export
+	// declare" statements because the TypeScript compiler doesn't generate any
+	// code for these statements, so these statements are actually references to
+	// global variables. There is one exception, which is that local variables
+	// *should* be declared as symbols because they are replaced with. This seems
+	// like very arbitrary behavior but it's what the TypeScript compiler does,
+	// so we try to match it.
+	//
+	// Specifically, in the following code below "a" and "b" should be declared
+	// and should be substituted with "ns.a" and "ns.b" but the other symbols
+	// shouldn't. References to the other symbols actually refer to global
+	// variables instead of to symbols that are exported from the namespace.
+	// This is the case as of TypeScript 4.3. I assume this is a TypeScript bug:
+	//
+	//   namespace ns {
+	//     export declare const a
+	//     export declare let [[b]]
+	//     export declare function c()
+	//     export declare class d { }
+	//     export declare enum e { }
+	//     console.log(a, b, c, d, e)
+	//   }
+	//
+	// The TypeScript compiler compiles this into the following code:
+	//
+	//   var ns;
+	//   (function (ns) {
+	//       console.log(ns.a, ns.b, c, d, e);
+	//   })(ns || (ns = {}));
+	//
+	// Relevant issue: https://github.com/evanw/esbuild/issues/1158
+	hasNonLocalExportDeclareInsideNamespace bool
+
+	// When this flag is enabled, we attempt to fold all expressions that
+	// TypeScript would consider to be "constant expressions". This flag is
+	// enabled inside each enum body block since TypeScript requires numeric
+	// constant folding in enum definitions.
+	//
+	// We also enable this flag in certain cases in JavaScript files such as when
+	// parsing "const" declarations at the top of a non-ESM file, but we still
+	// reuse TypeScript's notion of "constant expressions" for our own convenience.
+	//
+	// As of TypeScript 5.0, a "constant expression" is defined as follows:
+	//
+	//   An expression is considered a constant expression if it is
+	//
+	//   * a number or string literal,
+	//   * a unary +, -, or ~ applied to a numeric constant expression,
+	//   * a binary +, -, *, /, %, **, <<, >>, >>>, |, &, ^ applied to two numeric constant expressions,
+	//   * a binary + applied to two constant expressions whereof at least one is a string,
+	//   * a template expression where each substitution expression is a constant expression,
+	//   * a parenthesized constant expression,
+	//   * a dotted name (e.g. x.y.z) that references a const variable with a constant expression initializer and no type annotation,
+	//   * a dotted name that references an enum member with an enum literal type, or
+	//   * a dotted name indexed by a string literal (e.g. x.y["z"]) that references an enum member with an enum literal type.
+	//
+	// More detail: https://github.com/microsoft/TypeScript/pull/50528. Note that
+	// we don't implement certain items in this list. For example, we don't do all
+	// number-to-string conversions since ours might differ from how JavaScript
+	// would do it, which would be a correctness issue.
+	shouldFoldTypeScriptConstantExpressions bool
+
+	allowIn                     bool
+	allowPrivateIdentifiers     bool
+	hasTopLevelReturn           bool
+	latestReturnHadSemicolon    bool
+	messageAboutThisIsUndefined bool
+	isControlFlowDead           bool
+
+	// If this is true, then all top-level statements are wrapped in a try/catch
+	willWrapModuleInTryCatchForUsing bool
+}
+
+// globPatternImport records an import whose path contains a glob pattern
+// (the path is stored pre-split into GlobParts) so the bundler can expand
+// it into concrete imports later.
+type globPatternImport struct {
+	assertOrWith     *ast.ImportAssertOrWith
+	parts            []helpers.GlobPart
+	name             string
+	approximateRange logger.Range
+	ref              ast.Ref
+	kind             ast.ImportKind
+}
+
+// namespaceImportItems groups the named entries associated with a namespace
+// import together with the index of the import record they belong to.
+type namespaceImportItems struct {
+	entries           map[string]ast.LocRef
+	importRecordIndex uint32
+}
+
+// stringLocalForYarnPnP holds a UTF-16 string value and its source location.
+// It is internal-only data used by the Yarn PnP implementation (see the
+// "stringLocalsForYarnPnP" field on parser).
+type stringLocalForYarnPnP struct {
+	value []uint16
+	loc   logger.Loc
+}
+
+// injectedSymbolSource records where an injected symbol originally came
+// from: the injected file's source and the location within it.
+type injectedSymbolSource struct {
+	source logger.Source
+	loc    logger.Loc
+}
+
+// injectedDotName is a dotted name from an injected define, stored as its
+// individual parts plus the index of the injected define it refers to.
+type injectedDotName struct {
+	parts               []string
+	injectedDefineIndex uint32
+}
+
+// importNamespaceCallKind classifies how a namespace import was invoked:
+// as a regular call, via "new", or as a JSX tag.
+type importNamespaceCallKind uint8
+
+const (
+	exprKindCall importNamespaceCallKind = iota
+	exprKindNew
+	exprKindJSXTag
+)
+
+// importNamespaceCall pairs a symbol reference with the way it was invoked.
+// It is used as the key type of the parser's "importNamespaceCCMap".
+type importNamespaceCall struct {
+	ref  ast.Ref
+	kind importNamespaceCallKind
+}
+
+// thenCatchChain tracks state while visiting a ".then().catch()" style call
+// chain. It supports recognizing the "import().catch()" pattern (see the
+// "thenCatchChain" field on parser).
+type thenCatchChain struct {
+	nextTarget      js_ast.E
+	catchLoc        logger.Loc
+	hasMultipleArgs bool
+	hasCatch        bool
+}
+
+// This is used as part of an incremental build cache key. Some of these values
+// can potentially change between builds if they are derived from nearby
+// "package.json" or "tsconfig.json" files that were changed since the last
+// build.
+type Options struct {
+	// NOTE(review): these reference-typed fields presumably cannot be compared
+	// with "==" and are handled individually by the equality comparison
+	// mentioned below — confirm against the comparison code.
+	injectedFiles  []config.InjectedFile
+	jsx            config.JSXOptions
+	tsAlwaysStrict *config.TSAlwaysStrict
+	mangleProps    *regexp.Regexp
+	reserveProps   *regexp.Regexp
+	dropLabels     []string
+
+	// This pointer will always be different for each build but the contents
+	// shouldn't ever behave different semantically. We ignore this field for the
+	// equality comparison.
+	defines *config.ProcessedDefines
+
+	// This is an embedded struct. Always access these directly instead of off
+	// the name "optionsThatSupportStructuralEquality". This is only grouped like
+	// this to make the equality comparison easier and safer (and hopefully faster).
+	optionsThatSupportStructuralEquality
+}
+
+// optionsThatSupportStructuralEquality holds the subset of parser options
+// that can be compared structurally. It is embedded in Options (see the
+// comment there).
+type optionsThatSupportStructuralEquality struct {
+	originalTargetEnv                 string
+	moduleTypeData                    js_ast.ModuleTypeData
+	unsupportedJSFeatures             compat.JSFeature
+	unsupportedJSFeatureOverrides     compat.JSFeature
+	unsupportedJSFeatureOverridesMask compat.JSFeature
+
+	// Byte-sized values go here (gathered together here to keep this object compact)
+	ts                     config.TSOptions
+	mode                   config.Mode
+	platform               config.Platform
+	outputFormat           config.Format
+	asciiOnly              bool
+	keepNames              bool
+	minifySyntax           bool
+	minifyIdentifiers      bool
+	minifyWhitespace       bool
+	omitRuntimeForTests    bool
+	omitJSXRuntimeForTests bool
+	ignoreDCEAnnotations   bool
+	treeShaking            bool
+	dropDebugger           bool
+	mangleQuoted           bool
+
+	// This is an internal-only option used for the implementation of Yarn PnP
+	decodeHydrateRuntimeStateYarnPnP bool
+}
+
+func OptionsForYarnPnP() Options {
+	return Options{
+		optionsThatSupportStructuralEquality: optionsThatSupportStructuralEquality{
+			decodeHydrateRuntimeStateYarnPnP: true,
+		},
+	}
+}
+
// OptionsFromConfig extracts the subset of "config.Options" that affects
// parsing into an "Options" value suitable for use as a cache key (see the
// comment on the "Options" type).
func OptionsFromConfig(options *config.Options) Options {
	return Options{
		injectedFiles:  options.InjectedFiles,
		jsx:            options.JSX,
		defines:        options.Defines,
		tsAlwaysStrict: options.TSAlwaysStrict,
		mangleProps:    options.MangleProps,
		reserveProps:   options.ReserveProps,
		dropLabels:     options.DropLabels,

		optionsThatSupportStructuralEquality: optionsThatSupportStructuralEquality{
			unsupportedJSFeatures:             options.UnsupportedJSFeatures,
			unsupportedJSFeatureOverrides:     options.UnsupportedJSFeatureOverrides,
			unsupportedJSFeatureOverridesMask: options.UnsupportedJSFeatureOverridesMask,
			originalTargetEnv:                 options.OriginalTargetEnv,
			ts:                                options.TS,
			mode:                              options.Mode,
			platform:                          options.Platform,
			outputFormat:                      options.OutputFormat,
			moduleTypeData:                    options.ModuleTypeData,
			asciiOnly:                         options.ASCIIOnly,
			keepNames:                         options.KeepNames,
			minifySyntax:                      options.MinifySyntax,
			minifyIdentifiers:                 options.MinifyIdentifiers,
			minifyWhitespace:                  options.MinifyWhitespace,
			omitRuntimeForTests:               options.OmitRuntimeForTests,
			omitJSXRuntimeForTests:            options.OmitJSXRuntimeForTests,
			ignoreDCEAnnotations:              options.IgnoreDCEAnnotations,
			treeShaking:                       options.TreeShaking,
			dropDebugger:                      options.DropDebugger,
			mangleQuoted:                      options.MangleQuoted,
		},
	}
}
+
// Equal reports whether two Options values are equivalent for the purposes
// of the incremental build cache, i.e. whether a cached parse result made
// with "a" can be reused for "b". The "defines" pointer is deliberately not
// part of the comparison (see the field comment), but its shape is asserted
// below as a sanity check.
func (a *Options) Equal(b *Options) bool {
	// Compare "optionsThatSupportStructuralEquality"
	if a.optionsThatSupportStructuralEquality != b.optionsThatSupportStructuralEquality {
		return false
	}

	// Compare "tsAlwaysStrict" (nil-ness first, then pointed-to values)
	if (a.tsAlwaysStrict == nil && b.tsAlwaysStrict != nil) || (a.tsAlwaysStrict != nil && b.tsAlwaysStrict == nil) ||
		(a.tsAlwaysStrict != nil && b.tsAlwaysStrict != nil && *a.tsAlwaysStrict != *b.tsAlwaysStrict) {
		return false
	}

	// Compare "mangleProps" and "reserveProps"
	if !isSameRegexp(a.mangleProps, b.mangleProps) || !isSameRegexp(a.reserveProps, b.reserveProps) {
		return false
	}

	// Compare "dropLabels"
	if !helpers.StringArraysEqual(a.dropLabels, b.dropLabels) {
		return false
	}

	// Compare "injectedFiles" element-wise
	if len(a.injectedFiles) != len(b.injectedFiles) {
		return false
	}
	for i, x := range a.injectedFiles {
		y := b.injectedFiles[i]
		if x.Source != y.Source || x.DefineName != y.DefineName || len(x.Exports) != len(y.Exports) {
			return false
		}
		for j := range x.Exports {
			if x.Exports[j] != y.Exports[j] {
				return false
			}
		}
	}

	// Compare "jsx"
	if a.jsx.Parse != b.jsx.Parse || !jsxExprsEqual(a.jsx.Factory, b.jsx.Factory) || !jsxExprsEqual(a.jsx.Fragment, b.jsx.Fragment) {
		return false
	}

	// Do a cheap assert that the defines object hasn't changed
	if (a.defines != nil || b.defines != nil) && (a.defines == nil || b.defines == nil ||
		len(a.defines.IdentifierDefines) != len(b.defines.IdentifierDefines) ||
		len(a.defines.DotDefines) != len(b.defines.DotDefines)) {
		panic("Internal error")
	}

	return true
}
+
+func isSameRegexp(a *regexp.Regexp, b *regexp.Regexp) bool {
+	if a == nil {
+		return b == nil
+	} else {
+		return b != nil && a.String() == b.String()
+	}
+}
+
+func jsxExprsEqual(a config.DefineExpr, b config.DefineExpr) bool {
+	if !helpers.StringArraysEqual(a.Parts, b.Parts) {
+		return false
+	}
+
+	if a.Constant != nil {
+		if b.Constant == nil || !js_ast.ValuesLookTheSame(a.Constant, b.Constant) {
+			return false
+		}
+	} else if b.Constant != nil {
+		return false
+	}
+
+	return true
+}
+
// tempRef is a temporary symbol together with an optional initializer value
// ("valueOrNil.Data" is nil when there is no initializer).
type tempRef struct {
	valueOrNil js_ast.Expr
	ref        ast.Ref
}
+
const (
	// Sentinel location value representing the top-level module scope
	locModuleScope = -1
)
+
// scopeOrder pairs a scope with the source location where it was pushed so
// the second (visit) pass can re-enter the scopes created by the first
// (parse) pass in the same order (see pushScopeForParsePass).
type scopeOrder struct {
	scope *js_ast.Scope
	loc   logger.Loc
}
+
// awaitOrYield describes how the contextual keywords "await" and "yield"
// may be used in the current function context.
type awaitOrYield uint8

const (
	// The keyword is used as an identifier, not a special expression
	allowIdent awaitOrYield = iota

	// Declaring the identifier is forbidden, and the keyword is used as a special expression
	allowExpr

	// Declaring the identifier is forbidden, and using the identifier is also forbidden
	forbidAll
)
+
// This is function-specific information used during parsing. It is saved and
// restored on the call stack around code that parses nested functions and
// arrow expressions.
type fnOrArrowDataParse struct {
	arrowArgErrors      *deferredArrowArgErrors
	decoratorScope      *js_ast.Scope
	asyncRange          logger.Range
	needsAsyncLoc       logger.Loc
	await               awaitOrYield // how "await" may be used here (see awaitOrYield)
	yield               awaitOrYield // how "yield" may be used here (see awaitOrYield)
	allowSuperCall      bool
	allowSuperProperty  bool
	isTopLevel          bool
	isConstructor       bool
	isTypeScriptDeclare bool
	isThisDisallowed    bool
	isReturnDisallowed  bool

	// In TypeScript, forward declarations of functions have no bodies
	allowMissingBodyForTypeScript bool
}
+
// This is function-specific information used during visiting. It is saved and
// restored on the call stack around code that parses nested functions and
// arrow expressions.
type fnOrArrowDataVisit struct {
	// This is used to silence unresolvable imports due to "require" calls inside
	// a try/catch statement. The assumption is that the try/catch statement is
	// there to handle the case where the reference to "require" crashes.
	tryBodyCount int32
	tryCatchLoc  logger.Loc

	// Flags describing the function being visited and the syntactic context
	// within it
	isArrow                        bool
	isAsync                        bool
	isGenerator                    bool
	isInsideLoop                   bool
	isInsideSwitch                 bool
	isDerivedClassCtor             bool
	isOutsideFnOrArrow             bool
	shouldLowerSuperPropertyAccess bool
}
+
// This is function-specific information used during visiting. It is saved and
// restored on the call stack around code that parses nested functions (but not
// nested arrow functions), since arrow functions share "this" and "arguments"
// with their enclosing function.
type fnOnlyDataVisit struct {
	// This is a reference to the magic "arguments" variable that exists inside
	// functions in JavaScript. It will be non-nil inside functions and nil
	// otherwise.
	argumentsRef *ast.Ref

	// Arrow functions don't capture the value of "this" and "arguments". Instead,
	// the values are inherited from the surrounding context. If arrow functions
	// are turned into regular functions due to lowering, we will need to generate
	// local variables to capture these values so they are preserved correctly.
	thisCaptureRef      *ast.Ref
	argumentsCaptureRef *ast.Ref

	// If true, we're inside a static class context where "this" expressions
	// should be replaced with the class name (see "innerClassNameRef" below).
	shouldReplaceThisWithInnerClassNameRef bool

	// This is true if "this" is equal to the class name. It's true if we're in a
	// static class field initializer, a static class method, or a static class
	// block.
	isInStaticClassContext bool

	// This is a reference to the enclosing class name if there is one. It's used
	// to implement "this" and "super" references. A name is automatically generated
	// if one is missing so this will always be present inside a class body.
	innerClassNameRef *ast.Ref

	// If we're inside an async arrow function and async functions are not
	// supported, then we will have to convert that arrow function to a generator
	// function. That means references to "arguments" inside the arrow function
	// will have to reference a captured variable instead of the real variable.
	isInsideAsyncArrowFn bool

	// If false, disallow "new.target" expressions. We disallow all "new.target"
	// expressions at the top-level of the file (i.e. not inside a function or
	// a class field). Technically since CommonJS files are wrapped in a function
	// you can use "new.target" in node as an alias for "undefined" but we don't
	// support that.
	isNewTargetAllowed bool

	// If false, the value for "this" is the top-level module scope "this" value.
	// That means it's "undefined" for ECMAScript modules and "exports" for
	// CommonJS modules. We track this information so that we can substitute the
	// correct value for these top-level "this" references at compile time instead
	// of passing the "this" expression through to the output and leaving the
	// interpretation up to the run-time behavior of the generated code.
	//
	// If true, the value for "this" is nested inside something (either a function
	// or a class declaration). That means the top-level module scope "this" value
	// has been shadowed and is now inaccessible.
	isThisNested bool

	// Do not warn about "this" being undefined for code that the TypeScript
	// compiler generates that looks like this:
	//
	//   var __rest = (this && this.__rest) || function (s, e) {
	//     ...
	//   };
	//
	silenceMessageAboutThisBeingUndefined bool
}
+
// bloomFilterSize is the number of bits in duplicateCaseChecker's bloom
// filter. It is prime, and is used as the bucket count in
// "hash % bloomFilterSize" below.
const bloomFilterSize = 251
+
// duplicateCaseValue is one previously-seen "case" expression together with
// its precomputed hash (see duplicateCaseHash).
type duplicateCaseValue struct {
	value js_ast.Expr
	hash  uint32
}
+
// duplicateCaseChecker detects duplicate "case" clauses within a switch
// statement. A bloom filter over expression hashes keeps the common
// no-duplicate path cheap; candidate matches are confirmed by a full
// comparison (see "check" below).
type duplicateCaseChecker struct {
	cases       []duplicateCaseValue
	bloomFilter [(bloomFilterSize + 7) / 8]byte // one bit per hash bucket
}
+
+func (dc *duplicateCaseChecker) reset() {
+	// Preserve capacity
+	dc.cases = dc.cases[:0]
+
+	// This should be optimized by the compiler. See this for more information:
+	// https://github.com/golang/go/issues/5373
+	bytes := dc.bloomFilter
+	for i := range bytes {
+		bytes[i] = 0
+	}
+}
+
// check warns if "expr" duplicates a "case" expression already seen in this
// switch statement, then records "expr" for future checks. Expressions that
// duplicateCaseHash cannot hash are exempt from checking.
func (dc *duplicateCaseChecker) check(p *parser, expr js_ast.Expr) {
	if hash, ok := duplicateCaseHash(expr); ok {
		bucket := hash % bloomFilterSize
		entry := &dc.bloomFilter[bucket/8]
		mask := byte(1) << (bucket % 8)

		// Check for collisions
		if (*entry & mask) != 0 {
			for _, c := range dc.cases {
				if c.hash == hash {
					if equals, couldBeIncorrect := duplicateCaseEquals(c.value, expr); equals {
						// Point the diagnostic at the string literal itself when
						// possible, and otherwise at the "case" keyword before it
						var laterRange logger.Range
						var earlierRange logger.Range
						if _, ok := expr.Data.(*js_ast.EString); ok {
							laterRange = p.source.RangeOfString(expr.Loc)
						} else {
							laterRange = p.source.RangeOfOperatorBefore(expr.Loc, "case")
						}
						if _, ok := c.value.Data.(*js_ast.EString); ok {
							earlierRange = p.source.RangeOfString(c.value.Loc)
						} else {
							earlierRange = p.source.RangeOfOperatorBefore(c.value.Loc, "case")
						}
						// Soften the wording when the equality check itself could
						// be wrong (see duplicateCaseEquals)
						text := "This case clause will never be evaluated because it duplicates an earlier case clause"
						if couldBeIncorrect {
							text = "This case clause may never be evaluated because it likely duplicates an earlier case clause"
						}
						kind := logger.Warning
						if p.suppressWarningsAboutWeirdCode {
							kind = logger.Debug
						}
						p.log.AddIDWithNotes(logger.MsgID_JS_DuplicateCase, kind, &p.tracker, laterRange, text,
							[]logger.MsgData{p.tracker.MsgData(earlierRange, "The earlier case clause is here:")})
					}
					return
				}
			}
		}

		// Not a duplicate: set the bloom filter bit and remember this case
		*entry |= mask
		dc.cases = append(dc.cases, duplicateCaseValue{hash: hash, value: expr})
	}
}
+
// duplicateCaseHash returns a hash of a "case" expression if the expression
// is simple enough to be checked for duplicates: literals, identifiers, and
// "." / "[]" property chains built from them. The second result is false for
// anything else, which exempts that expression from duplicate checking.
// Each expression kind is seeded with a distinct constant so values of
// different kinds don't trivially collide.
func duplicateCaseHash(expr js_ast.Expr) (uint32, bool) {
	switch e := expr.Data.(type) {
	case *js_ast.EInlinedEnum:
		// Hash an inlined enum the same as its underlying value
		return duplicateCaseHash(e.Value)

	case *js_ast.ENull:
		return 0, true

	case *js_ast.EUndefined:
		return 1, true

	case *js_ast.EBoolean:
		if e.Value {
			return helpers.HashCombine(2, 1), true
		}
		return helpers.HashCombine(2, 0), true

	case *js_ast.ENumber:
		bits := math.Float64bits(e.Value)
		return helpers.HashCombine(helpers.HashCombine(3, uint32(bits)), uint32(bits>>32)), true

	case *js_ast.EString:
		hash := uint32(4)
		for _, c := range e.Value {
			hash = helpers.HashCombine(hash, uint32(c))
		}
		return hash, true

	case *js_ast.EBigInt:
		hash := uint32(5)
		for _, c := range e.Value {
			hash = helpers.HashCombine(hash, uint32(c))
		}
		return hash, true

	case *js_ast.EIdentifier:
		return helpers.HashCombine(6, e.Ref.InnerIndex), true

	case *js_ast.EDot:
		// Only hashable if the target is hashable
		if target, ok := duplicateCaseHash(e.Target); ok {
			return helpers.HashCombineString(helpers.HashCombine(7, target), e.Name), true
		}

	case *js_ast.EIndex:
		// Only hashable if both the target and the index are hashable
		if target, ok := duplicateCaseHash(e.Target); ok {
			if index, ok := duplicateCaseHash(e.Index); ok {
				return helpers.HashCombine(helpers.HashCombine(8, target), index), true
			}
		}
	}

	return 0, false
}
+
// duplicateCaseEquals reports whether two "case" expressions will evaluate to
// the same value. The second result is true when the answer depends on
// property accesses ("." or "[]"), whose results could in principle differ
// between evaluations — callers use it to soften the warning wording (see
// "check" above).
func duplicateCaseEquals(left js_ast.Expr, right js_ast.Expr) (equals bool, couldBeIncorrect bool) {
	// Compare an inlined enum the same as its underlying value
	if b, ok := right.Data.(*js_ast.EInlinedEnum); ok {
		return duplicateCaseEquals(left, b.Value)
	}

	switch a := left.Data.(type) {
	case *js_ast.EInlinedEnum:
		return duplicateCaseEquals(a.Value, right)

	case *js_ast.ENull:
		_, ok := right.Data.(*js_ast.ENull)
		return ok, false

	case *js_ast.EUndefined:
		_, ok := right.Data.(*js_ast.EUndefined)
		return ok, false

	case *js_ast.EBoolean:
		b, ok := right.Data.(*js_ast.EBoolean)
		return ok && a.Value == b.Value, false

	case *js_ast.ENumber:
		b, ok := right.Data.(*js_ast.ENumber)
		return ok && a.Value == b.Value, false

	case *js_ast.EString:
		b, ok := right.Data.(*js_ast.EString)
		return ok && helpers.UTF16EqualsUTF16(a.Value, b.Value), false

	case *js_ast.EBigInt:
		if b, ok := right.Data.(*js_ast.EBigInt); ok {
			equal, ok := js_ast.CheckEqualityBigInt(a.Value, b.Value)
			return ok && equal, false
		}

	case *js_ast.EIdentifier:
		b, ok := right.Data.(*js_ast.EIdentifier)
		return ok && a.Ref == b.Ref, false

	case *js_ast.EDot:
		// Equal property accesses "could be incorrect" (impure getters)
		if b, ok := right.Data.(*js_ast.EDot); ok && a.OptionalChain == b.OptionalChain && a.Name == b.Name {
			equals, _ := duplicateCaseEquals(a.Target, b.Target)
			return equals, true
		}

	case *js_ast.EIndex:
		if b, ok := right.Data.(*js_ast.EIndex); ok && a.OptionalChain == b.OptionalChain {
			if equals, _ := duplicateCaseEquals(a.Index, b.Index); equals {
				equals, _ := duplicateCaseEquals(a.Target, b.Target)
				return equals, true
			}
		}
	}

	return false, false
}
+
// duplicatePropertiesIn distinguishes the two containers that get duplicate
// property warnings. Each has its own exempt name: "__proto__" in object
// literals and "constructor" in class bodies (see
// warnAboutDuplicateProperties).
type duplicatePropertiesIn uint8

const (
	duplicatePropertiesInObject duplicatePropertiesIn = iota
	duplicatePropertiesInClass
)
+
// warnAboutDuplicateProperties warns about duplicate keys in an object
// literal or duplicate member names in a class body. A getter paired with a
// setter of the same name is not a duplicate, and the names "__proto__" (in
// objects) and "constructor" (in classes) are exempt.
func (p *parser) warnAboutDuplicateProperties(properties []js_ast.Property, in duplicatePropertiesIn) {
	// A single property can't duplicate anything
	if len(properties) < 2 {
		return
	}

	type keyKind uint8
	type existingKey struct {
		loc  logger.Loc
		kind keyKind
	}
	const (
		keyMissing keyKind = iota
		keyNormal
		keyGet
		keySet
		keyGetAndSet
	)
	// Static and instance members are tracked separately since they don't
	// collide with each other
	instanceKeys := make(map[string]existingKey)
	staticKeys := make(map[string]existingKey)

	for _, property := range properties {
		if property.Kind != js_ast.PropertySpread {
			// Only string keys are checked (computed keys are skipped)
			if str, ok := property.Key.Data.(*js_ast.EString); ok {
				var keys map[string]existingKey
				if property.Flags.Has(js_ast.PropertyIsStatic) {
					keys = staticKeys
				} else {
					keys = instanceKeys
				}
				key := helpers.UTF16ToString(str.Value)
				prevKey := keys[key]
				nextKey := existingKey{kind: keyNormal, loc: property.Key.Loc}

				if property.Kind == js_ast.PropertyGetter {
					nextKey.kind = keyGet
				} else if property.Kind == js_ast.PropertySetter {
					nextKey.kind = keySet
				}

				if prevKey.kind != keyMissing && (in != duplicatePropertiesInObject || key != "__proto__") && (in != duplicatePropertiesInClass || key != "constructor") {
					// A get/set pair of the same name is fine; merge them
					if (prevKey.kind == keyGet && nextKey.kind == keySet) || (prevKey.kind == keySet && nextKey.kind == keyGet) {
						nextKey.kind = keyGetAndSet
					} else {
						var id logger.MsgID
						var what string
						var where string
						switch in {
						case duplicatePropertiesInObject:
							id = logger.MsgID_JS_DuplicateObjectKey
							what = "key"
							where = "object literal"
						case duplicatePropertiesInClass:
							id = logger.MsgID_JS_DuplicateClassMember
							what = "member"
							where = "class body"
						}
						r := js_lexer.RangeOfIdentifier(p.source, property.Key.Loc)
						p.log.AddIDWithNotes(id, logger.Warning, &p.tracker, r,
							fmt.Sprintf("Duplicate %s %q in %s", what, key, where),
							[]logger.MsgData{p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, prevKey.loc),
								fmt.Sprintf("The original %s %q is here:", what, key))})
					}
				}

				keys[key] = nextKey
			}
		}
	}
}
+
+func isJumpStatement(data js_ast.S) bool {
+	switch data.(type) {
+	case *js_ast.SBreak, *js_ast.SContinue, *js_ast.SReturn, *js_ast.SThrow:
+		return true
+	}
+
+	return false
+}
+
// jumpStmtsLookTheSame reports whether two jump statements (see
// isJumpStatement) are equivalent: the same statement kind with the same
// label (or both unlabeled) and, for "return"/"throw", values that look the
// same.
func jumpStmtsLookTheSame(left js_ast.S, right js_ast.S) bool {
	switch a := left.(type) {
	case *js_ast.SBreak:
		b, ok := right.(*js_ast.SBreak)
		return ok && (a.Label == nil) == (b.Label == nil) && (a.Label == nil || a.Label.Ref == b.Label.Ref)

	case *js_ast.SContinue:
		b, ok := right.(*js_ast.SContinue)
		return ok && (a.Label == nil) == (b.Label == nil) && (a.Label == nil || a.Label.Ref == b.Label.Ref)

	case *js_ast.SReturn:
		// Both must have a value, or neither may have one
		b, ok := right.(*js_ast.SReturn)
		return ok && (a.ValueOrNil.Data == nil) == (b.ValueOrNil.Data == nil) &&
			(a.ValueOrNil.Data == nil || js_ast.ValuesLookTheSame(a.ValueOrNil.Data, b.ValueOrNil.Data))

	case *js_ast.SThrow:
		b, ok := right.(*js_ast.SThrow)
		return ok && js_ast.ValuesLookTheSame(a.Value.Data, b.Value.Data)
	}

	return false
}
+
// selectLocalKind potentially substitutes a different declaration keyword
// ("var" vs. "let"/"const") for a local declaration, both for safety when
// the declaration may be moved and as a size optimization when minifying.
func (p *parser) selectLocalKind(kind js_ast.LocalKind) js_ast.LocalKind {
	// Use "var" instead of "let" and "const" if the variable declaration may
	// need to be separated from the initializer. This allows us to safely move
	// this declaration into a nested scope.
	if p.currentScope.Parent == nil && (kind == js_ast.LocalLet || kind == js_ast.LocalConst) &&
		(p.options.mode == config.ModeBundle || p.willWrapModuleInTryCatchForUsing) {
		return js_ast.LocalVar
	}

	// Optimization: use "let" instead of "const" because it's shorter. This is
	// only done when bundling because assigning to "const" is only an error when
	// bundling.
	if p.options.mode == config.ModeBundle && kind == js_ast.LocalConst && p.options.minifySyntax {
		return js_ast.LocalLet
	}

	return kind
}
+
// pushScopeForParsePass creates a child of the current scope during the first
// (parse) pass, makes it current, and appends it to "p.scopesInOrder" so the
// second (visit) pass can re-enter the same scopes in the same order. It
// returns the new scope's index into "p.scopesInOrder" for use with
// popAndDiscardScope, popAndFlattenScope, or discardScopesUpTo.
func (p *parser) pushScopeForParsePass(kind js_ast.ScopeKind, loc logger.Loc) int {
	parent := p.currentScope
	scope := &js_ast.Scope{
		Kind:    kind,
		Parent:  parent,
		Members: make(map[string]js_ast.ScopeMember),
		Label:   ast.LocRef{Ref: ast.InvalidRef},
	}
	if parent != nil {
		// Strict mode is inherited from the enclosing scope
		parent.Children = append(parent.Children, scope)
		scope.StrictMode = parent.StrictMode
		scope.UseStrictLoc = parent.UseStrictLoc
	}
	p.currentScope = scope

	// Enforce that scope locations are strictly increasing to help catch bugs
	// where the pushed scopes are mismatched between the first and second passes
	if len(p.scopesInOrder) > 0 {
		prevStart := p.scopesInOrder[len(p.scopesInOrder)-1].loc.Start
		if prevStart >= loc.Start {
			panic(fmt.Sprintf("Scope location %d must be greater than %d", loc.Start, prevStart))
		}
	}

	// Copy down function arguments into the function body scope. That way we get
	// errors if a statement in the function body tries to re-declare any of the
	// arguments.
	if kind == js_ast.ScopeFunctionBody {
		if scope.Parent.Kind != js_ast.ScopeFunctionArgs {
			panic("Internal error")
		}
		for name, member := range scope.Parent.Members {
			// Don't copy down the optional function expression name. Re-declaring
			// the name of a function expression is allowed.
			kind := p.symbols[member.Ref.InnerIndex].Kind
			if kind != ast.SymbolHoistedFunction {
				scope.Members[name] = member
			}
		}
	}

	// Remember the length in case we call popAndDiscardScope() later
	scopeIndex := len(p.scopesInOrder)
	p.scopesInOrder = append(p.scopesInOrder, scopeOrder{loc: loc, scope: scope})
	return scopeIndex
}
+
// popScope leaves the current scope and makes its parent current. If the
// scope contains a direct "eval" call, the symbols declared in it are marked
// as un-renamable first (with an exception for top-level ESM symbols when
// bundling; see the long comment below).
func (p *parser) popScope() {
	// We cannot rename anything inside a scope containing a direct eval() call
	if p.currentScope.ContainsDirectEval {
		for _, member := range p.currentScope.Members {
			// Using direct eval when bundling is not a good idea in general because
			// esbuild must assume that it can potentially reach anything in any of
			// the containing scopes. We try to make it work but this isn't possible
			// in some cases.
			//
			// For example, symbols imported using an ESM import are a live binding
			// to the underlying symbol in another file. This is emulated during
			// scope hoisting by erasing the ESM import and just referencing the
			// underlying symbol in the flattened bundle directly. However, that
			// symbol may have a different name which could break uses of direct
			// eval:
			//
			//   // Before bundling
			//   import { foo as bar } from './foo.js'
			//   console.log(eval('bar'))
			//
			//   // After bundling
			//   let foo = 123 // The contents of "foo.js"
			//   console.log(eval('bar'))
			//
			// There really isn't any way to fix this. You can't just rename "foo" to
			// "bar" in the example above because there may be a third bundled file
			// that also contains direct eval and imports the same symbol with a
			// different conflicting import alias. And there is no way to store a
			// live binding to the underlying symbol in a variable with the import's
			// name so that direct eval can access it:
			//
			//   // After bundling
			//   let foo = 123 // The contents of "foo.js"
			//   const bar = /* cannot express a live binding to "foo" here */
			//   console.log(eval('bar'))
			//
			// Technically a "with" statement could potentially make this work (with
			// a big hit to performance), but they are deprecated and are unavailable
			// in strict mode. This is a non-starter since all ESM code is strict mode.
			//
			// So while we still try to obey the requirement that all symbol names are
			// pinned when direct eval is present, we make an exception for top-level
			// symbols in an ESM file when bundling is enabled. We make no guarantee
			// that "eval" will be able to reach these symbols and we allow them to be
			// renamed or removed by tree shaking.
			if p.options.mode == config.ModeBundle && p.currentScope.Parent == nil && p.isFileConsideredESM {
				continue
			}

			p.symbols[member.Ref.InnerIndex].Flags |= ast.MustNotBeRenamed
		}
	}

	p.currentScope = p.currentScope.Parent
}
+
// popAndDiscardScope pops the current scope and erases it — plus any scopes
// pushed after "scopeIndex" — from both the scope tree and the scope order,
// as if they had never been pushed.
func (p *parser) popAndDiscardScope(scopeIndex int) {
	// Unwind any newly-added scopes in reverse order
	for i := len(p.scopesInOrder) - 1; i >= scopeIndex; i-- {
		scope := p.scopesInOrder[i].scope
		parent := scope.Parent
		last := len(parent.Children) - 1
		// Each discarded scope must be its parent's most recent child
		if parent.Children[last] != scope {
			panic("Internal error")
		}
		parent.Children = parent.Children[:last]
	}

	// Move up to the parent scope
	p.currentScope = p.currentScope.Parent

	// Truncate the scope order where we started to pretend we never saw this scope
	p.scopesInOrder = p.scopesInOrder[:scopeIndex]
}
+
// popAndFlattenScope removes the current scope from the scope tree and the
// scope order, reparenting the scope's children directly into its parent.
func (p *parser) popAndFlattenScope(scopeIndex int) {
	// Move up to the parent scope
	toFlatten := p.currentScope
	parent := toFlatten.Parent
	p.currentScope = parent

	// Erase this scope from the order. This will shift over the indices of all
	// the scopes that were created after us. However, we shouldn't have to
	// worry about other code with outstanding scope indices for these scopes.
	// These scopes were all created in between this scope's push and pop
	// operations, so they should all be child scopes and should all be popped
	// by the time we get here.
	copy(p.scopesInOrder[scopeIndex:], p.scopesInOrder[scopeIndex+1:])
	p.scopesInOrder = p.scopesInOrder[:len(p.scopesInOrder)-1]

	// Remove the last child from the parent scope
	last := len(parent.Children) - 1
	if parent.Children[last] != toFlatten {
		panic("Internal error")
	}
	parent.Children = parent.Children[:last]

	// Reparent our child scopes into our parent
	for _, scope := range toFlatten.Children {
		scope.Parent = parent
		parent.Children = append(parent.Children, scope)
	}
}
+
// Undo all scopes pushed and popped after this scope index. This assumes that
// the scope stack is at the same level now as it was at the given scope index.
// Unlike popAndDiscardScope, this does not change the current scope.
func (p *parser) discardScopesUpTo(scopeIndex int) {
	// Remove any direct children from their parent
	children := p.currentScope.Children
	for _, child := range p.scopesInOrder[scopeIndex:] {
		if child.scope.Parent == p.currentScope {
			for i := len(children) - 1; i >= 0; i-- {
				if children[i] == child.scope {
					children = append(children[:i], children[i+1:]...)
					break
				}
			}
		}
	}
	p.currentScope.Children = children

	// Truncate the scope order where we started to pretend we never saw this scope
	p.scopesInOrder = p.scopesInOrder[:scopeIndex]
}
+
+func (p *parser) newSymbol(kind ast.SymbolKind, name string) ast.Ref {
+	ref := ast.Ref{SourceIndex: p.source.Index, InnerIndex: uint32(len(p.symbols))}
+	p.symbols = append(p.symbols, ast.Symbol{
+		Kind:         kind,
+		OriginalName: name,
+		Link:         ast.InvalidRef,
+	})
+	if p.options.ts.Parse {
+		p.tsUseCounts = append(p.tsUseCounts, 0)
+	}
+	return ref
+}
+
+// This is similar to "ast.MergeSymbols" but it works with this parser's
+// one-level symbol map instead of the linker's two-level symbol map. It also
+// doesn't handle cycles since they shouldn't come up due to the way this
+// function is used.
+func (p *parser) mergeSymbols(old ast.Ref, new ast.Ref) ast.Ref {
+	if old == new {
+		return new
+	}
+
+	oldSymbol := &p.symbols[old.InnerIndex]
+	if oldSymbol.Link != ast.InvalidRef {
+		oldSymbol.Link = p.mergeSymbols(oldSymbol.Link, new)
+		return oldSymbol.Link
+	}
+
+	newSymbol := &p.symbols[new.InnerIndex]
+	if newSymbol.Link != ast.InvalidRef {
+		newSymbol.Link = p.mergeSymbols(old, newSymbol.Link)
+		return newSymbol.Link
+	}
+
+	oldSymbol.Link = new
+	newSymbol.MergeContentsWith(oldSymbol)
+	return new
+}
+
// mergeResult describes how a newly declared symbol should be combined with
// an existing symbol of the same name in the same scope (see canMergeSymbols
// and declareSymbol). The constants are now explicitly typed as mergeResult
// instead of untyped ints for type safety; their values are unchanged.
type mergeResult int

const (
	// The two declarations conflict: report a "has already been declared" error
	mergeForbidden mergeResult = iota

	// Link the existing symbol to the new one (the new declaration wins)
	mergeReplaceWithNew

	// Use the new symbol without linking the old one to it
	mergeOverwriteWithNew

	// Reuse the existing symbol and discard the new one
	mergeKeepExisting

	// The pair becomes a private getter/setter pair ("get #foo" + "set #foo")
	mergeBecomePrivateGetSetPair

	// The pair becomes a static private getter/setter pair
	mergeBecomePrivateStaticGetSetPair
)
+
// canMergeSymbols decides what happens when a symbol named the same as an
// existing member is declared in the same scope: whether it's an error,
// which symbol survives, or whether the two become a getter/setter pair.
// See declareSymbol for how each result is applied.
func (p *parser) canMergeSymbols(scope *js_ast.Scope, existing ast.SymbolKind, new ast.SymbolKind) mergeResult {
	// Anything can shadow an unbound (undeclared) symbol
	if existing == ast.SymbolUnbound {
		return mergeReplaceWithNew
	}

	// In TypeScript, imports are allowed to silently collide with symbols within
	// the module. Presumably this is because the imports may be type-only:
	//
	//   import {Foo} from 'bar'
	//   class Foo {}
	//
	if p.options.ts.Parse && existing == ast.SymbolImport {
		return mergeReplaceWithNew
	}

	// "enum Foo {} enum Foo {}"
	if new == ast.SymbolTSEnum && existing == ast.SymbolTSEnum {
		return mergeKeepExisting
	}

	// "namespace Foo { ... } enum Foo {}"
	if new == ast.SymbolTSEnum && existing == ast.SymbolTSNamespace {
		return mergeReplaceWithNew
	}

	// "namespace Foo { ... } namespace Foo { ... }"
	// "function Foo() {} namespace Foo { ... }"
	// "enum Foo {} namespace Foo { ... }"
	if new == ast.SymbolTSNamespace {
		switch existing {
		case ast.SymbolTSNamespace, ast.SymbolHoistedFunction, ast.SymbolGeneratorOrAsyncFunction, ast.SymbolTSEnum, ast.SymbolClass:
			return mergeKeepExisting
		}
	}

	// "var foo; var foo;"
	// "var foo; function foo() {}"
	// "function foo() {} var foo;"
	// "function *foo() {} function *foo() {}" but not "{ function *foo() {} function *foo() {} }"
	if new.IsHoistedOrFunction() && existing.IsHoistedOrFunction() &&
		(scope.Kind == js_ast.ScopeEntry ||
			scope.Kind == js_ast.ScopeFunctionBody ||
			scope.Kind == js_ast.ScopeFunctionArgs ||
			(new == existing && new.IsHoisted())) {
		return mergeReplaceWithNew
	}

	// "get #foo() {} set #foo() {}"
	// "set #foo() {} get #foo() {}"
	if (existing == ast.SymbolPrivateGet && new == ast.SymbolPrivateSet) ||
		(existing == ast.SymbolPrivateSet && new == ast.SymbolPrivateGet) {
		return mergeBecomePrivateGetSetPair
	}
	if (existing == ast.SymbolPrivateStaticGet && new == ast.SymbolPrivateStaticSet) ||
		(existing == ast.SymbolPrivateStaticSet && new == ast.SymbolPrivateStaticGet) {
		return mergeBecomePrivateStaticGetSetPair
	}

	// "try {} catch (e) { var e }"
	if existing == ast.SymbolCatchIdentifier && new == ast.SymbolHoisted {
		return mergeReplaceWithNew
	}

	// "function() { var arguments }"
	if existing == ast.SymbolArguments && new == ast.SymbolHoisted {
		return mergeKeepExisting
	}

	// "function() { let arguments }"
	if existing == ast.SymbolArguments && new != ast.SymbolHoisted {
		return mergeOverwriteWithNew
	}

	return mergeForbidden
}
+
// addSymbolAlreadyDeclaredError reports a duplicate declaration of "name" at
// "newLoc", with a note pointing back at the original declaration at "oldLoc".
func (p *parser) addSymbolAlreadyDeclaredError(name string, newLoc logger.Loc, oldLoc logger.Loc) {
	p.log.AddErrorWithNotes(&p.tracker,
		js_lexer.RangeOfIdentifier(p.source, newLoc),
		fmt.Sprintf("The symbol %q has already been declared", name),

		[]logger.MsgData{p.tracker.MsgData(
			js_lexer.RangeOfIdentifier(p.source, oldLoc),
			fmt.Sprintf("The symbol %q was originally declared here:", name),
		)},
	)
}
+
// declareSymbol declares a symbol of the given kind and name in the current
// scope, resolving any collision with an existing member of the same name
// according to canMergeSymbols. The returned ref is the symbol that ends up
// in the scope's member map, which may be the pre-existing one.
func (p *parser) declareSymbol(kind ast.SymbolKind, loc logger.Loc, name string) ast.Ref {
	p.checkForUnrepresentableIdentifier(loc, name)

	// Allocate a new symbol
	ref := p.newSymbol(kind, name)

	// Check for a collision in the declaring scope
	if existing, ok := p.currentScope.Members[name]; ok {
		symbol := &p.symbols[existing.Ref.InnerIndex]

		switch p.canMergeSymbols(p.currentScope, symbol.Kind, kind) {
		case mergeForbidden:
			p.addSymbolAlreadyDeclaredError(name, loc, existing.Loc)
			return existing.Ref

		case mergeKeepExisting:
			ref = existing.Ref

		case mergeReplaceWithNew:
			symbol.Link = ref
			p.currentScope.Replaced = append(p.currentScope.Replaced, existing)

			// If these are both functions, remove the overwritten declaration
			if p.options.minifySyntax && kind.IsFunction() && symbol.Kind.IsFunction() {
				symbol.Flags |= ast.RemoveOverwrittenFunctionDeclaration
			}

		case mergeBecomePrivateGetSetPair:
			ref = existing.Ref
			symbol.Kind = ast.SymbolPrivateGetSetPair

		case mergeBecomePrivateStaticGetSetPair:
			ref = existing.Ref
			symbol.Kind = ast.SymbolPrivateStaticGetSetPair

		case mergeOverwriteWithNew:
			// Nothing extra to do: the new symbol simply takes the slot in the
			// member map below
		}
	}

	// Overwrite this name in the declaring scope
	p.currentScope.Members[name] = js_ast.ScopeMember{Ref: ref, Loc: loc}
	return ref

}
+
// This type is just so we can use Go's native sort function
type scopeMemberArray []js_ast.ScopeMember

func (a scopeMemberArray) Len() int          { return len(a) }
func (a scopeMemberArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less orders members by (InnerIndex, SourceIndex) so that iterating over
// sorted members is deterministic regardless of map iteration order
func (a scopeMemberArray) Less(i int, j int) bool {
	ai := a[i].Ref
	bj := a[j].Ref
	return ai.InnerIndex < bj.InnerIndex || (ai.InnerIndex == bj.InnerIndex && ai.SourceIndex < bj.SourceIndex)
}
+
// hoistSymbols applies JavaScript's hoisting rules to this scope and then
// recurses into its children: "var" and hoisted function symbols are merged
// upward into the nearest enclosing scope that stops hoisting, duplicate
// declarations forbidden by the specification are reported, and Annex B's
// web-compatibility semantics for block-level functions and catch bindings
// are implemented along the way.
func (p *parser) hoistSymbols(scope *js_ast.Scope) {
	// Duplicate function declarations are forbidden in nested blocks in strict
	// mode. Separately, they are also forbidden at the top-level of modules.
	// This check needs to be delayed until now instead of being done when the
	// functions are declared because we potentially need to scan the whole file
	// to know if the file is considered to be in strict mode (or is considered
	// to be a module). We might only encounter an "export {}" clause at the end
	// of the file.
	if (scope.StrictMode != js_ast.SloppyMode && scope.Kind == js_ast.ScopeBlock) || (scope.Parent == nil && p.isFileConsideredESM) {
		for _, replaced := range scope.Replaced {
			symbol := &p.symbols[replaced.Ref.InnerIndex]
			if symbol.Kind.IsFunction() {
				if member, ok := scope.Members[symbol.OriginalName]; ok && p.symbols[member.Ref.InnerIndex].Kind.IsFunction() {
					var notes []logger.MsgData
					if scope.Parent == nil && p.isFileConsideredESM {
						_, notes = p.whyESModule()
						notes[0].Text = fmt.Sprintf("Duplicate top-level function declarations are not allowed in an ECMAScript module. %s", notes[0].Text)
					} else {
						var where string
						where, notes = p.whyStrictMode(scope)
						notes[0].Text = fmt.Sprintf("Duplicate function declarations are not allowed in nested blocks %s. %s", where, notes[0].Text)
					}

					p.log.AddErrorWithNotes(&p.tracker,
						js_lexer.RangeOfIdentifier(p.source, member.Loc),
						fmt.Sprintf("The symbol %q has already been declared", symbol.OriginalName),

						append([]logger.MsgData{p.tracker.MsgData(
							js_lexer.RangeOfIdentifier(p.source, replaced.Loc),
							fmt.Sprintf("The symbol %q was originally declared here:", symbol.OriginalName),
						)}, notes...),
					)
				}
			}
		}
	}

	if !scope.Kind.StopsHoisting() {
		// We create new symbols in the loop below, so the iteration order of the
		// loop must be deterministic to avoid generating different minified names
		sortedMembers := make(scopeMemberArray, 0, len(scope.Members))
		for _, member := range scope.Members {
			sortedMembers = append(sortedMembers, member)
		}
		sort.Sort(sortedMembers)

	nextMember:
		for _, member := range sortedMembers {
			symbol := &p.symbols[member.Ref.InnerIndex]

			// Handle non-hoisted collisions between catch bindings and the catch body.
			// This implements "B.3.4 VariableStatements in Catch Blocks" from Annex B
			// of the ECMAScript standard version 6+ (except for the hoisted case, which
			// is handled later on below):
			//
			// * It is a Syntax Error if any element of the BoundNames of CatchParameter
			//   also occurs in the LexicallyDeclaredNames of Block.
			//
			// * It is a Syntax Error if any element of the BoundNames of CatchParameter
			//   also occurs in the VarDeclaredNames of Block unless CatchParameter is
			//   CatchParameter : BindingIdentifier .
			//
			if scope.Parent.Kind == js_ast.ScopeCatchBinding && symbol.Kind != ast.SymbolHoisted {
				if existingMember, ok := scope.Parent.Members[symbol.OriginalName]; ok {
					p.addSymbolAlreadyDeclaredError(symbol.OriginalName, member.Loc, existingMember.Loc)
					continue
				}
			}

			if !symbol.Kind.IsHoisted() {
				continue
			}

			// Implement "Block-Level Function Declarations Web Legacy Compatibility
			// Semantics" from Annex B of the ECMAScript standard version 6+
			isSloppyModeBlockLevelFnStmt := false
			originalMemberRef := member.Ref
			if symbol.Kind == ast.SymbolHoistedFunction {
				// Block-level function declarations behave like "let" in strict mode
				if scope.StrictMode != js_ast.SloppyMode {
					continue
				}

				// In sloppy mode, block level functions behave like "let" except with
				// an assignment to "var", sort of. This code:
				//
				//   if (x) {
				//     f();
				//     function f() {}
				//   }
				//   f();
				//
				// behaves like this code:
				//
				//   if (x) {
				//     let f2 = function() {}
				//     var f = f2;
				//     f2();
				//   }
				//   f();
				//
				hoistedRef := p.newSymbol(ast.SymbolHoisted, symbol.OriginalName)
				scope.Generated = append(scope.Generated, hoistedRef)
				if p.hoistedRefForSloppyModeBlockFn == nil {
					p.hoistedRefForSloppyModeBlockFn = make(map[ast.Ref]ast.Ref)
				}
				p.hoistedRefForSloppyModeBlockFn[member.Ref] = hoistedRef
				symbol = &p.symbols[hoistedRef.InnerIndex]
				member.Ref = hoistedRef
				isSloppyModeBlockLevelFnStmt = true
			}

			// Check for collisions that would prevent to hoisting "var" symbols up to the enclosing function scope
			s := scope.Parent
			for {
				// Variable declarations hoisted past a "with" statement may actually end
				// up overwriting a property on the target of the "with" statement instead
				// of initializing the variable. We must not rename them or we risk
				// causing a behavior change.
				//
				//   var obj = { foo: 1 }
				//   with (obj) { var foo = 2 }
				//   assert(foo === undefined)
				//   assert(obj.foo === 2)
				//
				if s.Kind == js_ast.ScopeWith {
					symbol.Flags |= ast.MustNotBeRenamed
				}

				if existingMember, ok := s.Members[symbol.OriginalName]; ok {
					existingSymbol := &p.symbols[existingMember.Ref.InnerIndex]

					// We can hoist the symbol from the child scope into the symbol in
					// this scope if:
					//
					//   - The symbol is unbound (i.e. a global variable access)
					//   - The symbol is also another hoisted variable
					//   - The symbol is a function of any kind and we're in a function or module scope
					//
					// Is this unbound (i.e. a global access) or also hoisted?
					if existingSymbol.Kind == ast.SymbolUnbound || existingSymbol.Kind == ast.SymbolHoisted ||
						(existingSymbol.Kind.IsFunction() && (s.Kind == js_ast.ScopeEntry || s.Kind == js_ast.ScopeFunctionBody)) {
						// Silently merge this symbol into the existing symbol
						symbol.Link = existingMember.Ref
						s.Members[symbol.OriginalName] = existingMember
						continue nextMember
					}

					// Otherwise if this isn't a catch identifier or "arguments", it's a collision
					if existingSymbol.Kind != ast.SymbolCatchIdentifier && existingSymbol.Kind != ast.SymbolArguments {
						// An identifier binding from a catch statement and a function
						// declaration can both silently shadow another hoisted symbol
						if symbol.Kind != ast.SymbolCatchIdentifier && symbol.Kind != ast.SymbolHoistedFunction {
							if !isSloppyModeBlockLevelFnStmt {
								p.addSymbolAlreadyDeclaredError(symbol.OriginalName, member.Loc, existingMember.Loc)
							} else if s == scope.Parent {
								// Never mind about this, turns out it's not needed after all
								delete(p.hoistedRefForSloppyModeBlockFn, originalMemberRef)
							}
						}
						continue nextMember
					}

					// If this is a catch identifier, silently merge the existing symbol
					// into this symbol but continue hoisting past this catch scope
					existingSymbol.Link = member.Ref
					s.Members[symbol.OriginalName] = member
				}

				if s.Kind.StopsHoisting() {
					// Declare the member in the scope that stopped the hoisting
					s.Members[symbol.OriginalName] = member
					break
				}
				s = s.Parent
			}
		}
	}

	// Recurse into child scopes so the whole scope tree is processed
	for _, child := range scope.Children {
		p.hoistSymbols(child)
	}
}
+
+func (p *parser) declareBinding(kind ast.SymbolKind, binding js_ast.Binding, opts parseStmtOpts) {
+	js_ast.ForEachIdentifierBinding(binding, func(loc logger.Loc, b *js_ast.BIdentifier) {
+		if !opts.isTypeScriptDeclare || (opts.isNamespaceScope && opts.isExport) {
+			b.Ref = p.declareSymbol(kind, loc, p.loadNameFromRef(b.Ref))
+		}
+	})
+}
+
+func (p *parser) recordUsage(ref ast.Ref) {
+	// The use count stored in the symbol is used for generating symbol names
+	// during minification. These counts shouldn't include references inside dead
+	// code regions since those will be culled.
+	if !p.isControlFlowDead {
+		p.symbols[ref.InnerIndex].UseCountEstimate++
+		use := p.symbolUses[ref]
+		use.CountEstimate++
+		p.symbolUses[ref] = use
+	}
+
+	// The correctness of TypeScript-to-JavaScript conversion relies on accurate
+	// symbol use counts for the whole file, including dead code regions. This is
+	// tracked separately in a parser-only data structure.
+	if p.options.ts.Parse {
+		p.tsUseCounts[ref.InnerIndex]++
+	}
+}
+
+func (p *parser) ignoreUsage(ref ast.Ref) {
+	// Roll back the use count increment in recordUsage()
+	if !p.isControlFlowDead {
+		p.symbols[ref.InnerIndex].UseCountEstimate--
+		use := p.symbolUses[ref]
+		use.CountEstimate--
+		if use.CountEstimate == 0 {
+			delete(p.symbolUses, ref)
+		} else {
+			p.symbolUses[ref] = use
+		}
+	}
+
+	// Don't roll back the "tsUseCounts" increment. This must be counted even if
+	// the value is ignored because that's what the TypeScript compiler does.
+}
+
+func (p *parser) ignoreUsageOfIdentifierInDotChain(expr js_ast.Expr) {
+	for {
+		switch e := expr.Data.(type) {
+		case *js_ast.EIdentifier:
+			p.ignoreUsage(e.Ref)
+
+		case *js_ast.EDot:
+			expr = e.Target
+			continue
+
+		case *js_ast.EIndex:
+			if _, ok := e.Index.Data.(*js_ast.EString); ok {
+				expr = e.Target
+				continue
+			}
+		}
+
+		return
+	}
+}
+
+func (p *parser) importFromRuntime(loc logger.Loc, name string) js_ast.Expr {
+	it, ok := p.runtimeImports[name]
+	if !ok {
+		it.Loc = loc
+		it.Ref = p.newSymbol(ast.SymbolOther, name)
+		p.moduleScope.Generated = append(p.moduleScope.Generated, it.Ref)
+		p.runtimeImports[name] = it
+	}
+	p.recordUsage(it.Ref)
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: it.Ref}}
+}
+
+func (p *parser) callRuntime(loc logger.Loc, name string, args []js_ast.Expr) js_ast.Expr {
+	return js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+		Target: p.importFromRuntime(loc, name),
+		Args:   args,
+	}}
+}
+
// JSXImport identifies which automatically-injected JSX helper import a JSX
// expression should be lowered to. See importJSXSymbol for the mapping to
// actual import names.
type JSXImport uint8

const (
	JSXImportJSX JSXImport = iota // "jsx" (or "jsxDEV" in development mode)
	JSXImportJSXS                 // "jsxs" (or "jsxDEV" in development mode)
	JSXImportFragment             // "Fragment"
	JSXImportCreateElement        // legacy "createElement"
)
+
+func (p *parser) importJSXSymbol(loc logger.Loc, jsx JSXImport) js_ast.Expr {
+	var symbols map[string]ast.LocRef
+	var name string
+
+	switch jsx {
+	case JSXImportJSX:
+		symbols = p.jsxRuntimeImports
+		if p.options.jsx.Development {
+			name = "jsxDEV"
+		} else {
+			name = "jsx"
+		}
+
+	case JSXImportJSXS:
+		symbols = p.jsxRuntimeImports
+		if p.options.jsx.Development {
+			name = "jsxDEV"
+		} else {
+			name = "jsxs"
+		}
+
+	case JSXImportFragment:
+		symbols = p.jsxRuntimeImports
+		name = "Fragment"
+
+	case JSXImportCreateElement:
+		symbols = p.jsxLegacyImports
+		name = "createElement"
+	}
+
+	it, ok := symbols[name]
+	if !ok {
+		it.Loc = loc
+		it.Ref = p.newSymbol(ast.SymbolOther, name)
+		p.moduleScope.Generated = append(p.moduleScope.Generated, it.Ref)
+		p.isImportItem[it.Ref] = true
+		symbols[name] = it
+	}
+
+	p.recordUsage(it.Ref)
+	return p.handleIdentifier(loc, &js_ast.EIdentifier{Ref: it.Ref}, identifierOpts{
+		wasOriginallyIdentifier: true,
+	})
+}
+
+func (p *parser) valueToSubstituteForRequire(loc logger.Loc) js_ast.Expr {
+	if p.source.Index != runtime.SourceIndex &&
+		config.ShouldCallRuntimeRequire(p.options.mode, p.options.outputFormat) {
+		return p.importFromRuntime(loc, "__require")
+	}
+
+	p.recordUsage(p.requireRef)
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: p.requireRef}}
+}
+
+func (p *parser) makePromiseRef() ast.Ref {
+	if p.promiseRef == ast.InvalidRef {
+		p.promiseRef = p.newSymbol(ast.SymbolUnbound, "Promise")
+	}
+	return p.promiseRef
+}
+
+func (p *parser) makeRegExpRef() ast.Ref {
+	if p.regExpRef == ast.InvalidRef {
+		p.regExpRef = p.newSymbol(ast.SymbolUnbound, "RegExp")
+		p.moduleScope.Generated = append(p.moduleScope.Generated, p.regExpRef)
+	}
+	return p.regExpRef
+}
+
+// The name is temporarily stored in the ref until the scope traversal pass
+// happens, at which point a symbol will be generated and the ref will point
+// to the symbol instead.
+//
+// The scope traversal pass will reconstruct the name using one of two methods.
+// In the common case, the name is a slice of the file itself. In that case we
+// can just store the slice and not need to allocate any extra memory. In the
+// rare case, the name is an externally-allocated string. In that case we store
+// an index to the string and use that index during the scope traversal pass.
+func (p *parser) storeNameInRef(name js_lexer.MaybeSubstring) ast.Ref {
+	// Is the data in "name" a subset of the data in "p.source.Contents"?
+	if name.Start.IsValid() {
+		// The name is a slice of the file contents, so we can just reference it by
+		// length and don't have to allocate anything. This is the common case.
+		//
+		// It's stored as a negative value so we'll crash if we try to use it. That
+		// way we'll catch cases where we've forgotten to call loadNameFromRef().
+		// The length is the negative part because we know it's non-zero.
+		return ast.Ref{SourceIndex: -uint32(len(name.String)), InnerIndex: uint32(name.Start.GetIndex())}
+	} else {
+		// The name is some memory allocated elsewhere. This is either an inline
+		// string constant in the parser or an identifier with escape sequences
+		// in the source code, which is very unusual. Stash it away for later.
+		// This uses allocations but it should hopefully be very uncommon.
+		ref := ast.Ref{SourceIndex: 0x80000000, InnerIndex: uint32(len(p.allocatedNames))}
+		p.allocatedNames = append(p.allocatedNames, name.String)
+		return ref
+	}
+}
+
// This is the inverse of storeNameInRef() above
func (p *parser) loadNameFromRef(ref ast.Ref) string {
	if ref.SourceIndex == 0x80000000 {
		// Sentinel value: the name lives in the side table and "InnerIndex"
		// is its index there.
		return p.allocatedNames[ref.InnerIndex]
	} else {
		// Any other valid encoding must have the high bit set (it holds a
		// negated, non-zero length); anything else means this ref was never
		// produced by storeNameInRef().
		if (ref.SourceIndex & 0x80000000) == 0 {
			panic("Internal error: invalid symbol reference")
		}
		// "InnerIndex" is the start offset into the source and "SourceIndex"
		// is the negated length, so end = start - (-length) = start + length.
		return p.source.Contents[ref.InnerIndex : int32(ref.InnerIndex)-int32(ref.SourceIndex)]
	}
}
+
// Due to ES6 destructuring patterns, there are many cases where it's
// impossible to distinguish between an array or object literal and a
// destructuring assignment until we hit the "=" operator later on.
// This object defers errors about being in one state or the other
// until we discover which state we're in.
type deferredErrors struct {
	// These are errors for expressions

	// Range of an unexpected "=" default value (invalid in an expression)
	invalidExprDefaultValue logger.Range
	// Range of an unexpected token following "?" (invalid in an expression)
	invalidExprAfterQuestion logger.Range
	// Range of an array spread that may need a syntax-feature diagnostic
	arraySpreadFeature logger.Range

	// These errors are for arrow functions

	// Parenthesized patterns that are invalid bindings if this turns out
	// to be an arrow function argument list
	invalidParens []logger.Range
}
+
+func (from *deferredErrors) mergeInto(to *deferredErrors) {
+	if from.invalidExprDefaultValue.Len > 0 {
+		to.invalidExprDefaultValue = from.invalidExprDefaultValue
+	}
+	if from.invalidExprAfterQuestion.Len > 0 {
+		to.invalidExprAfterQuestion = from.invalidExprAfterQuestion
+	}
+	if from.arraySpreadFeature.Len > 0 {
+		to.arraySpreadFeature = from.arraySpreadFeature
+	}
+	if len(from.invalidParens) > 0 {
+		if len(to.invalidParens) > 0 {
+			to.invalidParens = append(to.invalidParens, from.invalidParens...)
+		} else {
+			to.invalidParens = from.invalidParens
+		}
+	}
+}
+
+func (p *parser) logExprErrors(errors *deferredErrors) {
+	if errors.invalidExprDefaultValue.Len > 0 {
+		p.log.AddError(&p.tracker, errors.invalidExprDefaultValue, "Unexpected \"=\"")
+	}
+
+	if errors.invalidExprAfterQuestion.Len > 0 {
+		r := errors.invalidExprAfterQuestion
+		p.log.AddError(&p.tracker, r, fmt.Sprintf("Unexpected %q", p.source.Contents[r.Loc.Start:r.Loc.Start+r.Len]))
+	}
+
+	if errors.arraySpreadFeature.Len > 0 {
+		p.markSyntaxFeature(compat.ArraySpread, errors.arraySpreadFeature)
+	}
+}
+
+func (p *parser) logDeferredArrowArgErrors(errors *deferredErrors) {
+	for _, paren := range errors.invalidParens {
+		p.log.AddError(&p.tracker, paren, "Invalid binding pattern")
+	}
+}
+
+func (p *parser) logNullishCoalescingErrorPrecedenceError(op string) {
+	prevOp := "??"
+	if p.lexer.Token == js_lexer.TQuestionQuestion {
+		op, prevOp = prevOp, op
+	}
+	// p.log.AddError(&p.tracker, p.lexer.Range(), fmt.Sprintf("The %q operator requires parentheses"))
+	p.log.AddErrorWithNotes(&p.tracker, p.lexer.Range(), fmt.Sprintf("Cannot use %q with %q without parentheses", op, prevOp),
+		[]logger.MsgData{{Text: fmt.Sprintf("Expressions of the form \"x %s y %s z\" are not allowed in JavaScript. "+
+			"You must disambiguate between \"(x %s y) %s z\" and \"x %s (y %s z)\" by adding parentheses.", prevOp, op, prevOp, op, prevOp, op)}})
+}
+
+func defineValueCanBeUsedInAssignTarget(data js_ast.E) bool {
+	switch data.(type) {
+	case *js_ast.EIdentifier, *js_ast.EDot:
+		return true
+	}
+
+	// Substituting a constant into an assignment target (e.g. "x = 1" becomes
+	// "0 = 1") will cause a syntax error, so we avoid doing this. The caller
+	// will log a warning instead.
+	return false
+}
+
+func (p *parser) logAssignToDefine(r logger.Range, name string, expr js_ast.Expr) {
+	// If this is a compound expression, pretty-print it for the error message.
+	// We don't use a literal slice of the source text in case it contains
+	// problematic things (e.g. spans multiple lines, has embedded comments).
+	if expr.Data != nil {
+		var parts []string
+		for {
+			if id, ok := expr.Data.(*js_ast.EIdentifier); ok {
+				parts = append(parts, p.loadNameFromRef(id.Ref))
+				break
+			} else if dot, ok := expr.Data.(*js_ast.EDot); ok {
+				parts = append(parts, dot.Name)
+				parts = append(parts, ".")
+				expr = dot.Target
+			} else if index, ok := expr.Data.(*js_ast.EIndex); ok {
+				if str, ok := index.Index.Data.(*js_ast.EString); ok {
+					parts = append(parts, "]")
+					parts = append(parts, string(helpers.QuoteSingle(helpers.UTF16ToString(str.Value), false)))
+					parts = append(parts, "[")
+					expr = index.Target
+				} else {
+					return
+				}
+			} else {
+				return
+			}
+		}
+		for i, j := 0, len(parts)-1; i < j; i, j = i+1, j-1 {
+			parts[i], parts[j] = parts[j], parts[i]
+		}
+		name = strings.Join(parts, "")
+	}
+
+	kind := logger.Warning
+	if p.suppressWarningsAboutWeirdCode {
+		kind = logger.Debug
+	}
+
+	p.log.AddIDWithNotes(logger.MsgID_JS_AssignToDefine, kind, &p.tracker, r,
+		fmt.Sprintf("Suspicious assignment to defined constant %q", name),
+		[]logger.MsgData{{Text: fmt.Sprintf(
+			"The expression %q has been configured to be replaced with a constant using the \"define\" feature. "+
+				"If this expression is supposed to be a compile-time constant, then it doesn't make sense to assign to it here. "+
+				"Or if this expression is supposed to change at run-time, this \"define\" substitution should be removed.", name)}})
+}
+
+// The "await" and "yield" expressions are never allowed in argument lists but
+// may or may not be allowed otherwise depending on the details of the enclosing
+// function or module. This needs to be handled when parsing an arrow function
+// argument list because we don't know if these expressions are not allowed until
+// we reach the "=>" token (or discover the absence of one).
+//
+// Specifically, for await:
+//
+//	// This is ok
+//	async function foo() { (x = await y) }
+//
+//	// This is an error
+//	async function foo() { (x = await y) => {} }
+//
+// And for yield:
+//
+//	// This is ok
+//	function* foo() { (x = yield y) }
+//
+//	// This is an error
+//	function* foo() { (x = yield y) => {} }
+type deferredArrowArgErrors struct {
+	invalidExprAwait logger.Range
+	invalidExprYield logger.Range
+}
+
+func (p *parser) logArrowArgErrors(errors *deferredArrowArgErrors) {
+	if errors.invalidExprAwait.Len > 0 {
+		p.log.AddError(&p.tracker, errors.invalidExprAwait, "Cannot use an \"await\" expression here:")
+	}
+
+	if errors.invalidExprYield.Len > 0 {
+		p.log.AddError(&p.tracker, errors.invalidExprYield, "Cannot use a \"yield\" expression here:")
+	}
+}
+
+func (p *parser) keyNameForError(key js_ast.Expr) string {
+	switch k := key.Data.(type) {
+	case *js_ast.EString:
+		return fmt.Sprintf("%q", helpers.UTF16ToString(k.Value))
+	case *js_ast.EPrivateIdentifier:
+		return fmt.Sprintf("%q", p.loadNameFromRef(k.Ref))
+	}
+	return "property"
+}
+
+func (p *parser) checkForLegacyOctalLiteral(e js_ast.E) {
+	if p.lexer.IsLegacyOctalLiteral {
+		if p.legacyOctalLiterals == nil {
+			p.legacyOctalLiterals = make(map[js_ast.E]logger.Range)
+		}
+		p.legacyOctalLiterals[e] = p.lexer.Range()
+	}
+}
+
+func (p *parser) notesForAssertTypeJSON(record *ast.ImportRecord, alias string) []logger.MsgData {
+	return []logger.MsgData{p.tracker.MsgData(
+		js_lexer.RangeOfImportAssertOrWith(p.source, *ast.FindAssertOrWithEntry(record.AssertOrWith.Entries, "type"), js_lexer.KeyAndValueRange),
+		"The JSON import assertion is here:"),
+		{Text: fmt.Sprintf("You can either keep the import assertion and only use the \"default\" import, "+
+			"or you can remove the import assertion and use the %q import.", alias)}}
+}
+
// This assumes the caller has already checked for TStringLiteral or TNoSubstitutionTemplateLiteral
func (p *parser) parseStringLiteral() js_ast.Expr {
	var legacyOctalLoc logger.Loc
	loc := p.lexer.Loc()
	text := p.lexer.StringLiteral()

	// Enable using a "/* @__KEY__ */" comment to turn a string into a key
	hasPropertyKeyComment := (p.lexer.HasCommentBefore & js_lexer.KeyCommentBefore) != 0
	if hasPropertyKeyComment {
		// Mangled property names become ENameOfSymbol nodes instead of
		// plain strings so renaming can rewrite them later
		if name := helpers.UTF16ToString(text); p.isMangledProp(name) {
			value := js_ast.Expr{Loc: loc, Data: &js_ast.ENameOfSymbol{
				Ref:                   p.storeNameInRef(js_lexer.MaybeSubstring{String: name}),
				HasPropertyKeyComment: true,
			}}
			// Advance past the string token before returning
			p.lexer.Next()
			return value
		}
	}

	// Only remember a legacy octal escape if it occurred inside this
	// literal (i.e. after this literal's starting location)
	if p.lexer.LegacyOctalLoc.Start > loc.Start {
		legacyOctalLoc = p.lexer.LegacyOctalLoc
	}
	value := js_ast.Expr{Loc: loc, Data: &js_ast.EString{
		Value:          text,
		LegacyOctalLoc: legacyOctalLoc,
		// Backtick literals should be printed back as templates
		PreferTemplate:        p.lexer.Token == js_lexer.TNoSubstitutionTemplateLiteral,
		HasPropertyKeyComment: hasPropertyKeyComment,
	}}
	p.lexer.Next()
	return value
}
+
// propertyOpts carries contextual state accumulated while parsing a single
// object or class property (modifier keywords seen so far, decorator info,
// and whether we're inside a class).
type propertyOpts struct {
	decorators       []js_ast.Decorator
	decoratorScope   *js_ast.Scope
	decoratorContext decoratorContextFlags

	// Ranges of modifier keywords, kept for error reporting
	asyncRange     logger.Range
	generatorRange logger.Range
	tsDeclareRange logger.Range
	classKeyword   logger.Range
	isAsync        bool
	isGenerator    bool

	// Class-related options
	isStatic        bool
	isTSAbstract    bool
	isClass         bool
	classHasExtends bool
}
+
+func (p *parser) parseProperty(startLoc logger.Loc, kind js_ast.PropertyKind, opts propertyOpts, errors *deferredErrors) (js_ast.Property, bool) {
+	var flags js_ast.PropertyFlags
+	var key js_ast.Expr
+	var closeBracketLoc logger.Loc
+	keyRange := p.lexer.Range()
+
+	switch p.lexer.Token {
+	case js_lexer.TNumericLiteral:
+		key = js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.ENumber{Value: p.lexer.Number}}
+		p.checkForLegacyOctalLiteral(key.Data)
+		p.lexer.Next()
+
+	case js_lexer.TStringLiteral:
+		key = p.parseStringLiteral()
+		if !p.options.minifySyntax {
+			flags |= js_ast.PropertyPreferQuotedKey
+		}
+
+	case js_lexer.TBigIntegerLiteral:
+		key = js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EBigInt{Value: p.lexer.Identifier.String}}
+		p.markSyntaxFeature(compat.Bigint, p.lexer.Range())
+		p.lexer.Next()
+
+	case js_lexer.TPrivateIdentifier:
+		if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True && len(opts.decorators) > 0 {
+			p.log.AddError(&p.tracker, p.lexer.Range(), "TypeScript experimental decorators cannot be used on private identifiers")
+		} else if !opts.isClass {
+			p.lexer.Expected(js_lexer.TIdentifier)
+		} else if opts.tsDeclareRange.Len != 0 {
+			p.log.AddError(&p.tracker, opts.tsDeclareRange, "\"declare\" cannot be used with a private identifier")
+		}
+		name := p.lexer.Identifier
+		key = js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EPrivateIdentifier{Ref: p.storeNameInRef(name)}}
+		p.reportPrivateNameUsage(name.String)
+		p.lexer.Next()
+
+	case js_lexer.TOpenBracket:
+		flags |= js_ast.PropertyIsComputed
+		p.markSyntaxFeature(compat.ObjectExtensions, p.lexer.Range())
+		p.lexer.Next()
+		wasIdentifier := p.lexer.Token == js_lexer.TIdentifier
+		expr := p.parseExpr(js_ast.LComma)
+
+		// Handle index signatures
+		if p.options.ts.Parse && p.lexer.Token == js_lexer.TColon && wasIdentifier && opts.isClass {
+			if _, ok := expr.Data.(*js_ast.EIdentifier); ok {
+				if opts.tsDeclareRange.Len != 0 {
+					p.log.AddError(&p.tracker, opts.tsDeclareRange, "\"declare\" cannot be used with an index signature")
+				}
+
+				// "[key: string]: any;"
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+				p.lexer.Expect(js_lexer.TCloseBracket)
+				p.lexer.Expect(js_lexer.TColon)
+				p.skipTypeScriptType(js_ast.LLowest)
+				p.lexer.ExpectOrInsertSemicolon()
+
+				// Skip this property entirely
+				return js_ast.Property{}, false
+			}
+		}
+
+		closeBracketLoc = p.saveExprCommentsHere()
+		p.lexer.Expect(js_lexer.TCloseBracket)
+		key = expr
+
+	case js_lexer.TAsterisk:
+		if kind != js_ast.PropertyField && (kind != js_ast.PropertyMethod || opts.isGenerator) {
+			p.lexer.Unexpected()
+		}
+		opts.isGenerator = true
+		opts.generatorRange = p.lexer.Range()
+		p.lexer.Next()
+		return p.parseProperty(startLoc, js_ast.PropertyMethod, opts, errors)
+
+	default:
+		name := p.lexer.Identifier
+		raw := p.lexer.Raw()
+		nameRange := p.lexer.Range()
+		if !p.lexer.IsIdentifierOrKeyword() {
+			p.lexer.Expect(js_lexer.TIdentifier)
+		}
+		p.lexer.Next()
+
+		// Support contextual keywords
+		if kind == js_ast.PropertyField {
+			// Does the following token look like a key?
+			couldBeModifierKeyword := p.lexer.IsIdentifierOrKeyword()
+			if !couldBeModifierKeyword {
+				switch p.lexer.Token {
+				case js_lexer.TOpenBracket, js_lexer.TNumericLiteral, js_lexer.TStringLiteral,
+					js_lexer.TAsterisk, js_lexer.TPrivateIdentifier:
+					couldBeModifierKeyword = true
+				}
+			}
+
+			// If so, check for a modifier keyword
+			if couldBeModifierKeyword {
+				switch name.String {
+				case "get":
+					if !opts.isAsync && raw == name.String {
+						p.markSyntaxFeature(compat.ObjectAccessors, nameRange)
+						return p.parseProperty(startLoc, js_ast.PropertyGetter, opts, nil)
+					}
+
+				case "set":
+					if !opts.isAsync && raw == name.String {
+						p.markSyntaxFeature(compat.ObjectAccessors, nameRange)
+						return p.parseProperty(startLoc, js_ast.PropertySetter, opts, nil)
+					}
+
+				case "accessor":
+					if !p.lexer.HasNewlineBefore && !opts.isAsync && opts.isClass && raw == name.String {
+						return p.parseProperty(startLoc, js_ast.PropertyAutoAccessor, opts, nil)
+					}
+
+				case "async":
+					if !p.lexer.HasNewlineBefore && !opts.isAsync && raw == name.String {
+						opts.isAsync = true
+						opts.asyncRange = nameRange
+						return p.parseProperty(startLoc, js_ast.PropertyMethod, opts, nil)
+					}
+
+				case "static":
+					if !opts.isStatic && !opts.isAsync && opts.isClass && raw == name.String {
+						opts.isStatic = true
+						return p.parseProperty(startLoc, kind, opts, nil)
+					}
+
+				case "declare":
+					if !p.lexer.HasNewlineBefore && opts.isClass && p.options.ts.Parse && opts.tsDeclareRange.Len == 0 && raw == name.String {
+						opts.tsDeclareRange = nameRange
+						scopeIndex := len(p.scopesInOrder)
+
+						if prop, ok := p.parseProperty(startLoc, kind, opts, nil); ok &&
+							prop.Kind == js_ast.PropertyField && prop.ValueOrNil.Data == nil &&
+							(p.options.ts.Config.ExperimentalDecorators == config.True && len(opts.decorators) > 0) {
+							// If this is a well-formed class field with the "declare" keyword,
+							// only keep the declaration to preserve its side-effects when
+							// there are TypeScript experimental decorators present:
+							//
+							//   class Foo {
+							//     // Remove this
+							//     declare [(console.log('side effect 1'), 'foo')]
+							//
+							//     // Keep this
+							//     @decorator(console.log('side effect 2')) declare bar
+							//   }
+							//
+							// This behavior is surprisingly somehow valid with TypeScript
+							// experimental decorators, which was possibly by accident.
+							// TypeScript does not allow this with JavaScript decorators.
+							//
+							// References:
+							//
+							//   https://github.com/evanw/esbuild/issues/1675
+							//   https://github.com/microsoft/TypeScript/issues/46345
+							//
+							prop.Kind = js_ast.PropertyDeclareOrAbstract
+							return prop, true
+						}
+
+						p.discardScopesUpTo(scopeIndex)
+						return js_ast.Property{}, false
+					}
+
+				case "abstract":
+					if !p.lexer.HasNewlineBefore && opts.isClass && p.options.ts.Parse && !opts.isTSAbstract && raw == name.String {
+						opts.isTSAbstract = true
+						scopeIndex := len(p.scopesInOrder)
+
+						if prop, ok := p.parseProperty(startLoc, kind, opts, nil); ok &&
+							prop.Kind == js_ast.PropertyField && prop.ValueOrNil.Data == nil &&
+							(p.options.ts.Config.ExperimentalDecorators == config.True && len(opts.decorators) > 0) {
+							// If this is a well-formed class field with the "abstract" keyword,
+							// only keep the declaration to preserve its side-effects when
+							// there are TypeScript experimental decorators present:
+							//
+							//   abstract class Foo {
+							//     // Remove this
+							//     abstract [(console.log('side effect 1'), 'foo')]
+							//
+							//     // Keep this
+							//     @decorator(console.log('side effect 2')) abstract bar
+							//   }
+							//
+							// This behavior is valid with TypeScript experimental decorators.
+							// TypeScript does not allow this with JavaScript decorators.
+							//
+							// References:
+							//
+							//   https://github.com/evanw/esbuild/issues/3684
+							//
+							prop.Kind = js_ast.PropertyDeclareOrAbstract
+							return prop, true
+						}
+
+						p.discardScopesUpTo(scopeIndex)
+						return js_ast.Property{}, false
+					}
+
+				case "private", "protected", "public", "readonly", "override":
+					// Skip over TypeScript keywords
+					if opts.isClass && p.options.ts.Parse && raw == name.String {
+						return p.parseProperty(startLoc, kind, opts, nil)
+					}
+				}
+			} else if p.lexer.Token == js_lexer.TOpenBrace && name.String == "static" && len(opts.decorators) == 0 {
+				loc := p.lexer.Loc()
+				p.lexer.Next()
+
+				oldFnOrArrowDataParse := p.fnOrArrowDataParse
+				p.fnOrArrowDataParse = fnOrArrowDataParse{
+					isReturnDisallowed: true,
+					allowSuperProperty: true,
+					await:              forbidAll,
+				}
+
+				p.pushScopeForParsePass(js_ast.ScopeClassStaticInit, loc)
+				stmts := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{})
+				p.popScope()
+
+				p.fnOrArrowDataParse = oldFnOrArrowDataParse
+
+				closeBraceLoc := p.lexer.Loc()
+				p.lexer.Expect(js_lexer.TCloseBrace)
+				return js_ast.Property{
+					Kind: js_ast.PropertyClassStaticBlock,
+					Loc:  startLoc,
+					ClassStaticBlock: &js_ast.ClassStaticBlock{
+						Loc:   loc,
+						Block: js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc},
+					},
+				}, true
+			}
+		}
+
+		if p.isMangledProp(name.String) {
+			key = js_ast.Expr{Loc: nameRange.Loc, Data: &js_ast.ENameOfSymbol{Ref: p.storeNameInRef(name)}}
+		} else {
+			key = js_ast.Expr{Loc: nameRange.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name.String)}}
+		}
+
+		// Parse a shorthand property
+		if !opts.isClass && kind == js_ast.PropertyField && p.lexer.Token != js_lexer.TColon &&
+			p.lexer.Token != js_lexer.TOpenParen && p.lexer.Token != js_lexer.TLessThan &&
+			js_lexer.Keywords[name.String] == js_lexer.T(0) {
+
+			// Forbid invalid identifiers
+			if (p.fnOrArrowDataParse.await != allowIdent && name.String == "await") ||
+				(p.fnOrArrowDataParse.yield != allowIdent && name.String == "yield") {
+				p.log.AddError(&p.tracker, nameRange, fmt.Sprintf("Cannot use %q as an identifier here:", name.String))
+			}
+
+			ref := p.storeNameInRef(name)
+			value := js_ast.Expr{Loc: key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+
+			// Destructuring patterns have an optional default value
+			var initializerOrNil js_ast.Expr
+			if errors != nil && p.lexer.Token == js_lexer.TEquals {
+				errors.invalidExprDefaultValue = p.lexer.Range()
+				p.lexer.Next()
+				initializerOrNil = p.parseExpr(js_ast.LComma)
+			}
+
+			return js_ast.Property{
+				Kind:             kind,
+				Loc:              startLoc,
+				Key:              key,
+				ValueOrNil:       value,
+				InitializerOrNil: initializerOrNil,
+				Flags:            js_ast.PropertyWasShorthand,
+			}, true
+		}
+	}
+
+	hasTypeParameters := false
+	hasDefiniteAssignmentAssertionOperator := false
+
+	if p.options.ts.Parse {
+		if opts.isClass {
+			if p.lexer.Token == js_lexer.TQuestion {
+				// "class X { foo?: number }"
+				// "class X { foo?(): number }"
+				p.lexer.Next()
+			} else if p.lexer.Token == js_lexer.TExclamation && !p.lexer.HasNewlineBefore &&
+				(kind == js_ast.PropertyField || kind == js_ast.PropertyAutoAccessor) {
+				// "class X { foo!: number }"
+				p.lexer.Next()
+				hasDefiniteAssignmentAssertionOperator = true
+			}
+		}
+
+		// "class X { foo?<T>(): T }"
+		// "const x = { foo<T>(): T {} }"
+		if !hasDefiniteAssignmentAssertionOperator && kind != js_ast.PropertyAutoAccessor {
+			hasTypeParameters = p.skipTypeScriptTypeParameters(allowConstModifier) != didNotSkipAnything
+		}
+	}
+
+	// Parse a class field with an optional initial value
+	if kind == js_ast.PropertyAutoAccessor || (opts.isClass && kind == js_ast.PropertyField &&
+		!hasTypeParameters && (p.lexer.Token != js_lexer.TOpenParen || hasDefiniteAssignmentAssertionOperator)) {
+		var initializerOrNil js_ast.Expr
+
+		// Forbid the names "constructor" and "prototype" in some cases
+		if !flags.Has(js_ast.PropertyIsComputed) {
+			if str, ok := key.Data.(*js_ast.EString); ok && (helpers.UTF16EqualsString(str.Value, "constructor") ||
+				(opts.isStatic && helpers.UTF16EqualsString(str.Value, "prototype"))) {
+				p.log.AddError(&p.tracker, keyRange, fmt.Sprintf("Invalid field name %q", helpers.UTF16ToString(str.Value)))
+			}
+		}
+
+		// Skip over types
+		if p.options.ts.Parse && p.lexer.Token == js_lexer.TColon {
+			p.lexer.Next()
+			p.skipTypeScriptType(js_ast.LLowest)
+		}
+
+		if p.lexer.Token == js_lexer.TEquals {
+			p.lexer.Next()
+
+			// "this" and "super" property access is allowed in field initializers
+			oldIsThisDisallowed := p.fnOrArrowDataParse.isThisDisallowed
+			oldAllowSuperProperty := p.fnOrArrowDataParse.allowSuperProperty
+			p.fnOrArrowDataParse.isThisDisallowed = false
+			p.fnOrArrowDataParse.allowSuperProperty = true
+
+			initializerOrNil = p.parseExpr(js_ast.LComma)
+
+			p.fnOrArrowDataParse.isThisDisallowed = oldIsThisDisallowed
+			p.fnOrArrowDataParse.allowSuperProperty = oldAllowSuperProperty
+		}
+
+		// Special-case private identifiers
+		if private, ok := key.Data.(*js_ast.EPrivateIdentifier); ok {
+			name := p.loadNameFromRef(private.Ref)
+			if name == "#constructor" {
+				p.log.AddError(&p.tracker, keyRange, fmt.Sprintf("Invalid field name %q", name))
+			}
+			var declare ast.SymbolKind
+			if kind == js_ast.PropertyAutoAccessor {
+				if opts.isStatic {
+					declare = ast.SymbolPrivateStaticGetSetPair
+				} else {
+					declare = ast.SymbolPrivateGetSetPair
+				}
+				private.Ref = p.declareSymbol(declare, key.Loc, name)
+				p.privateGetters[private.Ref] = p.newSymbol(ast.SymbolOther, name[1:]+"_get")
+				p.privateSetters[private.Ref] = p.newSymbol(ast.SymbolOther, name[1:]+"_set")
+			} else {
+				if opts.isStatic {
+					declare = ast.SymbolPrivateStaticField
+				} else {
+					declare = ast.SymbolPrivateField
+				}
+				private.Ref = p.declareSymbol(declare, key.Loc, name)
+			}
+		}
+
+		p.lexer.ExpectOrInsertSemicolon()
+		if opts.isStatic {
+			flags |= js_ast.PropertyIsStatic
+		}
+		return js_ast.Property{
+			Decorators:       opts.decorators,
+			Loc:              startLoc,
+			Kind:             kind,
+			Flags:            flags,
+			Key:              key,
+			InitializerOrNil: initializerOrNil,
+			CloseBracketLoc:  closeBracketLoc,
+		}, true
+	}
+
+	// Parse a method expression
+	if p.lexer.Token == js_lexer.TOpenParen || kind.IsMethodDefinition() || opts.isClass {
+		hasError := false
+
+		if !hasError && opts.tsDeclareRange.Len != 0 {
+			what := "method"
+			if kind == js_ast.PropertyGetter {
+				what = "getter"
+			} else if kind == js_ast.PropertySetter {
+				what = "setter"
+			}
+			p.log.AddError(&p.tracker, opts.tsDeclareRange, "\"declare\" cannot be used with a "+what)
+			hasError = true
+		}
+
+		if opts.isAsync && p.markAsyncFn(opts.asyncRange, opts.isGenerator) {
+			hasError = true
+		}
+
+		if !hasError && opts.isGenerator && p.markSyntaxFeature(compat.Generator, opts.generatorRange) {
+			hasError = true
+		}
+
+		if !hasError && p.lexer.Token == js_lexer.TOpenParen && kind != js_ast.PropertyGetter && kind != js_ast.PropertySetter && p.markSyntaxFeature(compat.ObjectExtensions, p.lexer.Range()) {
+			hasError = true
+		}
+
+		loc := p.lexer.Loc()
+		scopeIndex := p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, loc)
+		isConstructor := false
+
+		// Forbid the names "constructor" and "prototype" in some cases
+		if opts.isClass && !flags.Has(js_ast.PropertyIsComputed) {
+			if str, ok := key.Data.(*js_ast.EString); ok {
+				if !opts.isStatic && helpers.UTF16EqualsString(str.Value, "constructor") {
+					switch {
+					case kind == js_ast.PropertyGetter:
+						p.log.AddError(&p.tracker, keyRange, "Class constructor cannot be a getter")
+					case kind == js_ast.PropertySetter:
+						p.log.AddError(&p.tracker, keyRange, "Class constructor cannot be a setter")
+					case opts.isAsync:
+						p.log.AddError(&p.tracker, keyRange, "Class constructor cannot be an async function")
+					case opts.isGenerator:
+						p.log.AddError(&p.tracker, keyRange, "Class constructor cannot be a generator")
+					default:
+						isConstructor = true
+					}
+				} else if opts.isStatic && helpers.UTF16EqualsString(str.Value, "prototype") {
+					p.log.AddError(&p.tracker, keyRange, "Invalid static method name \"prototype\"")
+				}
+			}
+		}
+
+		await := allowIdent
+		yield := allowIdent
+		if opts.isAsync {
+			await = allowExpr
+		}
+		if opts.isGenerator {
+			yield = allowExpr
+		}
+
+		fn, hadBody := p.parseFn(nil, opts.classKeyword, opts.decoratorContext, fnOrArrowDataParse{
+			needsAsyncLoc:      key.Loc,
+			asyncRange:         opts.asyncRange,
+			await:              await,
+			yield:              yield,
+			allowSuperCall:     opts.classHasExtends && isConstructor,
+			allowSuperProperty: true,
+			decoratorScope:     opts.decoratorScope,
+			isConstructor:      isConstructor,
+
+			// Only allow omitting the body if we're parsing TypeScript class
+			allowMissingBodyForTypeScript: p.options.ts.Parse && opts.isClass,
+		})
+
+		// "class Foo { foo(): void; foo(): void {} }"
+		if !hadBody {
+			// Skip this property entirely
+			p.popAndDiscardScope(scopeIndex)
+			return js_ast.Property{}, false
+		}
+
+		p.popScope()
+		fn.IsUniqueFormalParameters = true
+		value := js_ast.Expr{Loc: loc, Data: &js_ast.EFunction{Fn: fn}}
+
+		// Enforce argument rules for accessors
+		switch kind {
+		case js_ast.PropertyGetter:
+			if len(fn.Args) > 0 {
+				r := js_lexer.RangeOfIdentifier(p.source, fn.Args[0].Binding.Loc)
+				p.log.AddError(&p.tracker, r, fmt.Sprintf("Getter %s must have zero arguments", p.keyNameForError(key)))
+			}
+
+		case js_ast.PropertySetter:
+			if len(fn.Args) != 1 {
+				r := js_lexer.RangeOfIdentifier(p.source, key.Loc)
+				if len(fn.Args) > 1 {
+					r = js_lexer.RangeOfIdentifier(p.source, fn.Args[1].Binding.Loc)
+				}
+				p.log.AddError(&p.tracker, r, fmt.Sprintf("Setter %s must have exactly one argument", p.keyNameForError(key)))
+			}
+
+		default:
+			kind = js_ast.PropertyMethod
+		}
+
+		// Special-case private identifiers
+		if private, ok := key.Data.(*js_ast.EPrivateIdentifier); ok {
+			var declare ast.SymbolKind
+			var suffix string
+			switch kind {
+			case js_ast.PropertyGetter:
+				if opts.isStatic {
+					declare = ast.SymbolPrivateStaticGet
+				} else {
+					declare = ast.SymbolPrivateGet
+				}
+				suffix = "_get"
+			case js_ast.PropertySetter:
+				if opts.isStatic {
+					declare = ast.SymbolPrivateStaticSet
+				} else {
+					declare = ast.SymbolPrivateSet
+				}
+				suffix = "_set"
+			default:
+				if opts.isStatic {
+					declare = ast.SymbolPrivateStaticMethod
+				} else {
+					declare = ast.SymbolPrivateMethod
+				}
+				suffix = "_fn"
+			}
+			name := p.loadNameFromRef(private.Ref)
+			if name == "#constructor" {
+				p.log.AddError(&p.tracker, keyRange, fmt.Sprintf("Invalid method name %q", name))
+			}
+			private.Ref = p.declareSymbol(declare, key.Loc, name)
+			methodRef := p.newSymbol(ast.SymbolOther, name[1:]+suffix)
+			if kind == js_ast.PropertySetter {
+				p.privateSetters[private.Ref] = methodRef
+			} else {
+				p.privateGetters[private.Ref] = methodRef
+			}
+		}
+
+		if opts.isStatic {
+			flags |= js_ast.PropertyIsStatic
+		}
+		return js_ast.Property{
+			Decorators:      opts.decorators,
+			Loc:             startLoc,
+			Kind:            kind,
+			Flags:           flags,
+			Key:             key,
+			ValueOrNil:      value,
+			CloseBracketLoc: closeBracketLoc,
+		}, true
+	}
+
+	// Parse an object key/value pair
+	p.lexer.Expect(js_lexer.TColon)
+	value := p.parseExprOrBindings(js_ast.LComma, errors)
+	return js_ast.Property{
+		Loc:             startLoc,
+		Kind:            kind,
+		Flags:           flags,
+		Key:             key,
+		ValueOrNil:      value,
+		CloseBracketLoc: closeBracketLoc,
+	}, true
+}
+
// parsePropertyBinding parses a single property inside an object destructuring
// pattern (e.g. each entry of "{ a, b: c, ...rest }"). It handles rest
// elements, numeric/string/bigint/computed keys, and shorthand bindings with
// optional default values.
func (p *parser) parsePropertyBinding() js_ast.PropertyBinding {
	var key js_ast.Expr
	var closeBracketLoc logger.Loc
	isComputed := false
	preferQuotedKey := false
	loc := p.lexer.Loc()

	switch p.lexer.Token {
	case js_lexer.TDotDotDot:
		// "...rest" — the rest element must be a plain identifier
		p.lexer.Next()
		value := js_ast.Binding{Loc: p.saveExprCommentsHere(), Data: &js_ast.BIdentifier{Ref: p.storeNameInRef(p.lexer.Identifier)}}
		p.lexer.Expect(js_lexer.TIdentifier)
		return js_ast.PropertyBinding{
			Loc:      loc,
			IsSpread: true,
			Value:    value,
		}

	case js_lexer.TNumericLiteral:
		// "{ 123: x }"
		key = js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.ENumber{Value: p.lexer.Number}}
		p.checkForLegacyOctalLiteral(key.Data)
		p.lexer.Next()

	case js_lexer.TStringLiteral:
		// "{ 'key': x }" — keep the key quoted unless syntax minification is on
		key = p.parseStringLiteral()
		preferQuotedKey = !p.options.minifySyntax

	case js_lexer.TBigIntegerLiteral:
		// "{ 123n: x }"
		key = js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EBigInt{Value: p.lexer.Identifier.String}}
		p.markSyntaxFeature(compat.Bigint, p.lexer.Range())
		p.lexer.Next()

	case js_lexer.TOpenBracket:
		// "{ [expr]: x }" — computed property key
		isComputed = true
		p.lexer.Next()
		key = p.parseExpr(js_ast.LComma)
		closeBracketLoc = p.saveExprCommentsHere()
		p.lexer.Expect(js_lexer.TCloseBracket)

	default:
		// An identifier or keyword key, possibly in shorthand form
		// ("{ a }" or "{ a = 1 }")
		name := p.lexer.Identifier
		nameRange := p.lexer.Range()
		if !p.lexer.IsIdentifierOrKeyword() {
			p.lexer.Expect(js_lexer.TIdentifier)
		}
		p.lexer.Next()
		if p.isMangledProp(name.String) {
			key = js_ast.Expr{Loc: nameRange.Loc, Data: &js_ast.ENameOfSymbol{Ref: p.storeNameInRef(name)}}
		} else {
			key = js_ast.Expr{Loc: nameRange.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name.String)}}
		}

		if p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TOpenParen {
			// This is a shorthand binding. Forbid invalid identifiers:
			// "await" and "yield" are only usable where they aren't operators.
			if (p.fnOrArrowDataParse.await != allowIdent && name.String == "await") ||
				(p.fnOrArrowDataParse.yield != allowIdent && name.String == "yield") {
				p.log.AddError(&p.tracker, nameRange, fmt.Sprintf("Cannot use %q as an identifier here:", name.String))
			}

			ref := p.storeNameInRef(name)
			value := js_ast.Binding{Loc: nameRange.Loc, Data: &js_ast.BIdentifier{Ref: ref}}

			// Shorthand bindings may have a default value: "{ a = 1 }"
			var defaultValueOrNil js_ast.Expr
			if p.lexer.Token == js_lexer.TEquals {
				p.lexer.Next()
				defaultValueOrNil = p.parseExpr(js_ast.LComma)
			}

			return js_ast.PropertyBinding{
				Loc:               loc,
				Key:               key,
				Value:             value,
				DefaultValueOrNil: defaultValueOrNil,
			}
		}
	}

	// Non-shorthand: parse "key: binding" with an optional "= default"
	p.lexer.Expect(js_lexer.TColon)
	value := p.parseBinding(parseBindingOpts{})

	var defaultValueOrNil js_ast.Expr
	if p.lexer.Token == js_lexer.TEquals {
		p.lexer.Next()
		defaultValueOrNil = p.parseExpr(js_ast.LComma)
	}

	return js_ast.PropertyBinding{
		Loc:               loc,
		IsComputed:        isComputed,
		PreferQuotedKey:   preferQuotedKey,
		Key:               key,
		Value:             value,
		DefaultValueOrNil: defaultValueOrNil,
		CloseBracketLoc:   closeBracketLoc,
	}
}
+
// These properties have special semantics in JavaScript. They must not be
// mangled or we could potentially fail to parse valid JavaScript syntax or
// generate invalid JavaScript syntax as output.
//
// This list is only intended to contain properties specific to the JavaScript
// language itself to avoid syntax errors in the generated output. It's not
// intended to contain properties for JavaScript APIs. Those must be provided
// by the user.
//
// This set is consulted by "isMangledProp" below.
var permanentReservedProps = map[string]bool{
	"__proto__":   true,
	"constructor": true,
	"prototype":   true,
}
+
+func (p *parser) isMangledProp(name string) bool {
+	if p.options.mangleProps == nil {
+		return false
+	}
+	if p.options.mangleProps.MatchString(name) && !permanentReservedProps[name] && (p.options.reserveProps == nil || !p.options.reserveProps.MatchString(name)) {
+		return true
+	}
+	reservedProps := p.reservedProps
+	if reservedProps == nil {
+		reservedProps = make(map[string]bool)
+		p.reservedProps = reservedProps
+	}
+	reservedProps[name] = true
+	return false
+}
+
+func (p *parser) symbolForMangledProp(name string) ast.Ref {
+	mangledProps := p.mangledProps
+	if mangledProps == nil {
+		mangledProps = make(map[string]ast.Ref)
+		p.mangledProps = mangledProps
+	}
+	ref, ok := mangledProps[name]
+	if !ok {
+		ref = p.newSymbol(ast.SymbolMangledProp, name)
+		mangledProps[name] = ref
+	}
+	if !p.isControlFlowDead {
+		p.symbols[ref.InnerIndex].UseCountEstimate++
+	}
+	return ref
+}
+
// wasOriginallyDotOrIndex distinguishes whether a property access was written
// with dot syntax ("a.b") or index syntax ("a['b']") in the original source.
type wasOriginallyDotOrIndex uint8

const (
	// wasOriginallyDot means the access was written as "a.b"
	wasOriginallyDot wasOriginallyDotOrIndex = iota
	// wasOriginallyIndex means the access was written as "a['b']"
	wasOriginallyIndex
)
+
+func (p *parser) dotOrMangledPropParse(
+	target js_ast.Expr,
+	name js_lexer.MaybeSubstring,
+	nameLoc logger.Loc,
+	optionalChain js_ast.OptionalChain,
+	original wasOriginallyDotOrIndex,
+) js_ast.E {
+	if (original != wasOriginallyIndex || p.options.mangleQuoted) && p.isMangledProp(name.String) {
+		return &js_ast.EIndex{
+			Target:        target,
+			Index:         js_ast.Expr{Loc: nameLoc, Data: &js_ast.ENameOfSymbol{Ref: p.storeNameInRef(name)}},
+			OptionalChain: optionalChain,
+		}
+	}
+
+	return &js_ast.EDot{
+		Target:        target,
+		Name:          name.String,
+		NameLoc:       nameLoc,
+		OptionalChain: optionalChain,
+	}
+}
+
+func (p *parser) dotOrMangledPropVisit(target js_ast.Expr, name string, nameLoc logger.Loc) js_ast.E {
+	if p.isMangledProp(name) {
+		return &js_ast.EIndex{
+			Target: target,
+			Index:  js_ast.Expr{Loc: nameLoc, Data: &js_ast.ENameOfSymbol{Ref: p.symbolForMangledProp(name)}},
+		}
+	}
+
+	return &js_ast.EDot{
+		Target:  target,
+		Name:    name,
+		NameLoc: nameLoc,
+	}
+}
+
// parseArrowBody parses the "=>" and the body of an arrow function whose
// argument list has already been parsed into "args". The caller must have
// pushed the function-arguments scope. The body is either a block statement
// or a single expression, in which case it is wrapped in an implicit return.
func (p *parser) parseArrowBody(args []js_ast.Arg, data fnOrArrowDataParse) *js_ast.EArrow {
	arrowLoc := p.lexer.Loc()

	// Newlines are not allowed before "=>"
	if p.lexer.HasNewlineBefore {
		p.log.AddError(&p.tracker, p.lexer.Range(), "Unexpected newline before \"=>\"")
		panic(js_lexer.LexerPanic{})
	}

	p.lexer.Expect(js_lexer.TEqualsGreaterThan)

	// Declare the argument bindings now that we're committed to an arrow function
	for _, arg := range args {
		p.declareBinding(ast.SymbolHoisted, arg.Binding, parseStmtOpts{})
	}

	// The ability to use "this" and "super" is inherited by arrow functions
	data.isThisDisallowed = p.fnOrArrowDataParse.isThisDisallowed
	data.allowSuperCall = p.fnOrArrowDataParse.allowSuperCall
	data.allowSuperProperty = p.fnOrArrowDataParse.allowSuperProperty

	// "() => { ... }" — a block body
	if p.lexer.Token == js_lexer.TOpenBrace {
		body := p.parseFnBody(data)
		p.afterArrowBodyLoc = p.lexer.Loc()
		return &js_ast.EArrow{Args: args, Body: body}
	}

	// "() => expr" — an expression body, parsed in its own function-body scope
	// and represented as a block containing a single return statement
	p.pushScopeForParsePass(js_ast.ScopeFunctionBody, arrowLoc)
	defer p.popScope()

	// Swap in this arrow's parser state for the duration of the expression
	oldFnOrArrowData := p.fnOrArrowDataParse
	p.fnOrArrowDataParse = data
	expr := p.parseExpr(js_ast.LComma)
	p.fnOrArrowDataParse = oldFnOrArrowData
	return &js_ast.EArrow{
		Args:       args,
		PreferExpr: true,
		Body:       js_ast.FnBody{Loc: arrowLoc, Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: expr.Loc, Data: &js_ast.SReturn{ValueOrNil: expr}}}}},
	}
}
+
// checkForArrowAfterTheCurrentToken looks one token ahead for "=>" without
// consuming any input. It snapshots the lexer, advances it with logging
// disabled so no diagnostics leak out, then restores the snapshot — also on
// a lexer panic — so the caller can continue parsing unaffected.
func (p *parser) checkForArrowAfterTheCurrentToken() bool {
	oldLexer := p.lexer
	p.lexer.IsLogDisabled = true

	// Implement backtracking by restoring the lexer's memory to its original state
	defer func() {
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			p.lexer = oldLexer
		} else if r != nil {
			// Non-lexer panics are programmer errors; re-raise them
			panic(r)
		}
	}()

	p.lexer.Next()
	isArrowAfterThisToken := p.lexer.Token == js_lexer.TEqualsGreaterThan

	p.lexer = oldLexer
	return isArrowAfterThisToken
}
+
// This parses an expression. This assumes we've already parsed the "async"
// keyword and are currently looking at the following token.
//
// The "async" prefix is ambiguous: it may begin an async function expression,
// an async arrow function (parenthesized or not, possibly with TypeScript
// type parameters), or just be a plain identifier reference named "async".
func (p *parser) parseAsyncPrefixExpr(asyncRange logger.Range, level js_ast.L, flags exprFlag) js_ast.Expr {
	// "async function() {}"
	if !p.lexer.HasNewlineBefore && p.lexer.Token == js_lexer.TFunction {
		return p.parseFnExpr(asyncRange.Loc, true /* isAsync */, asyncRange)
	}

	// Check the precedence level to avoid parsing an arrow function in
	// "new async () => {}". This also avoids parsing "new async()" as
	// "new (async())()" instead.
	if !p.lexer.HasNewlineBefore && level < js_ast.LMember {
		switch p.lexer.Token {
		// "async => {}"
		case js_lexer.TEqualsGreaterThan:
			if level <= js_ast.LAssign {
				// Here "async" itself is the arrow function's only argument
				arg := js_ast.Arg{Binding: js_ast.Binding{Loc: asyncRange.Loc, Data: &js_ast.BIdentifier{
					Ref: p.storeNameInRef(js_lexer.MaybeSubstring{String: "async"})}}}

				p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, asyncRange.Loc)
				defer p.popScope()

				return js_ast.Expr{Loc: asyncRange.Loc, Data: p.parseArrowBody([]js_ast.Arg{arg}, fnOrArrowDataParse{
					needsAsyncLoc: asyncRange.Loc,
				})}
			}

		// "async x => {}"
		case js_lexer.TIdentifier:
			if level <= js_ast.LAssign {
				// See https://github.com/tc39/ecma262/issues/2034 for details
				isArrowFn := true
				if (flags&exprFlagForLoopInit) != 0 && p.lexer.Identifier.String == "of" {
					// "for (async of" is only an arrow function if the next token is "=>"
					isArrowFn = p.checkForArrowAfterTheCurrentToken()

					// Do not allow "for (async of []) ;" but do allow "for await (async of []) ;"
					if !isArrowFn && (flags&exprFlagForAwaitLoopInit) == 0 && p.lexer.Raw() == "of" {
						r := logger.Range{Loc: asyncRange.Loc, Len: p.lexer.Range().End() - asyncRange.Loc.Start}
						p.log.AddError(&p.tracker, r, "For loop initializers cannot start with \"async of\"")
						panic(js_lexer.LexerPanic{})
					}
				}

				if isArrowFn {
					p.markAsyncFn(asyncRange, false)
					ref := p.storeNameInRef(p.lexer.Identifier)
					arg := js_ast.Arg{Binding: js_ast.Binding{Loc: p.lexer.Loc(), Data: &js_ast.BIdentifier{Ref: ref}}}
					p.lexer.Next()

					p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, asyncRange.Loc)
					defer p.popScope()

					arrow := p.parseArrowBody([]js_ast.Arg{arg}, fnOrArrowDataParse{
						needsAsyncLoc: arg.Binding.Loc,
						await:         allowExpr,
					})
					arrow.IsAsync = true
					return js_ast.Expr{Loc: asyncRange.Loc, Data: arrow}
				}
			}

		// "async()"
		// "async () => {}"
		case js_lexer.TOpenParen:
			p.lexer.Next()
			return p.parseParenExpr(asyncRange.Loc, level, parenExprOpts{asyncRange: asyncRange})

		// "async<T>()"
		// "async <T>() => {}"
		case js_lexer.TLessThan:
			if p.options.ts.Parse && (!p.options.jsx.Parse || p.isTSArrowFnJSX()) {
				if result := p.trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking(); result != didNotSkipAnything {
					p.lexer.Next()
					return p.parseParenExpr(asyncRange.Loc, level, parenExprOpts{
						asyncRange:   asyncRange,
						forceArrowFn: result == definitelyTypeParameters,
					})
				}
			}
		}
	}

	// "async"
	// "async + 1"
	return js_ast.Expr{Loc: asyncRange.Loc, Data: &js_ast.EIdentifier{
		Ref: p.storeNameInRef(js_lexer.MaybeSubstring{String: "async"})}}
}
+
// parseFnExpr parses a function expression. The current token is the
// "function" keyword; "isAsync" and "asyncRange" describe an "async" prefix
// that the caller has already consumed (see parseAsyncPrefixExpr). The
// optional function name is scoped to the function itself.
func (p *parser) parseFnExpr(loc logger.Loc, isAsync bool, asyncRange logger.Range) js_ast.Expr {
	p.lexer.Next()
	isGenerator := p.lexer.Token == js_lexer.TAsterisk
	hasError := false
	if isAsync {
		hasError = p.markAsyncFn(asyncRange, isGenerator)
	}
	if isGenerator {
		// Only report the generator feature if the async marker above
		// didn't already produce an error
		if !hasError {
			p.markSyntaxFeature(compat.Generator, p.lexer.Range())
		}
		p.lexer.Next()
	}
	var name *ast.LocRef

	p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, loc)
	defer p.popScope()

	// The name is optional
	if p.lexer.Token == js_lexer.TIdentifier {
		// Don't declare the name "arguments" since it's shadowed and inaccessible
		name = &ast.LocRef{Loc: p.lexer.Loc()}
		if text := p.lexer.Identifier.String; text != "arguments" {
			name.Ref = p.declareSymbol(ast.SymbolHoistedFunction, name.Loc, text)
		} else {
			name.Ref = p.newSymbol(ast.SymbolHoistedFunction, text)
		}
		p.lexer.Next()
	}

	// Even anonymous functions can have TypeScript type parameters
	if p.options.ts.Parse {
		p.skipTypeScriptTypeParameters(allowConstModifier)
	}

	// "await" and "yield" become expression-level keywords inside async
	// functions and generators respectively
	await := allowIdent
	yield := allowIdent
	if isAsync {
		await = allowExpr
	}
	if isGenerator {
		yield = allowExpr
	}

	fn, _ := p.parseFn(name, logger.Range{}, 0, fnOrArrowDataParse{
		needsAsyncLoc: loc,
		asyncRange:    asyncRange,
		await:         await,
		yield:         yield,
	})
	p.validateFunctionName(fn, fnExpr)
	return js_ast.Expr{Loc: loc, Data: &js_ast.EFunction{Fn: fn}}
}
+
// parenExprOpts adjusts how parseParenExpr interprets a parenthesized
// expression.
type parenExprOpts struct {
	// asyncRange is non-empty when the parenthesis was preceded by "async",
	// meaning this may be an async arrow function or a call to "async"
	asyncRange logger.Range

	// forceArrowFn forces the contents to be treated as arrow function arguments
	forceArrowFn bool
}
+
// This assumes that the open parenthesis has already been parsed by the caller.
//
// parseParenExpr resolves the ambiguity between a parenthesized expression
// list, a call to a function named "async", and an (async) arrow function's
// argument list. It parses a superset of both grammars, deferring errors that
// are only valid in one interpretation, and commits to one form at the end.
func (p *parser) parseParenExpr(loc logger.Loc, level js_ast.L, opts parenExprOpts) js_ast.Expr {
	items := []js_ast.Expr{}
	errors := deferredErrors{}
	arrowArgErrors := deferredArrowArgErrors{}
	spreadRange := logger.Range{}
	typeColonRange := logger.Range{}
	commaAfterSpread := logger.Loc{}
	isAsync := opts.asyncRange.Len > 0

	// Push a scope assuming this is an arrow function. It may not be, in which
	// case we'll need to roll this change back. This has to be done ahead of
	// parsing the arguments instead of later on when we hit the "=>" token and
	// we know it's an arrow function because the arguments may have default
	// values that introduce new scopes and declare new symbols. If this is an
	// arrow function, then those new scopes will need to be parented under the
	// scope of the arrow function itself.
	scopeIndex := p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, loc)

	// Allow "in" inside parentheses
	oldAllowIn := p.allowIn
	p.allowIn = true

	// Forbid "await" and "yield", but only for arrow functions
	oldFnOrArrowData := p.fnOrArrowDataParse
	p.fnOrArrowDataParse.arrowArgErrors = &arrowArgErrors

	// Scan over the comma-separated arguments or expressions
	for p.lexer.Token != js_lexer.TCloseParen {
		itemLoc := p.lexer.Loc()
		isSpread := p.lexer.Token == js_lexer.TDotDotDot

		if isSpread {
			spreadRange = p.lexer.Range()
			p.markSyntaxFeature(compat.RestArgument, spreadRange)
			p.lexer.Next()
		}

		// We don't know yet whether these are arguments or expressions, so parse
		// a superset of the expression syntax. Errors about things that are valid
		// in one but not in the other are deferred.
		p.latestArrowArgLoc = p.lexer.Loc()
		item := p.parseExprOrBindings(js_ast.LComma, &errors)

		if isSpread {
			item = js_ast.Expr{Loc: itemLoc, Data: &js_ast.ESpread{Value: item}}
		}

		// Skip over types
		if p.options.ts.Parse && p.lexer.Token == js_lexer.TColon {
			typeColonRange = p.lexer.Range()
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		}

		// There may be a "=" after the type (but not after an "as" cast)
		if p.options.ts.Parse && p.lexer.Token == js_lexer.TEquals && p.lexer.Loc() != p.forbidSuffixAfterAsLoc {
			p.lexer.Next()
			item = js_ast.Assign(item, p.parseExpr(js_ast.LComma))
		}

		items = append(items, item)
		if p.lexer.Token != js_lexer.TComma {
			break
		}

		// Spread arguments must come last. If there's a spread argument followed
		// by a comma, throw an error if we use these expressions as bindings.
		if isSpread {
			commaAfterSpread = p.lexer.Loc()
		}

		// Eat the comma token
		p.lexer.Next()
	}

	// The parenthetical construct must end with a close parenthesis
	p.lexer.Expect(js_lexer.TCloseParen)

	// Restore "in" operator status before we parse the arrow function body
	p.allowIn = oldAllowIn

	// Also restore "await" and "yield" expression errors
	p.fnOrArrowDataParse = oldFnOrArrowData

	// Are these arguments to an arrow function?
	if p.lexer.Token == js_lexer.TEqualsGreaterThan || opts.forceArrowFn || (p.options.ts.Parse && p.lexer.Token == js_lexer.TColon) {
		// Arrow functions are not allowed inside certain expressions
		if level > js_ast.LAssign {
			p.lexer.Unexpected()
		}

		var invalidLog invalidLog
		args := []js_ast.Arg{}

		if isAsync {
			p.markAsyncFn(opts.asyncRange, false)
		}

		// First, try converting the expressions to bindings
		for _, item := range items {
			isSpread := false
			if spread, ok := item.Data.(*js_ast.ESpread); ok {
				item = spread.Value
				isSpread = true
			}
			binding, initializerOrNil, log := p.convertExprToBindingAndInitializer(item, invalidLog, isSpread)
			invalidLog = log
			args = append(args, js_ast.Arg{Binding: binding, DefaultOrNil: initializerOrNil})
		}

		// Avoid parsing TypeScript code like "a ? (1 + 2) : (3 + 4)" as an arrow
		// function. The ":" after the ")" may be a return type annotation, so we
		// attempt to convert the expressions to bindings first before deciding
		// whether this is an arrow function, and only pick an arrow function if
		// there were no conversion errors.
		if p.lexer.Token == js_lexer.TEqualsGreaterThan || (len(invalidLog.invalidTokens) == 0 &&
			p.trySkipTypeScriptArrowReturnTypeWithBacktracking()) || opts.forceArrowFn {
			if commaAfterSpread.Start != 0 {
				p.log.AddError(&p.tracker, logger.Range{Loc: commaAfterSpread, Len: 1}, "Unexpected \",\" after rest pattern")
			}
			p.logArrowArgErrors(&arrowArgErrors)
			p.logDeferredArrowArgErrors(&errors)

			// Now that we've decided we're an arrow function, report binding pattern
			// conversion errors
			if len(invalidLog.invalidTokens) > 0 {
				for _, token := range invalidLog.invalidTokens {
					p.log.AddError(&p.tracker, token, "Invalid binding pattern")
				}
				panic(js_lexer.LexerPanic{})
			}

			// Also report syntax features used in bindings
			for _, entry := range invalidLog.syntaxFeatures {
				p.markSyntaxFeature(entry.feature, entry.token)
			}

			await := allowIdent
			if isAsync {
				await = allowExpr
			}

			arrow := p.parseArrowBody(args, fnOrArrowDataParse{
				needsAsyncLoc: loc,
				await:         await,
			})
			arrow.IsAsync = isAsync
			arrow.HasRestArg = spreadRange.Len > 0
			p.popScope()
			return js_ast.Expr{Loc: loc, Data: arrow}
		}
	}

	// If we get here, it's not an arrow function so undo the pushing of the
	// scope we did earlier. This needs to flatten any child scopes into the
	// parent scope as if the scope was never pushed in the first place.
	p.popAndFlattenScope(scopeIndex)

	// If this isn't an arrow function, then types aren't allowed
	if typeColonRange.Len > 0 {
		p.log.AddError(&p.tracker, typeColonRange, "Unexpected \":\"")
		panic(js_lexer.LexerPanic{})
	}

	// Are these arguments for a call to a function named "async"?
	if isAsync {
		p.logExprErrors(&errors)
		async := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{
			Ref: p.storeNameInRef(js_lexer.MaybeSubstring{String: "async"})}}
		return js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
			Target: async,
			Args:   items,
		}}
	}

	// Is this a chain of expressions and comma operators?
	if len(items) > 0 {
		p.logExprErrors(&errors)
		if spreadRange.Len > 0 {
			p.log.AddError(&p.tracker, spreadRange, "Unexpected \"...\"")
			panic(js_lexer.LexerPanic{})
		}
		value := js_ast.JoinAllWithComma(items)
		p.markExprAsParenthesized(value, loc, isAsync)
		return value
	}

	// Indicate that we expected an arrow function
	p.lexer.Expected(js_lexer.TEqualsGreaterThan)
	return js_ast.Expr{}
}
+
// invalidLog accumulates deferred problems encountered while trying to
// convert expressions into binding patterns. The invalid tokens only become
// errors if the arrow-function interpretation is later committed to.
type invalidLog struct {
	invalidTokens  []logger.Range
	syntaxFeatures []syntaxFeature
}
+
// syntaxFeature records a use of a syntax feature, and where it occurred,
// so it can be reported via markSyntaxFeature later if needed.
type syntaxFeature struct {
	feature compat.JSFeature
	token   logger.Range
}
+
+func (p *parser) convertExprToBindingAndInitializer(
+	expr js_ast.Expr, invalidLog invalidLog, isSpread bool,
+) (js_ast.Binding, js_ast.Expr, invalidLog) {
+	var initializerOrNil js_ast.Expr
+	if assign, ok := expr.Data.(*js_ast.EBinary); ok && assign.Op == js_ast.BinOpAssign {
+		initializerOrNil = assign.Right
+		expr = assign.Left
+	}
+	binding, invalidLog := p.convertExprToBinding(expr, invalidLog)
+	if initializerOrNil.Data != nil {
+		equalsRange := p.source.RangeOfOperatorBefore(initializerOrNil.Loc, "=")
+		if isSpread {
+			p.log.AddError(&p.tracker, equalsRange, "A rest argument cannot have a default initializer")
+		} else {
+			invalidLog.syntaxFeatures = append(invalidLog.syntaxFeatures, syntaxFeature{
+				feature: compat.DefaultArgument,
+				token:   equalsRange,
+			})
+		}
+	}
+	return binding, initializerOrNil, invalidLog
+}
+
// Note: do not write to "p.log" in this function. Any errors due to conversion
// from expression to binding should be written to "invalidLog" instead. That
// way we can potentially keep this as an expression if it turns out it's not
// needed as a binding after all.
//
// convertExprToBinding recursively converts an expression (a possible arrow
// function argument) into a binding pattern. Identifiers, holes, array
// literals, and object literals convert; anything else is recorded as an
// invalid token.
func (p *parser) convertExprToBinding(expr js_ast.Expr, invalidLog invalidLog) (js_ast.Binding, invalidLog) {
	switch e := expr.Data.(type) {
	case *js_ast.EMissing:
		// An elision such as the hole in "[a, , b]"
		return js_ast.Binding{Loc: expr.Loc, Data: js_ast.BMissingShared}, invalidLog

	case *js_ast.EIdentifier:
		return js_ast.Binding{Loc: expr.Loc, Data: &js_ast.BIdentifier{Ref: e.Ref}}, invalidLog

	case *js_ast.EArray:
		// A comma after a spread ("[...a,]") is invalid in a binding position
		if e.CommaAfterSpread.Start != 0 {
			invalidLog.invalidTokens = append(invalidLog.invalidTokens, logger.Range{Loc: e.CommaAfterSpread, Len: 1})
		}
		invalidLog.syntaxFeatures = append(invalidLog.syntaxFeatures,
			syntaxFeature{feature: compat.Destructuring, token: p.source.RangeOfOperatorAfter(expr.Loc, "[")})
		items := []js_ast.ArrayBinding{}
		isSpread := false
		for _, item := range e.Items {
			if i, ok := item.Data.(*js_ast.ESpread); ok {
				isSpread = true
				item = i.Value
				if _, ok := item.Data.(*js_ast.EIdentifier); !ok {
					// A rest element whose target is itself a pattern may need lowering
					p.markSyntaxFeature(compat.NestedRestBinding, p.source.RangeOfOperatorAfter(item.Loc, "["))
				}
			}
			binding, initializerOrNil, log := p.convertExprToBindingAndInitializer(item, invalidLog, isSpread)
			invalidLog = log
			items = append(items, js_ast.ArrayBinding{
				Binding:           binding,
				DefaultValueOrNil: initializerOrNil,
				Loc:               item.Loc,
			})
		}
		return js_ast.Binding{Loc: expr.Loc, Data: &js_ast.BArray{
			Items:           items,
			HasSpread:       isSpread,
			IsSingleLine:    e.IsSingleLine,
			CloseBracketLoc: e.CloseBracketLoc,
		}}, invalidLog

	case *js_ast.EObject:
		// A comma after a spread ("{...a,}") is invalid in a binding position
		if e.CommaAfterSpread.Start != 0 {
			invalidLog.invalidTokens = append(invalidLog.invalidTokens, logger.Range{Loc: e.CommaAfterSpread, Len: 1})
		}
		invalidLog.syntaxFeatures = append(invalidLog.syntaxFeatures,
			syntaxFeature{feature: compat.Destructuring, token: p.source.RangeOfOperatorAfter(expr.Loc, "{")})
		properties := []js_ast.PropertyBinding{}
		for _, property := range e.Properties {
			// Methods (including getters/setters) can never be binding targets
			if property.Kind.IsMethodDefinition() {
				invalidLog.invalidTokens = append(invalidLog.invalidTokens, js_lexer.RangeOfIdentifier(p.source, property.Key.Loc))
				continue
			}
			binding, initializerOrNil, log := p.convertExprToBindingAndInitializer(property.ValueOrNil, invalidLog, false)
			invalidLog = log
			if initializerOrNil.Data == nil {
				initializerOrNil = property.InitializerOrNil
			}
			properties = append(properties, js_ast.PropertyBinding{
				Loc:               property.Loc,
				IsSpread:          property.Kind == js_ast.PropertySpread,
				IsComputed:        property.Flags.Has(js_ast.PropertyIsComputed),
				Key:               property.Key,
				Value:             binding,
				DefaultValueOrNil: initializerOrNil,
			})
		}
		return js_ast.Binding{Loc: expr.Loc, Data: &js_ast.BObject{
			Properties:    properties,
			IsSingleLine:  e.IsSingleLine,
			CloseBraceLoc: e.CloseBraceLoc,
		}}, invalidLog

	default:
		// Anything else cannot appear in a binding pattern
		invalidLog.invalidTokens = append(invalidLog.invalidTokens, logger.Range{Loc: expr.Loc})
		return js_ast.Binding{}, invalidLog
	}
}
+
+// saveExprCommentsHere records any comments that appeared before the current
+// token, associating them with the current source location so they can be
+// printed back out alongside the expression parsed at that location. It
+// returns the current lexer location in all cases (even when expression
+// comments are not being collected).
+func (p *parser) saveExprCommentsHere() logger.Loc {
+	loc := p.lexer.Loc()
+	if p.exprComments != nil && len(p.lexer.CommentsBeforeToken) > 0 {
+		comments := make([]string, len(p.lexer.CommentsBeforeToken))
+		for i, comment := range p.lexer.CommentsBeforeToken {
+			comments[i] = p.source.CommentTextWithoutIndent(comment)
+		}
+		p.exprComments[loc] = comments
+		// Clear the consumed comments so they aren't attached again to a
+		// later location. The previous code used "[0:]", which is a no-op
+		// re-slice and left the comments in place; "[:0]" resets the length
+		// to zero while keeping the backing array for reuse.
+		p.lexer.CommentsBeforeToken = p.lexer.CommentsBeforeToken[:0]
+	}
+	return loc
+}
+
+// exprFlag is a bit set of contextual modifiers that alter how an expression
+// is parsed.
+type exprFlag uint8
+
+const (
+	// exprFlagDecorator is set while parsing a decorator expression ("@dec").
+	exprFlagDecorator exprFlag = 1 << iota
+	// exprFlagForLoopInit is set while parsing the initializer of a "for" loop.
+	exprFlagForLoopInit
+	// exprFlagForAwaitLoopInit is set while parsing the initializer of a
+	// "for await" loop.
+	exprFlagForAwaitLoopInit
+)
+
+// parsePrefix parses the prefix portion of an expression: literals, unary
+// operators, "new", parenthesized expressions, array/object literals, JSX,
+// and TypeScript type casts. Suffix operators (calls, member access, binary
+// operators) are handled afterwards by parseSuffix. "level" is the current
+// operator precedence, "errors" collects syntax that is only invalid if this
+// turns out not to be an arrow function parameter list, and "flags" carries
+// contextual modifiers such as decorator or for-loop-initializer position.
+func (p *parser) parsePrefix(level js_ast.L, errors *deferredErrors, flags exprFlag) js_ast.Expr {
+	loc := p.saveExprCommentsHere()
+
+	switch p.lexer.Token {
+	case js_lexer.TSuper:
+		// "super" is only valid as a call target or property access inside
+		// certain function bodies
+		superRange := p.lexer.Range()
+		p.lexer.Next()
+
+		switch p.lexer.Token {
+		case js_lexer.TOpenParen:
+			if level < js_ast.LCall && p.fnOrArrowDataParse.allowSuperCall {
+				return js_ast.Expr{Loc: loc, Data: js_ast.ESuperShared}
+			}
+
+		case js_lexer.TDot, js_lexer.TOpenBracket:
+			if p.fnOrArrowDataParse.allowSuperProperty {
+				return js_ast.Expr{Loc: loc, Data: js_ast.ESuperShared}
+			}
+		}
+
+		p.log.AddError(&p.tracker, superRange, "Unexpected \"super\"")
+		return js_ast.Expr{Loc: loc, Data: js_ast.ESuperShared}
+
+	case js_lexer.TOpenParen:
+		// Either a parenthesized expression or an arrow function parameter list
+		if errors != nil {
+			errors.invalidParens = append(errors.invalidParens, p.lexer.Range())
+		}
+
+		p.lexer.Next()
+
+		// Arrow functions aren't allowed in the middle of expressions
+		if level > js_ast.LAssign {
+			// Allow "in" inside parentheses
+			oldAllowIn := p.allowIn
+			p.allowIn = true
+
+			value := p.parseExpr(js_ast.LLowest)
+			p.markExprAsParenthesized(value, loc, false)
+			p.lexer.Expect(js_lexer.TCloseParen)
+
+			p.allowIn = oldAllowIn
+			return value
+		}
+
+		value := p.parseParenExpr(loc, level, parenExprOpts{})
+		return value
+
+	case js_lexer.TFalse:
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: false}}
+
+	case js_lexer.TTrue:
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}}
+
+	case js_lexer.TNull:
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: js_ast.ENullShared}
+
+	case js_lexer.TThis:
+		if p.fnOrArrowDataParse.isThisDisallowed {
+			p.log.AddError(&p.tracker, p.lexer.Range(), "Cannot use \"this\" here:")
+		}
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+
+	case js_lexer.TPrivateIdentifier:
+		// A bare private name is only valid on the left of "in" ("#foo in bar")
+		if !p.allowPrivateIdentifiers || !p.allowIn || level >= js_ast.LCompare {
+			p.lexer.Unexpected()
+		}
+
+		name := p.lexer.Identifier
+		p.lexer.Next()
+
+		// Check for "#foo in bar"
+		if p.lexer.Token != js_lexer.TIn {
+			p.lexer.Expected(js_lexer.TIn)
+		}
+
+		// Make sure to lower all matching private names
+		if p.options.unsupportedJSFeatures.Has(compat.ClassPrivateBrandCheck) {
+			if p.lowerAllOfThesePrivateNames == nil {
+				p.lowerAllOfThesePrivateNames = make(map[string]bool)
+			}
+			p.lowerAllOfThesePrivateNames[name.String] = true
+		}
+
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EPrivateIdentifier{Ref: p.storeNameInRef(name)}}
+
+	case js_lexer.TIdentifier:
+		name := p.lexer.Identifier
+		nameRange := p.lexer.Range()
+		raw := p.lexer.Raw()
+		p.lexer.Next()
+
+		// Handle async and await expressions
+		switch name.String {
+		case "async":
+			// Only an unescaped "async" can start an async function or arrow
+			if raw == "async" {
+				return p.parseAsyncPrefixExpr(nameRange, level, flags)
+			}
+
+		case "await":
+			switch p.fnOrArrowDataParse.await {
+			case forbidAll:
+				p.log.AddError(&p.tracker, nameRange, "The keyword \"await\" cannot be used here:")
+
+			case allowExpr:
+				if raw != "await" {
+					p.log.AddError(&p.tracker, nameRange, "The keyword \"await\" cannot be escaped")
+				} else {
+					if p.fnOrArrowDataParse.isTopLevel {
+						p.topLevelAwaitKeyword = nameRange
+					}
+					if p.fnOrArrowDataParse.arrowArgErrors != nil {
+						p.fnOrArrowDataParse.arrowArgErrors.invalidExprAwait = nameRange
+					}
+					value := p.parseExpr(js_ast.LPrefix)
+					// "await x ** 2" is a syntax error; exponentiation needs parentheses
+					if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+						p.lexer.Unexpected()
+					}
+					return js_ast.Expr{Loc: loc, Data: &js_ast.EAwait{Value: value}}
+				}
+
+			case allowIdent:
+				p.lexer.PrevTokenWasAwaitKeyword = true
+				p.lexer.AwaitKeywordLoc = loc
+				p.lexer.FnOrArrowStartLoc = p.fnOrArrowDataParse.needsAsyncLoc
+			}
+
+		case "yield":
+			switch p.fnOrArrowDataParse.yield {
+			case forbidAll:
+				p.log.AddError(&p.tracker, nameRange, "The keyword \"yield\" cannot be used here:")
+
+			case allowExpr:
+				if raw != "yield" {
+					p.log.AddError(&p.tracker, nameRange, "The keyword \"yield\" cannot be escaped")
+				} else {
+					if level > js_ast.LAssign {
+						p.log.AddError(&p.tracker, nameRange, "Cannot use a \"yield\" expression here without parentheses:")
+					}
+					if p.fnOrArrowDataParse.arrowArgErrors != nil {
+						p.fnOrArrowDataParse.arrowArgErrors.invalidExprYield = nameRange
+					}
+					return p.parseYieldExpr(loc)
+				}
+
+			case allowIdent:
+				if !p.lexer.HasNewlineBefore {
+					// Try to gracefully recover if "yield" is used in the wrong place
+					switch p.lexer.Token {
+					case js_lexer.TNull, js_lexer.TIdentifier, js_lexer.TFalse, js_lexer.TTrue,
+						js_lexer.TNumericLiteral, js_lexer.TBigIntegerLiteral, js_lexer.TStringLiteral:
+						p.log.AddError(&p.tracker, nameRange, "Cannot use \"yield\" outside a generator function")
+						return p.parseYieldExpr(loc)
+					}
+				}
+			}
+		}
+
+		// Handle the start of an arrow expression
+		if p.lexer.Token == js_lexer.TEqualsGreaterThan && level <= js_ast.LAssign {
+			ref := p.storeNameInRef(name)
+			arg := js_ast.Arg{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: ref}}}
+
+			p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, loc)
+			defer p.popScope()
+
+			return js_ast.Expr{Loc: loc, Data: p.parseArrowBody([]js_ast.Arg{arg}, fnOrArrowDataParse{
+				needsAsyncLoc: loc,
+			})}
+		}
+
+		ref := p.storeNameInRef(name)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+
+	case js_lexer.TStringLiteral, js_lexer.TNoSubstitutionTemplateLiteral:
+		return p.parseStringLiteral()
+
+	case js_lexer.TTemplateHead:
+		// A template literal with substitutions
+		var legacyOctalLoc logger.Loc
+		headLoc := p.lexer.Loc()
+		head := p.lexer.StringLiteral()
+		if p.lexer.LegacyOctalLoc.Start > loc.Start {
+			legacyOctalLoc = p.lexer.LegacyOctalLoc
+		}
+		parts, tailLegacyOctalLoc := p.parseTemplateParts(false /* includeRaw */)
+		if tailLegacyOctalLoc.Start > 0 {
+			legacyOctalLoc = tailLegacyOctalLoc
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.ETemplate{
+			HeadLoc:        headLoc,
+			HeadCooked:     head,
+			Parts:          parts,
+			LegacyOctalLoc: legacyOctalLoc,
+		}}
+
+	case js_lexer.TNumericLiteral:
+		value := js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: p.lexer.Number}}
+		p.checkForLegacyOctalLiteral(value.Data)
+		p.lexer.Next()
+		return value
+
+	case js_lexer.TBigIntegerLiteral:
+		value := p.lexer.Identifier
+		p.markSyntaxFeature(compat.Bigint, p.lexer.Range())
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EBigInt{Value: value.String}}
+
+	case js_lexer.TSlash, js_lexer.TSlashEquals:
+		// A regular expression literal (the lexer re-scans "/" as a regexp here)
+		p.lexer.ScanRegExp()
+		value := p.lexer.Raw()
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.ERegExp{Value: value}}
+
+	case js_lexer.TVoid:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		// Unary operators cannot be the left operand of "**" without parentheses
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpVoid, Value: value}}
+
+	case js_lexer.TTypeof:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		_, valueIsIdentifier := value.Data.(*js_ast.EIdentifier)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
+			Op:                            js_ast.UnOpTypeof,
+			Value:                         value,
+			WasOriginallyTypeofIdentifier: valueIsIdentifier,
+		}}
+
+	case js_lexer.TDelete:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		// Deleting a private name (e.g. "delete this.#foo") is a syntax error
+		if index, ok := value.Data.(*js_ast.EIndex); ok {
+			if private, ok := index.Index.Data.(*js_ast.EPrivateIdentifier); ok {
+				name := p.loadNameFromRef(private.Ref)
+				r := logger.Range{Loc: index.Index.Loc, Len: int32(len(name))}
+				p.log.AddError(&p.tracker, r, fmt.Sprintf("Deleting the private name %q is forbidden", name))
+			}
+		}
+		_, valueIsIdentifier := value.Data.(*js_ast.EIdentifier)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
+			Op:    js_ast.UnOpDelete,
+			Value: value,
+			WasOriginallyDeleteOfIdentifierOrPropertyAccess: valueIsIdentifier || js_ast.IsPropertyAccess(value),
+		}}
+
+	case js_lexer.TPlus:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpPos, Value: value}}
+
+	case js_lexer.TMinus:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpNeg, Value: value}}
+
+	case js_lexer.TTilde:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpCpl, Value: value}}
+
+	case js_lexer.TExclamation:
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LPrefix)
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			p.lexer.Unexpected()
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpNot, Value: value}}
+
+	case js_lexer.TMinusMinus:
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpPreDec, Value: p.parseExpr(js_ast.LPrefix)}}
+
+	case js_lexer.TPlusPlus:
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{Op: js_ast.UnOpPreInc, Value: p.parseExpr(js_ast.LPrefix)}}
+
+	case js_lexer.TFunction:
+		return p.parseFnExpr(loc, false /* isAsync */, logger.Range{})
+
+	case js_lexer.TClass:
+		return p.parseClassExpr(nil)
+
+	case js_lexer.TAt:
+		// Parse decorators before class expressions
+		decorators := p.parseDecorators(p.currentScope, logger.Range{}, decoratorBeforeClassExpr)
+		return p.parseClassExpr(decorators)
+
+	case js_lexer.TNew:
+		p.lexer.Next()
+
+		// Special-case the weird "new.target" expression here
+		if p.lexer.Token == js_lexer.TDot {
+			p.lexer.Next()
+			if p.lexer.Token != js_lexer.TIdentifier || p.lexer.Raw() != "target" {
+				p.lexer.Unexpected()
+			}
+			r := logger.Range{Loc: loc, Len: p.lexer.Range().End() - loc.Start}
+			p.markSyntaxFeature(compat.NewTarget, r)
+			p.lexer.Next()
+			return js_ast.Expr{Loc: loc, Data: &js_ast.ENewTarget{Range: r}}
+		}
+
+		target := p.parseExprWithFlags(js_ast.LMember, flags)
+		args := []js_ast.Expr{}
+		var closeParenLoc logger.Loc
+		var isMultiLine bool
+
+		// The argument list is optional: "new Foo" is valid
+		if p.lexer.Token == js_lexer.TOpenParen {
+			args, closeParenLoc, isMultiLine = p.parseCallArgs()
+		}
+
+		return js_ast.Expr{Loc: loc, Data: &js_ast.ENew{
+			Target:        target,
+			Args:          args,
+			CloseParenLoc: closeParenLoc,
+			IsMultiLine:   isMultiLine,
+		}}
+
+	case js_lexer.TOpenBracket:
+		// An array literal (which may later become an array binding pattern)
+		p.lexer.Next()
+		isSingleLine := !p.lexer.HasNewlineBefore
+		items := []js_ast.Expr{}
+		selfErrors := deferredErrors{}
+		commaAfterSpread := logger.Loc{}
+
+		// Allow "in" inside arrays
+		oldAllowIn := p.allowIn
+		p.allowIn = true
+
+		for p.lexer.Token != js_lexer.TCloseBracket {
+			switch p.lexer.Token {
+			case js_lexer.TComma:
+				// An array hole such as "[a, , b]"
+				items = append(items, js_ast.Expr{Loc: p.lexer.Loc(), Data: js_ast.EMissingShared})
+
+			case js_lexer.TDotDotDot:
+				if errors != nil {
+					errors.arraySpreadFeature = p.lexer.Range()
+				} else {
+					p.markSyntaxFeature(compat.ArraySpread, p.lexer.Range())
+				}
+				dotsLoc := p.saveExprCommentsHere()
+				p.lexer.Next()
+				item := p.parseExprOrBindings(js_ast.LComma, &selfErrors)
+				items = append(items, js_ast.Expr{Loc: dotsLoc, Data: &js_ast.ESpread{Value: item}})
+
+				// Commas are not allowed here when destructuring
+				if p.lexer.Token == js_lexer.TComma {
+					commaAfterSpread = p.lexer.Loc()
+				}
+
+			default:
+				item := p.parseExprOrBindings(js_ast.LComma, &selfErrors)
+				items = append(items, item)
+			}
+
+			if p.lexer.Token != js_lexer.TComma {
+				break
+			}
+			if p.lexer.HasNewlineBefore {
+				isSingleLine = false
+			}
+			p.lexer.Next()
+			if p.lexer.HasNewlineBefore {
+				isSingleLine = false
+			}
+		}
+
+		if p.lexer.HasNewlineBefore {
+			isSingleLine = false
+		}
+		closeBracketLoc := p.saveExprCommentsHere()
+		p.lexer.Expect(js_lexer.TCloseBracket)
+		p.allowIn = oldAllowIn
+
+		if p.willNeedBindingPattern() {
+			// Is this a binding pattern?
+		} else if errors == nil {
+			// Is this an expression?
+			p.logExprErrors(&selfErrors)
+		} else {
+			// In this case, we can't distinguish between the two yet
+			selfErrors.mergeInto(errors)
+		}
+
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EArray{
+			Items:            items,
+			CommaAfterSpread: commaAfterSpread,
+			IsSingleLine:     isSingleLine,
+			CloseBracketLoc:  closeBracketLoc,
+		}}
+
+	case js_lexer.TOpenBrace:
+		// An object literal (which may later become an object binding pattern)
+		p.lexer.Next()
+		isSingleLine := !p.lexer.HasNewlineBefore
+		properties := []js_ast.Property{}
+		selfErrors := deferredErrors{}
+		commaAfterSpread := logger.Loc{}
+
+		// Allow "in" inside object literals
+		oldAllowIn := p.allowIn
+		p.allowIn = true
+
+		for p.lexer.Token != js_lexer.TCloseBrace {
+			if p.lexer.Token == js_lexer.TDotDotDot {
+				dotLoc := p.saveExprCommentsHere()
+				p.lexer.Next()
+				value := p.parseExprOrBindings(js_ast.LComma, &selfErrors)
+				properties = append(properties, js_ast.Property{
+					Kind:       js_ast.PropertySpread,
+					Loc:        dotLoc,
+					ValueOrNil: value,
+				})
+
+				// Commas are not allowed here when destructuring
+				if p.lexer.Token == js_lexer.TComma {
+					commaAfterSpread = p.lexer.Loc()
+				}
+			} else {
+				// This property may turn out to be a type in TypeScript, which should be ignored
+				if property, ok := p.parseProperty(p.saveExprCommentsHere(), js_ast.PropertyField, propertyOpts{}, &selfErrors); ok {
+					properties = append(properties, property)
+				}
+			}
+
+			if p.lexer.Token != js_lexer.TComma {
+				break
+			}
+			if p.lexer.HasNewlineBefore {
+				isSingleLine = false
+			}
+			p.lexer.Next()
+			if p.lexer.HasNewlineBefore {
+				isSingleLine = false
+			}
+		}
+
+		if p.lexer.HasNewlineBefore {
+			isSingleLine = false
+		}
+		closeBraceLoc := p.saveExprCommentsHere()
+		p.lexer.Expect(js_lexer.TCloseBrace)
+		p.allowIn = oldAllowIn
+
+		if p.willNeedBindingPattern() {
+			// Is this a binding pattern?
+		} else if errors == nil {
+			// Is this an expression?
+			p.logExprErrors(&selfErrors)
+		} else {
+			// In this case, we can't distinguish between the two yet
+			selfErrors.mergeInto(errors)
+		}
+
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EObject{
+			Properties:       properties,
+			CommaAfterSpread: commaAfterSpread,
+			IsSingleLine:     isSingleLine,
+			CloseBraceLoc:    closeBraceLoc,
+		}}
+
+	case js_lexer.TLessThan:
+		// This is a very complicated and highly ambiguous area of TypeScript
+		// syntax. Many similar-looking things are overloaded.
+		//
+		// TS:
+		//
+		//   A type cast:
+		//     <A>(x)
+		//     <[]>(x)
+		//     <A[]>(x)
+		//     <const>(x)
+		//
+		//   An arrow function with type parameters:
+		//     <A>(x) => {}
+		//     <A, B>(x) => {}
+		//     <A = B>(x) => {}
+		//     <A extends B>(x) => {}
+		//     <const A>(x) => {}
+		//     <const A extends B>(x) => {}
+		//
+		//   A syntax error:
+		//     <>() => {}
+		//
+		// TSX:
+		//
+		//   A JSX element:
+		//     <>() => {}</>
+		//     <A>(x) => {}</A>
+		//     <A extends/>
+		//     <A extends>(x) => {}</A>
+		//     <A extends={false}>(x) => {}</A>
+		//     <const A extends/>
+		//     <const A extends>(x) => {}</const>
+		//
+		//   An arrow function with type parameters:
+		//     <A,>(x) => {}
+		//     <A, B>(x) => {}
+		//     <A = B>(x) => {}
+		//     <A extends B>(x) => {}
+		//     <const>(x)</const>
+		//     <const A extends B>(x) => {}
+		//
+		//   A syntax error:
+		//     <[]>(x)
+		//     <A[]>(x)
+		//     <>() => {}
+		//     <A>(x) => {}
+
+		if p.options.ts.Parse && p.options.jsx.Parse && p.isTSArrowFnJSX() {
+			p.skipTypeScriptTypeParameters(allowConstModifier)
+			p.lexer.Expect(js_lexer.TOpenParen)
+			return p.parseParenExpr(loc, level, parenExprOpts{forceArrowFn: true})
+		}
+
+		// Print a friendly error message when parsing JSX as JavaScript
+		if !p.options.jsx.Parse && !p.options.ts.Parse {
+			var how string
+			switch logger.API {
+			case logger.CLIAPI:
+				how = " You can use \"--loader:.js=jsx\" to do that."
+			case logger.JSAPI:
+				how = " You can use \"loader: { '.js': 'jsx' }\" to do that."
+			case logger.GoAPI:
+				how = " You can use 'Loader: map[string]api.Loader{\".js\": api.LoaderJSX}' to do that."
+			}
+			p.log.AddErrorWithNotes(&p.tracker, p.lexer.Range(), "The JSX syntax extension is not currently enabled", []logger.MsgData{{
+				Text: "The esbuild loader for this file is currently set to \"js\" but it must be set to \"jsx\" to be able to parse JSX syntax." + how}})
+			p.options.jsx.Parse = true
+		}
+
+		if p.options.jsx.Parse {
+			// Use NextInsideJSXElement() instead of Next() so we parse "<<" as "<"
+			p.lexer.NextInsideJSXElement()
+			element := p.parseJSXElement(loc)
+
+			// The call to parseJSXElement() above doesn't consume the last
+			// TGreaterThan because the caller knows what Next() function to call.
+			// Use Next() instead of NextInsideJSXElement() here since the next
+			// token is an expression.
+			p.lexer.Next()
+			return element
+		}
+
+		if p.options.ts.Parse {
+			// This is either an old-style type cast or a generic lambda function
+
+			// TypeScript 4.5 introduced the ".mts" and ".cts" extensions that forbid
+			// the use of an expression starting with "<" that would be ambiguous
+			// when the file is in JSX mode.
+			if p.options.ts.NoAmbiguousLessThan && !p.isTSArrowFnJSX() {
+				p.log.AddError(&p.tracker, p.lexer.Range(),
+					"This syntax is not allowed in files with the \".mts\" or \".cts\" extension")
+			}
+
+			// "<T>(x)"
+			// "<T>(x) => {}"
+			if result := p.trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking(); result != didNotSkipAnything {
+				p.lexer.Expect(js_lexer.TOpenParen)
+				return p.parseParenExpr(loc, level, parenExprOpts{
+					forceArrowFn: result == definitelyTypeParameters,
+				})
+			}
+
+			// "<T>x"
+			p.lexer.Next()
+			p.skipTypeScriptType(js_ast.LLowest)
+			p.lexer.ExpectGreaterThan(false /* isInsideJSXElement */)
+			value := p.parsePrefix(level, errors, flags)
+			return value
+		}
+
+		p.lexer.Unexpected()
+		return js_ast.Expr{}
+
+	case js_lexer.TImport:
+		// "import(...)" or "import.meta"
+		p.lexer.Next()
+		return p.parseImportExpr(loc, level)
+
+	default:
+		p.lexer.Unexpected()
+		return js_ast.Expr{}
+	}
+}
+
+// parseYieldExpr parses a "yield" expression starting at "loc". The caller
+// has already consumed the "yield" keyword. A "yield*" delegates to another
+// iterator; a plain "yield" takes an optional operand unless the next token
+// terminates the expression or a newline intervenes (ASI).
+func (p *parser) parseYieldExpr(loc logger.Loc) js_ast.Expr {
+	// Check for the delegation form ("yield* iterable")
+	isStar := p.lexer.Token == js_lexer.TAsterisk
+	if isStar && !p.lexer.HasNewlineBefore {
+		p.lexer.Next()
+	}
+
+	var valueOrNil js_ast.Expr
+
+	if isStar {
+		// "yield*" always has an operand
+		valueOrNil = p.parseExpr(js_ast.LYield)
+	} else if !p.lexer.HasNewlineBefore {
+		switch p.lexer.Token {
+		case js_lexer.TCloseBrace, js_lexer.TCloseBracket, js_lexer.TCloseParen,
+			js_lexer.TColon, js_lexer.TComma, js_lexer.TSemicolon:
+			// These tokens end the enclosing construct, so there is no operand
+
+		default:
+			valueOrNil = p.parseExpr(js_ast.LYield)
+		}
+	}
+
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EYield{ValueOrNil: valueOrNil, IsStar: isStar}}
+}
+
+// willNeedBindingPattern reports whether the expression that was just parsed
+// must be converted into a destructuring binding pattern, based on the token
+// that follows it.
+func (p *parser) willNeedBindingPattern() bool {
+	switch {
+	case p.lexer.Token == js_lexer.TEquals:
+		// A destructuring assignment such as "[a] = b;"
+		return true
+
+	case p.lexer.Token == js_lexer.TIn && !p.allowIn:
+		// A for-in loop such as "for ([a] in b) {}"
+		return true
+
+	case p.lexer.Token == js_lexer.TIdentifier && !p.allowIn && p.lexer.IsContextualKeyword("of"):
+		// A for-of loop such as "for ([a] of b) {}"
+		return true
+	}
+	return false
+}
+
+// Note: The caller has already parsed the "import" keyword
+//
+// parseImportExpr parses either an "import.meta" property access or a dynamic
+// "import(...)" call expression starting at "loc". "level" is the current
+// operator precedence; a dynamic import above call precedence requires
+// parentheses and is reported as an error.
+func (p *parser) parseImportExpr(loc logger.Loc, level js_ast.L) js_ast.Expr {
+	// Parse an "import.meta" expression
+	if p.lexer.Token == js_lexer.TDot {
+		p.lexer.Next()
+		if !p.lexer.IsContextualKeyword("meta") {
+			p.lexer.ExpectedString("\"meta\"")
+		}
+		p.esmImportMeta = logger.Range{Loc: loc, Len: p.lexer.Range().End() - loc.Start}
+		p.lexer.Next()
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EImportMeta{RangeLen: p.esmImportMeta.Len}}
+	}
+
+	if level > js_ast.LCall {
+		r := js_lexer.RangeOfIdentifier(p.source, loc)
+		p.log.AddError(&p.tracker, r, "Cannot use an \"import\" expression here without parentheses:")
+	}
+
+	// Allow "in" inside call arguments
+	oldAllowIn := p.allowIn
+	p.allowIn = true
+
+	p.lexer.Expect(js_lexer.TOpenParen)
+
+	// The first argument is the module specifier expression
+	value := p.parseExpr(js_ast.LComma)
+	var optionsOrNil js_ast.Expr
+
+	if p.lexer.Token == js_lexer.TComma {
+		// "import('./foo.json', )"
+		p.lexer.Next()
+
+		if p.lexer.Token != js_lexer.TCloseParen {
+			// "import('./foo.json', { assert: { type: 'json' } })"
+			optionsOrNil = p.parseExpr(js_ast.LComma)
+
+			if p.lexer.Token == js_lexer.TComma {
+				// "import('./foo.json', { assert: { type: 'json' } }, )"
+				p.lexer.Next()
+			}
+		}
+	}
+
+	closeParenLoc := p.saveExprCommentsHere()
+	p.lexer.Expect(js_lexer.TCloseParen)
+
+	p.allowIn = oldAllowIn
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EImportCall{
+		Expr:          value,
+		OptionsOrNil:  optionsOrNil,
+		CloseParenLoc: closeParenLoc,
+	}}
+}
+
+// parseExprOrBindings parses an expression that may later need to be
+// converted into a binding pattern. Syntax that is only invalid in one of the
+// two interpretations is deferred into "errors" instead of being reported
+// immediately.
+func (p *parser) parseExprOrBindings(level js_ast.L, errors *deferredErrors) js_ast.Expr {
+	return p.parseExprCommon(level, errors, 0)
+}
+
+// parseExpr parses an expression at the given operator precedence level with
+// no deferred errors and no contextual flags.
+func (p *parser) parseExpr(level js_ast.L) js_ast.Expr {
+	return p.parseExprCommon(level, nil, 0)
+}
+
+// parseExprWithFlags parses an expression at the given operator precedence
+// level with additional contextual flags (e.g. decorator position or a
+// for-loop initializer).
+func (p *parser) parseExprWithFlags(level js_ast.L, flags exprFlag) js_ast.Expr {
+	return p.parseExprCommon(level, nil, flags)
+}
+
+// parseExprCommon is the shared implementation behind parseExpr,
+// parseExprOrBindings, and parseExprWithFlags. It parses a prefix expression,
+// applies any "@__PURE__" or "@__NO_SIDE_EFFECTS__" annotation comments that
+// preceded it, and then parses suffix operators up to "level".
+func (p *parser) parseExprCommon(level js_ast.L, errors *deferredErrors, flags exprFlag) js_ast.Expr {
+	// Capture the comment flags before parsing the prefix consumes them
+	lexerCommentFlags := p.lexer.HasCommentBefore
+	expr := p.parsePrefix(level, errors, flags)
+
+	if (lexerCommentFlags&(js_lexer.PureCommentBefore|js_lexer.NoSideEffectsCommentBefore)) != 0 && !p.options.ignoreDCEAnnotations {
+		if (lexerCommentFlags & js_lexer.NoSideEffectsCommentBefore) != 0 {
+			// "@__NO_SIDE_EFFECTS__" applies to function and arrow expressions
+			switch e := expr.Data.(type) {
+			case *js_ast.EArrow:
+				e.HasNoSideEffectsComment = true
+			case *js_ast.EFunction:
+				e.Fn.HasNoSideEffectsComment = true
+			}
+		}
+
+		// There is no formal spec for "__PURE__" comments but from reverse-
+		// engineering, it looks like they apply to the next CallExpression or
+		// NewExpression. So in "/* @__PURE__ */ a().b() + c()" the comment applies
+		// to the expression "a().b()".
+		if (lexerCommentFlags&js_lexer.PureCommentBefore) != 0 && level < js_ast.LCall {
+			expr = p.parseSuffix(expr, js_ast.LCall-1, errors, flags)
+			switch e := expr.Data.(type) {
+			case *js_ast.ECall:
+				e.CanBeUnwrappedIfUnused = true
+			case *js_ast.ENew:
+				e.CanBeUnwrappedIfUnused = true
+			}
+		}
+	}
+
+	return p.parseSuffix(expr, level, errors, flags)
+}
+
+func (p *parser) parseSuffix(left js_ast.Expr, level js_ast.L, errors *deferredErrors, flags exprFlag) js_ast.Expr {
+	optionalChain := js_ast.OptionalChainNone
+
+	for {
+		if p.lexer.Loc() == p.afterArrowBodyLoc {
+			for {
+				switch p.lexer.Token {
+				case js_lexer.TComma:
+					if level >= js_ast.LComma {
+						return left
+					}
+					p.lexer.Next()
+					left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpComma, Left: left, Right: p.parseExpr(js_ast.LComma)}}
+
+				default:
+					return left
+				}
+			}
+		}
+
+		// Stop now if this token is forbidden to follow a TypeScript "as" cast
+		if p.lexer.Loc() == p.forbidSuffixAfterAsLoc {
+			return left
+		}
+
+		// Reset the optional chain flag by default. That way we won't accidentally
+		// treat "c.d" as OptionalChainContinue in "a?.b + c.d".
+		oldOptionalChain := optionalChain
+		optionalChain = js_ast.OptionalChainNone
+
+		switch p.lexer.Token {
+		case js_lexer.TDot:
+			p.lexer.Next()
+
+			if p.lexer.Token == js_lexer.TPrivateIdentifier && p.allowPrivateIdentifiers {
+				// "a.#b"
+				// "a?.b.#c"
+				if _, ok := left.Data.(*js_ast.ESuper); ok {
+					p.lexer.Expected(js_lexer.TIdentifier)
+				}
+				name := p.lexer.Identifier
+				nameLoc := p.lexer.Loc()
+				p.reportPrivateNameUsage(name.String)
+				p.lexer.Next()
+				ref := p.storeNameInRef(name)
+				left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EIndex{
+					Target:        left,
+					Index:         js_ast.Expr{Loc: nameLoc, Data: &js_ast.EPrivateIdentifier{Ref: ref}},
+					OptionalChain: oldOptionalChain,
+				}}
+			} else {
+				// "a.b"
+				// "a?.b.c"
+				if !p.lexer.IsIdentifierOrKeyword() {
+					p.lexer.Expect(js_lexer.TIdentifier)
+				}
+				name := p.lexer.Identifier
+				nameLoc := p.lexer.Loc()
+				p.lexer.Next()
+				left = js_ast.Expr{Loc: left.Loc, Data: p.dotOrMangledPropParse(left, name, nameLoc, oldOptionalChain, wasOriginallyDot)}
+			}
+
+			optionalChain = oldOptionalChain
+
+		case js_lexer.TQuestionDot:
+			p.lexer.Next()
+			optionalStart := js_ast.OptionalChainStart
+
+			// Remove unnecessary optional chains
+			if p.options.minifySyntax {
+				if isNullOrUndefined, _, ok := js_ast.ToNullOrUndefinedWithSideEffects(left.Data); ok && !isNullOrUndefined {
+					optionalStart = js_ast.OptionalChainNone
+				}
+			}
+
+			switch p.lexer.Token {
+			case js_lexer.TOpenBracket:
+				// "a?.[b]"
+				p.lexer.Next()
+
+				// Allow "in" inside the brackets
+				oldAllowIn := p.allowIn
+				p.allowIn = true
+
+				index := p.parseExpr(js_ast.LLowest)
+
+				p.allowIn = oldAllowIn
+
+				closeBracketLoc := p.saveExprCommentsHere()
+				p.lexer.Expect(js_lexer.TCloseBracket)
+				left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EIndex{
+					Target:          left,
+					Index:           index,
+					OptionalChain:   optionalStart,
+					CloseBracketLoc: closeBracketLoc,
+				}}
+
+			case js_lexer.TOpenParen:
+				// "a?.()"
+				if level >= js_ast.LCall {
+					return left
+				}
+				kind := js_ast.NormalCall
+				if js_ast.IsPropertyAccess(left) {
+					kind = js_ast.TargetWasOriginallyPropertyAccess
+				}
+				args, closeParenLoc, isMultiLine := p.parseCallArgs()
+				left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.ECall{
+					Target:        left,
+					Args:          args,
+					CloseParenLoc: closeParenLoc,
+					OptionalChain: optionalStart,
+					IsMultiLine:   isMultiLine,
+					Kind:          kind,
+				}}
+
+			case js_lexer.TLessThan, js_lexer.TLessThanLessThan:
+				// "a?.<T>()"
+				// "a?.<<T>() => T>()"
+				if !p.options.ts.Parse {
+					p.lexer.Expected(js_lexer.TIdentifier)
+				}
+				p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
+				if p.lexer.Token != js_lexer.TOpenParen {
+					p.lexer.Expected(js_lexer.TOpenParen)
+				}
+				if level >= js_ast.LCall {
+					return left
+				}
+				kind := js_ast.NormalCall
+				if js_ast.IsPropertyAccess(left) {
+					kind = js_ast.TargetWasOriginallyPropertyAccess
+				}
+				args, closeParenLoc, isMultiLine := p.parseCallArgs()
+				left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.ECall{
+					Target:        left,
+					Args:          args,
+					CloseParenLoc: closeParenLoc,
+					OptionalChain: optionalStart,
+					IsMultiLine:   isMultiLine,
+					Kind:          kind,
+				}}
+
+			default:
+				if p.lexer.Token == js_lexer.TPrivateIdentifier && p.allowPrivateIdentifiers {
+					// "a?.#b"
+					name := p.lexer.Identifier
+					nameLoc := p.lexer.Loc()
+					p.reportPrivateNameUsage(name.String)
+					p.lexer.Next()
+					ref := p.storeNameInRef(name)
+					left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EIndex{
+						Target:        left,
+						Index:         js_ast.Expr{Loc: nameLoc, Data: &js_ast.EPrivateIdentifier{Ref: ref}},
+						OptionalChain: optionalStart,
+					}}
+				} else {
+					// "a?.b"
+					if !p.lexer.IsIdentifierOrKeyword() {
+						p.lexer.Expect(js_lexer.TIdentifier)
+					}
+					name := p.lexer.Identifier
+					nameLoc := p.lexer.Loc()
+					p.lexer.Next()
+					left = js_ast.Expr{Loc: left.Loc, Data: p.dotOrMangledPropParse(left, name, nameLoc, optionalStart, wasOriginallyDot)}
+				}
+			}
+
+			// Only continue if we have started
+			if optionalStart == js_ast.OptionalChainStart {
+				optionalChain = js_ast.OptionalChainContinue
+			}
+
+		case js_lexer.TNoSubstitutionTemplateLiteral:
+			if oldOptionalChain != js_ast.OptionalChainNone {
+				p.log.AddError(&p.tracker, p.lexer.Range(), "Template literals cannot have an optional chain as a tag")
+			}
+			headLoc := p.lexer.Loc()
+			headCooked, headRaw := p.lexer.CookedAndRawTemplateContents()
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.ETemplate{
+				TagOrNil:                       left,
+				HeadLoc:                        headLoc,
+				HeadCooked:                     headCooked,
+				HeadRaw:                        headRaw,
+				TagWasOriginallyPropertyAccess: js_ast.IsPropertyAccess(left),
+			}}
+
+		case js_lexer.TTemplateHead:
+			if oldOptionalChain != js_ast.OptionalChainNone {
+				p.log.AddError(&p.tracker, p.lexer.Range(), "Template literals cannot have an optional chain as a tag")
+			}
+			headLoc := p.lexer.Loc()
+			headCooked, headRaw := p.lexer.CookedAndRawTemplateContents()
+			parts, _ := p.parseTemplateParts(true /* includeRaw */)
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.ETemplate{
+				TagOrNil:                       left,
+				HeadLoc:                        headLoc,
+				HeadCooked:                     headCooked,
+				HeadRaw:                        headRaw,
+				Parts:                          parts,
+				TagWasOriginallyPropertyAccess: js_ast.IsPropertyAccess(left),
+			}}
+
+		case js_lexer.TOpenBracket:
+			// When parsing a decorator, ignore EIndex expressions since they may be
+			// part of a computed property:
+			//
+			//   class Foo {
+			//     @foo ['computed']() {}
+			//   }
+			//
+			// This matches the behavior of the TypeScript compiler.
+			if (flags & exprFlagDecorator) != 0 {
+				return left
+			}
+
+			p.lexer.Next()
+
+			// Allow "in" inside the brackets
+			oldAllowIn := p.allowIn
+			p.allowIn = true
+
+			index := p.parseExpr(js_ast.LLowest)
+
+			p.allowIn = oldAllowIn
+
+			closeBracketLoc := p.saveExprCommentsHere()
+			p.lexer.Expect(js_lexer.TCloseBracket)
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EIndex{
+				Target:          left,
+				Index:           index,
+				OptionalChain:   oldOptionalChain,
+				CloseBracketLoc: closeBracketLoc,
+			}}
+			optionalChain = oldOptionalChain
+
+		case js_lexer.TOpenParen:
+			if level >= js_ast.LCall {
+				return left
+			}
+			kind := js_ast.NormalCall
+			if js_ast.IsPropertyAccess(left) {
+				kind = js_ast.TargetWasOriginallyPropertyAccess
+			}
+			args, closeParenLoc, isMultiLine := p.parseCallArgs()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.ECall{
+				Target:        left,
+				Args:          args,
+				CloseParenLoc: closeParenLoc,
+				OptionalChain: oldOptionalChain,
+				IsMultiLine:   isMultiLine,
+				Kind:          kind,
+			}}
+			optionalChain = oldOptionalChain
+
+		case js_lexer.TQuestion:
+			if level >= js_ast.LConditional {
+				return left
+			}
+			p.lexer.Next()
+
+			// Stop now if we're parsing one of these:
+			// "(a?) => {}"
+			// "(a?: b) => {}"
+			// "(a?, b?) => {}"
+			if p.options.ts.Parse && left.Loc == p.latestArrowArgLoc && (p.lexer.Token == js_lexer.TColon ||
+				p.lexer.Token == js_lexer.TCloseParen || p.lexer.Token == js_lexer.TComma) {
+				if errors == nil {
+					p.lexer.Unexpected()
+				}
+				errors.invalidExprAfterQuestion = p.lexer.Range()
+				return left
+			}
+
+			// Allow "in" in between "?" and ":"
+			oldAllowIn := p.allowIn
+			p.allowIn = true
+
+			yes := p.parseExpr(js_ast.LComma)
+
+			p.allowIn = oldAllowIn
+
+			p.lexer.Expect(js_lexer.TColon)
+			no := p.parseExpr(js_ast.LComma)
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EIf{Test: left, Yes: yes, No: no}}
+
+		case js_lexer.TExclamation:
+			// Skip over TypeScript non-null assertions
+			if p.lexer.HasNewlineBefore {
+				return left
+			}
+			if !p.options.ts.Parse {
+				p.lexer.Unexpected()
+			}
+			p.lexer.Next()
+			optionalChain = oldOptionalChain
+
+		case js_lexer.TMinusMinus:
+			if p.lexer.HasNewlineBefore || level >= js_ast.LPostfix {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EUnary{Op: js_ast.UnOpPostDec, Value: left}}
+
+		case js_lexer.TPlusPlus:
+			if p.lexer.HasNewlineBefore || level >= js_ast.LPostfix {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EUnary{Op: js_ast.UnOpPostInc, Value: left}}
+
+		case js_lexer.TComma:
+			if level >= js_ast.LComma {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpComma, Left: left, Right: p.parseExpr(js_ast.LComma)}}
+
+		case js_lexer.TPlus:
+			if level >= js_ast.LAdd {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpAdd, Left: left, Right: p.parseExpr(js_ast.LAdd)}}
+
+		case js_lexer.TPlusEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpAddAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TMinus:
+			if level >= js_ast.LAdd {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpSub, Left: left, Right: p.parseExpr(js_ast.LAdd)}}
+
+		case js_lexer.TMinusEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpSubAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TAsterisk:
+			if level >= js_ast.LMultiply {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpMul, Left: left, Right: p.parseExpr(js_ast.LMultiply)}}
+
+		case js_lexer.TAsteriskAsterisk:
+			if level >= js_ast.LExponentiation {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpPow, Left: left, Right: p.parseExpr(js_ast.LExponentiation - 1)}}
+
+		case js_lexer.TAsteriskAsteriskEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpPowAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TAsteriskEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpMulAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TPercent:
+			if level >= js_ast.LMultiply {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpRem, Left: left, Right: p.parseExpr(js_ast.LMultiply)}}
+
+		case js_lexer.TPercentEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpRemAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TSlash:
+			if level >= js_ast.LMultiply {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpDiv, Left: left, Right: p.parseExpr(js_ast.LMultiply)}}
+
+		case js_lexer.TSlashEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpDivAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TEqualsEquals:
+			if level >= js_ast.LEquals {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLooseEq, Left: left, Right: p.parseExpr(js_ast.LEquals)}}
+
+		case js_lexer.TExclamationEquals:
+			if level >= js_ast.LEquals {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLooseNe, Left: left, Right: p.parseExpr(js_ast.LEquals)}}
+
+		case js_lexer.TEqualsEqualsEquals:
+			if level >= js_ast.LEquals {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpStrictEq, Left: left, Right: p.parseExpr(js_ast.LEquals)}}
+
+		case js_lexer.TExclamationEqualsEquals:
+			if level >= js_ast.LEquals {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpStrictNe, Left: left, Right: p.parseExpr(js_ast.LEquals)}}
+
+		case js_lexer.TLessThan:
+			// TypeScript allows type arguments to be specified with angle brackets
+			// inside an expression. Unlike in other languages, this unfortunately
+			// appears to require backtracking to parse.
+			if p.options.ts.Parse && p.trySkipTypeArgumentsInExpressionWithBacktracking() {
+				optionalChain = oldOptionalChain
+				continue
+			}
+
+			if level >= js_ast.LCompare {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLt, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		case js_lexer.TLessThanEquals:
+			if level >= js_ast.LCompare {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLe, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		case js_lexer.TGreaterThan:
+			if level >= js_ast.LCompare {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpGt, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		case js_lexer.TGreaterThanEquals:
+			if level >= js_ast.LCompare {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpGe, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		case js_lexer.TLessThanLessThan:
+			// TypeScript allows type arguments to be specified with angle brackets
+			// inside an expression. Unlike in other languages, this unfortunately
+			// appears to require backtracking to parse.
+			if p.options.ts.Parse && p.trySkipTypeArgumentsInExpressionWithBacktracking() {
+				optionalChain = oldOptionalChain
+				continue
+			}
+
+			if level >= js_ast.LShift {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpShl, Left: left, Right: p.parseExpr(js_ast.LShift)}}
+
+		case js_lexer.TLessThanLessThanEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpShlAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TGreaterThanGreaterThan:
+			if level >= js_ast.LShift {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpShr, Left: left, Right: p.parseExpr(js_ast.LShift)}}
+
+		case js_lexer.TGreaterThanGreaterThanEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpShrAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TGreaterThanGreaterThanGreaterThan:
+			if level >= js_ast.LShift {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpUShr, Left: left, Right: p.parseExpr(js_ast.LShift)}}
+
+		case js_lexer.TGreaterThanGreaterThanGreaterThanEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpUShrAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TQuestionQuestion:
+			if level >= js_ast.LNullishCoalescing {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpNullishCoalescing, Left: left, Right: p.parseExpr(js_ast.LNullishCoalescing)}}
+
+		case js_lexer.TQuestionQuestionEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpNullishCoalescingAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TBarBar:
+			if level >= js_ast.LLogicalOr {
+				return left
+			}
+
+			// Prevent "||" inside "??" from the right
+			if level == js_ast.LNullishCoalescing {
+				p.logNullishCoalescingErrorPrecedenceError("||")
+			}
+
+			p.lexer.Next()
+			right := p.parseExpr(js_ast.LLogicalOr)
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLogicalOr, Left: left, Right: right}}
+
+			// Prevent "||" inside "??" from the left
+			if level < js_ast.LNullishCoalescing {
+				left = p.parseSuffix(left, js_ast.LNullishCoalescing+1, nil, flags)
+				if p.lexer.Token == js_lexer.TQuestionQuestion {
+					p.logNullishCoalescingErrorPrecedenceError("||")
+				}
+			}
+
+		case js_lexer.TBarBarEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLogicalOrAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TAmpersandAmpersand:
+			if level >= js_ast.LLogicalAnd {
+				return left
+			}
+
+			// Prevent "&&" inside "??" from the right
+			if level == js_ast.LNullishCoalescing {
+				p.logNullishCoalescingErrorPrecedenceError("&&")
+			}
+
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLogicalAnd, Left: left, Right: p.parseExpr(js_ast.LLogicalAnd)}}
+
+			// Prevent "&&" inside "??" from the left
+			if level < js_ast.LNullishCoalescing {
+				left = p.parseSuffix(left, js_ast.LNullishCoalescing+1, nil, flags)
+				if p.lexer.Token == js_lexer.TQuestionQuestion {
+					p.logNullishCoalescingErrorPrecedenceError("&&")
+				}
+			}
+
+		case js_lexer.TAmpersandAmpersandEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpLogicalAndAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TBar:
+			if level >= js_ast.LBitwiseOr {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseOr, Left: left, Right: p.parseExpr(js_ast.LBitwiseOr)}}
+
+		case js_lexer.TBarEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseOrAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TAmpersand:
+			if level >= js_ast.LBitwiseAnd {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseAnd, Left: left, Right: p.parseExpr(js_ast.LBitwiseAnd)}}
+
+		case js_lexer.TAmpersandEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseAndAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TCaret:
+			if level >= js_ast.LBitwiseXor {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseXor, Left: left, Right: p.parseExpr(js_ast.LBitwiseXor)}}
+
+		case js_lexer.TCaretEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpBitwiseXorAssign, Left: left, Right: p.parseExpr(js_ast.LAssign - 1)}}
+
+		case js_lexer.TEquals:
+			if level >= js_ast.LAssign {
+				return left
+			}
+			p.lexer.Next()
+			left = js_ast.Assign(left, p.parseExpr(js_ast.LAssign-1))
+
+		case js_lexer.TIn:
+			if level >= js_ast.LCompare || !p.allowIn {
+				return left
+			}
+
+			// Warn about "!a in b" instead of "!(a in b)"
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			if e, ok := left.Data.(*js_ast.EUnary); ok && e.Op == js_ast.UnOpNot {
+				r := logger.Range{Loc: left.Loc, Len: p.source.LocBeforeWhitespace(p.lexer.Loc()).Start - left.Loc.Start}
+				data := p.tracker.MsgData(r, "Suspicious use of the \"!\" operator inside the \"in\" operator")
+				data.Location.Suggestion = fmt.Sprintf("(%s)", p.source.TextForRange(r))
+				p.log.AddMsgID(logger.MsgID_JS_SuspiciousBooleanNot, logger.Msg{
+					Kind: kind,
+					Data: data,
+					Notes: []logger.MsgData{{Text: "The code \"!x in y\" is parsed as \"(!x) in y\". " +
+						"You need to insert parentheses to get \"!(x in y)\" instead."}},
+				})
+			}
+
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpIn, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		case js_lexer.TInstanceof:
+			if level >= js_ast.LCompare {
+				return left
+			}
+
+			// Warn about "!a instanceof b" instead of "!(a instanceof b)". Here's an
+			// example of code with this problem: https://github.com/mrdoob/three.js/pull/11182.
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			if e, ok := left.Data.(*js_ast.EUnary); ok && e.Op == js_ast.UnOpNot {
+				r := logger.Range{Loc: left.Loc, Len: p.source.LocBeforeWhitespace(p.lexer.Loc()).Start - left.Loc.Start}
+				data := p.tracker.MsgData(r, "Suspicious use of the \"!\" operator inside the \"instanceof\" operator")
+				data.Location.Suggestion = fmt.Sprintf("(%s)", p.source.TextForRange(r))
+				p.log.AddMsgID(logger.MsgID_JS_SuspiciousBooleanNot, logger.Msg{
+					Kind: kind,
+					Data: data,
+					Notes: []logger.MsgData{{Text: "The code \"!x instanceof y\" is parsed as \"(!x) instanceof y\". " +
+						"You need to insert parentheses to get \"!(x instanceof y)\" instead."}},
+				})
+			}
+
+			p.lexer.Next()
+			left = js_ast.Expr{Loc: left.Loc, Data: &js_ast.EBinary{Op: js_ast.BinOpInstanceof, Left: left, Right: p.parseExpr(js_ast.LCompare)}}
+
+		default:
+			// Handle the TypeScript "as"/"satisfies" operator
+			if p.options.ts.Parse && level < js_ast.LCompare && !p.lexer.HasNewlineBefore && (p.lexer.IsContextualKeyword("as") || p.lexer.IsContextualKeyword("satisfies")) {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+
+				// These tokens are not allowed to follow a cast expression. This isn't
+				// an outright error because it may be on a new line, in which case it's
+				// the start of a new expression when it's after a cast:
+				//
+				//   x = y as z
+				//   (something);
+				//
+				switch p.lexer.Token {
+				case js_lexer.TPlusPlus, js_lexer.TMinusMinus, js_lexer.TNoSubstitutionTemplateLiteral,
+					js_lexer.TTemplateHead, js_lexer.TOpenParen, js_lexer.TOpenBracket, js_lexer.TQuestionDot:
+					p.forbidSuffixAfterAsLoc = p.lexer.Loc()
+					return left
+				}
+				if p.lexer.Token.IsAssign() {
+					p.forbidSuffixAfterAsLoc = p.lexer.Loc()
+					return left
+				}
+				continue
+			}
+
+			return left
+		}
+	}
+}
+
+// parseExprOrLetOrUsingStmt disambiguates statements that begin with the
+// identifiers "let", "using", or "await" (for "await using"). None of these
+// are reserved words in JavaScript, so they may start either a declaration
+// or a plain expression. Exactly one of the returned values is populated:
+// either the parsed expression, or the declaration statement together with
+// its declarations; the unused results are left as their zero values.
+func (p *parser) parseExprOrLetOrUsingStmt(opts parseStmtOpts) (js_ast.Expr, js_ast.Stmt, []js_ast.Decl) {
+	couldBeLet := false
+	couldBeUsing := false
+	couldBeAwaitUsing := false
+	tokenRange := p.lexer.Range()
+
+	if p.lexer.Token == js_lexer.TIdentifier {
+		raw := p.lexer.Raw()
+		couldBeLet = raw == "let"
+		couldBeUsing = raw == "using"
+		// "await using" is only possible where "await" is valid as an
+		// expression prefix (e.g. inside an async function or at the top
+		// level of a module that allows top-level await)
+		couldBeAwaitUsing = raw == "await" && p.fnOrArrowDataParse.await == allowExpr
+	}
+
+	if !couldBeLet && !couldBeUsing && !couldBeAwaitUsing {
+		// Fast path: the statement cannot be one of the ambiguous
+		// declarations, so it must be a plain expression statement
+		var flags exprFlag
+		if opts.isForLoopInit {
+			flags |= exprFlagForLoopInit
+		}
+		if opts.isForAwaitLoopInit {
+			flags |= exprFlagForAwaitLoopInit
+		}
+		return p.parseExprCommon(js_ast.LLowest, nil, flags), js_ast.Stmt{}, nil
+	}
+
+	// Consume the ambiguous identifier and remember it in case it turns out
+	// to be the start of an expression after all
+	name := p.lexer.Identifier
+	p.lexer.Next()
+
+	if couldBeLet {
+		// "export let" is always a declaration
+		isLet := opts.isExport
+		switch p.lexer.Token {
+		case js_lexer.TIdentifier, js_lexer.TOpenBracket, js_lexer.TOpenBrace:
+			// A binding target follows, so this is a "let" declaration —
+			// unless a newline intervened where ASI could apply. A "["
+			// always continues the declaration even across a newline
+			// (presumably because "let [" cannot start a valid
+			// expression statement)
+			if opts.lexicalDecl == lexicalDeclAllowAll || !p.lexer.HasNewlineBefore || p.lexer.Token == js_lexer.TOpenBracket {
+				isLet = true
+			}
+		}
+		if isLet {
+			// Handle a "let" declaration
+			if opts.lexicalDecl != lexicalDeclAllowAll {
+				p.forbidLexicalDecl(tokenRange.Loc)
+			}
+			p.markSyntaxFeature(compat.ConstAndLet, tokenRange)
+			decls := p.parseAndDeclareDecls(ast.SymbolOther, opts)
+			return js_ast.Expr{}, js_ast.Stmt{Loc: tokenRange.Loc, Data: &js_ast.SLocal{
+				Kind:     js_ast.LocalLet,
+				Decls:    decls,
+				IsExport: opts.isExport,
+			}}, decls
+		}
+	} else if couldBeUsing && p.lexer.Token == js_lexer.TIdentifier && !p.lexer.HasNewlineBefore && (!opts.isForLoopInit || p.lexer.Raw() != "of") {
+		// Handle a "using" declaration. It requires an identifier on the
+		// same line, and "using of" in a for-loop initializer is excluded
+		// because that is "for (using of ...)" with "using" as a variable
+		if opts.lexicalDecl != lexicalDeclAllowAll {
+			p.forbidLexicalDecl(tokenRange.Loc)
+		}
+		opts.isUsingStmt = true
+		decls := p.parseAndDeclareDecls(ast.SymbolConst, opts)
+		if !opts.isForLoopInit {
+			// "using" declarations must be initialized except in a
+			// for-loop head such as "for (using x of y)"
+			p.requireInitializers(js_ast.LocalUsing, decls)
+		}
+		return js_ast.Expr{}, js_ast.Stmt{Loc: tokenRange.Loc, Data: &js_ast.SLocal{
+			Kind:     js_ast.LocalUsing,
+			Decls:    decls,
+			IsExport: opts.isExport,
+		}}, decls
+	} else if couldBeAwaitUsing {
+		// Handle an "await using" declaration
+		if p.fnOrArrowDataParse.isTopLevel {
+			// Record the use of top-level await for later feature checks
+			p.topLevelAwaitKeyword = tokenRange
+		}
+		var value js_ast.Expr
+		if p.lexer.Token == js_lexer.TIdentifier && p.lexer.Raw() == "using" {
+			usingLoc := p.saveExprCommentsHere()
+			usingRange := p.lexer.Range()
+			p.lexer.Next()
+			if p.lexer.Token == js_lexer.TIdentifier && !p.lexer.HasNewlineBefore {
+				// It's an "await using" declaration if we get here
+				if opts.lexicalDecl != lexicalDeclAllowAll {
+					p.forbidLexicalDecl(usingRange.Loc)
+				}
+				opts.isUsingStmt = true
+				decls := p.parseAndDeclareDecls(ast.SymbolConst, opts)
+				if !opts.isForLoopInit {
+					p.requireInitializers(js_ast.LocalAwaitUsing, decls)
+				}
+				return js_ast.Expr{}, js_ast.Stmt{Loc: tokenRange.Loc, Data: &js_ast.SLocal{
+					Kind:     js_ast.LocalAwaitUsing,
+					Decls:    decls,
+					IsExport: opts.isExport,
+				}}, decls
+			}
+			// Not a declaration: "using" was just an ordinary identifier
+			// being awaited, as in "await using"
+			value = js_ast.Expr{Loc: usingLoc, Data: &js_ast.EIdentifier{Ref: p.storeNameInRef(js_lexer.MaybeSubstring{String: "using"})}}
+		} else {
+			value = p.parseExpr(js_ast.LPrefix)
+		}
+		if p.lexer.Token == js_lexer.TAsteriskAsterisk {
+			// "await x ** y" is a syntax error without parentheses
+			p.lexer.Unexpected()
+		}
+		value = p.parseSuffix(value, js_ast.LPrefix, nil, 0)
+		expr := js_ast.Expr{Loc: tokenRange.Loc, Data: &js_ast.EAwait{Value: value}}
+		return p.parseSuffix(expr, js_ast.LLowest, nil, 0), js_ast.Stmt{}, nil
+	}
+
+	// Parse the remainder of this expression that starts with an identifier
+	expr := js_ast.Expr{Loc: tokenRange.Loc, Data: &js_ast.EIdentifier{Ref: p.storeNameInRef(name)}}
+	return p.parseSuffix(expr, js_ast.LLowest, nil, 0), js_ast.Stmt{}, nil
+}
+
+// parseCallArgs parses a parenthesized, comma-separated call argument list,
+// including "..." spread arguments and an optional trailing comma. It
+// returns the arguments, the location of the closing ")", and whether any
+// newline appeared inside the parentheses (used to preserve multi-line
+// formatting when printing).
+func (p *parser) parseCallArgs() (args []js_ast.Expr, closeParenLoc logger.Loc, isMultiLine bool) {
+	// The "in" operator is always allowed inside call arguments, even when
+	// these arguments appear inside a "for" loop initializer
+	savedAllowIn := p.allowIn
+	p.allowIn = true
+
+	p.lexer.Expect(js_lexer.TOpenParen)
+
+	for {
+		if p.lexer.Token == js_lexer.TCloseParen {
+			break
+		}
+		isMultiLine = isMultiLine || p.lexer.HasNewlineBefore
+		argLoc := p.lexer.Loc()
+		if p.lexer.Token == js_lexer.TDotDotDot {
+			// A spread argument: "...expr"
+			p.markSyntaxFeature(compat.RestArgument, p.lexer.Range())
+			p.lexer.Next()
+			args = append(args, js_ast.Expr{Loc: argLoc, Data: &js_ast.ESpread{Value: p.parseExpr(js_ast.LComma)}})
+		} else {
+			args = append(args, p.parseExpr(js_ast.LComma))
+		}
+		if p.lexer.Token != js_lexer.TComma {
+			break
+		}
+		isMultiLine = isMultiLine || p.lexer.HasNewlineBefore
+		p.lexer.Next()
+	}
+
+	isMultiLine = isMultiLine || p.lexer.HasNewlineBefore
+	closeParenLoc = p.saveExprCommentsHere()
+	p.lexer.Expect(js_lexer.TCloseParen)
+	p.allowIn = savedAllowIn
+	return
+}
+
+// parseJSXNamespacedName parses a JSX name, which is either a plain
+// identifier or a namespaced "ns:name" pair. It returns the source range
+// covering the whole name and the name itself. Namespaced names are always
+// flattened to a single string because they cannot reference JavaScript
+// identifiers. On a malformed namespaced name it reports an error and
+// panics with js_lexer.LexerPanic (recovered by the caller — presumably at
+// the top-level parse entry point; confirm there).
+func (p *parser) parseJSXNamespacedName() (logger.Range, js_lexer.MaybeSubstring) {
+	nameRange := p.lexer.Range()
+	name := p.lexer.Identifier
+	p.lexer.ExpectInsideJSXElement(js_lexer.TIdentifier)
+
+	// Parse JSX namespaces. These are not supported by React or TypeScript
+	// but someone using JSX syntax in more obscure ways may find a use for
+	// them. A namespaced name is just always turned into a string so you
+	// can't use this feature to reference JavaScript identifiers.
+	if p.lexer.Token == js_lexer.TColon {
+		// Parse the colon, extending the range to include it
+		nameRange.Len = p.lexer.Range().End() - nameRange.Loc.Start
+		ns := name.String + ":"
+		p.lexer.NextInsideJSXElement()
+
+		// Parse the second identifier
+		if p.lexer.Token == js_lexer.TIdentifier {
+			nameRange.Len = p.lexer.Range().End() - nameRange.Loc.Start
+			ns += p.lexer.Identifier.String
+			p.lexer.NextInsideJSXElement()
+		} else {
+			p.log.AddError(&p.tracker, logger.Range{Loc: logger.Loc{Start: nameRange.End()}},
+				fmt.Sprintf("Expected identifier after %q in namespaced JSX name", ns))
+			panic(js_lexer.LexerPanic{})
+		}
+		return nameRange, js_lexer.MaybeSubstring{String: ns}
+	}
+
+	return nameRange, name
+}
+
+// tagOrFragmentHelpText describes a JSX tag for use in diagnostics. An
+// empty tag name denotes a fragment ("<>...</>").
+func tagOrFragmentHelpText(tag string) string {
+	switch tag {
+	case "":
+		return "fragment tag"
+	default:
+		return fmt.Sprintf("%q tag", tag)
+	}
+}
+
+// parseJSXTag parses the tag that follows "<" in a JSX element. It returns
+// the tag's source range, its textual name (including any "." member chain),
+// and the tag expression. A missing tag ("<>") yields an empty name and a
+// nil-data expression to signal a fragment. Lowercase names and names
+// containing "-" or ":" become string literals (intrinsic elements); other
+// names become identifier references, optionally extended by a member
+// expression chain like "<Foo.Bar.Baz>". On invalid input it reports an
+// error and panics with js_lexer.LexerPanic for the caller to recover.
+func (p *parser) parseJSXTag() (logger.Range, string, js_ast.Expr) {
+	loc := p.lexer.Loc()
+
+	// A missing tag is a fragment
+	if p.lexer.Token == js_lexer.TGreaterThan {
+		return logger.Range{Loc: loc, Len: 0}, "", js_ast.Expr{}
+	}
+
+	// The tag is an identifier
+	tagRange, tagName := p.parseJSXNamespacedName()
+
+	// Certain identifiers are strings: anything with "-" or ":" in it, or a
+	// name starting with a lowercase ASCII letter that is not the start of
+	// a member expression chain
+	if strings.ContainsAny(tagName.String, "-:") || (p.lexer.Token != js_lexer.TDot && tagName.String[0] >= 'a' && tagName.String[0] <= 'z') {
+		return tagRange, tagName.String, js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(tagName.String)}}
+	}
+
+	// Otherwise, this is an identifier
+	tag := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: p.storeNameInRef(tagName)}}
+
+	// Parse a member expression chain
+	chain := tagName.String
+	for p.lexer.Token == js_lexer.TDot {
+		p.lexer.NextInsideJSXElement()
+		memberRange := p.lexer.Range()
+		member := p.lexer.Identifier
+		p.lexer.ExpectInsideJSXElement(js_lexer.TIdentifier)
+
+		// Dashes are not allowed in member expression chains
+		index := strings.IndexByte(member.String, '-')
+		if index >= 0 {
+			p.log.AddError(&p.tracker, logger.Range{Loc: logger.Loc{Start: memberRange.Loc.Start + int32(index)}},
+				"Unexpected \"-\"")
+			panic(js_lexer.LexerPanic{})
+		}
+
+		// Extend both the textual chain and the property-access expression
+		chain += "." + member.String
+		tag = js_ast.Expr{Loc: loc, Data: p.dotOrMangledPropParse(tag, member, memberRange.Loc, js_ast.OptionalChainNone, wasOriginallyDot)}
+		tagRange.Len = memberRange.Loc.Start + memberRange.Len - tagRange.Loc.Start
+	}
+
+	return tagRange, chain, tag
+}
+
+func (p *parser) parseJSXElement(loc logger.Loc) js_ast.Expr {
+	// Keep track of the location of the first JSX element for error messages
+	if p.firstJSXElementLoc.Start == -1 {
+		p.firstJSXElementLoc = loc
+	}
+
+	// Parse the tag
+	startRange, startText, startTagOrNil := p.parseJSXTag()
+
+	// The tag may have TypeScript type arguments: "<Foo<T>/>"
+	if p.options.ts.Parse {
+		// Pass a flag to the type argument skipper because we need to call
+		// js_lexer.NextInsideJSXElement() after we hit the closing ">". The next
+		// token after the ">" might be an attribute name with a dash in it
+		// like this: "<Foo<T> data-disabled/>"
+		p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{isInsideJSXElement: true})
+	}
+
+	// Parse attributes
+	var previousStringWithBackslashLoc logger.Loc
+	properties := []js_ast.Property{}
+	isSingleLine := true
+	if startTagOrNil.Data != nil {
+	parseAttributes:
+		for {
+			if p.lexer.HasNewlineBefore {
+				isSingleLine = false
+			}
+
+			switch p.lexer.Token {
+			case js_lexer.TIdentifier:
+				// Parse the key
+				keyRange, keyName := p.parseJSXNamespacedName()
+				var key js_ast.Expr
+				if p.isMangledProp(keyName.String) && !strings.ContainsRune(keyName.String, ':') {
+					key = js_ast.Expr{Loc: keyRange.Loc, Data: &js_ast.ENameOfSymbol{Ref: p.storeNameInRef(keyName)}}
+				} else {
+					key = js_ast.Expr{Loc: keyRange.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(keyName.String)}}
+				}
+
+				// Parse the value
+				var value js_ast.Expr
+				var flags js_ast.PropertyFlags
+				if p.lexer.Token != js_lexer.TEquals {
+					// Implicitly true value
+					flags |= js_ast.PropertyWasShorthand
+					value = js_ast.Expr{Loc: logger.Loc{Start: keyRange.Loc.Start + keyRange.Len}, Data: &js_ast.EBoolean{Value: true}}
+				} else {
+					// Use NextInsideJSXElement() not Next() so we can parse a JSX-style string literal
+					p.lexer.NextInsideJSXElement()
+					if p.lexer.Token == js_lexer.TStringLiteral {
+						stringLoc := p.lexer.Loc()
+						if p.lexer.PreviousBackslashQuoteInJSX.Loc.Start > stringLoc.Start {
+							previousStringWithBackslashLoc = stringLoc
+						}
+						if p.options.jsx.Preserve {
+							value = js_ast.Expr{Loc: stringLoc, Data: &js_ast.EJSXText{Raw: p.lexer.Raw()}}
+						} else {
+							value = js_ast.Expr{Loc: stringLoc, Data: &js_ast.EString{Value: p.lexer.StringLiteral()}}
+						}
+						p.lexer.NextInsideJSXElement()
+					} else if p.lexer.Token == js_lexer.TLessThan {
+						// This may be removed in the future: https://github.com/facebook/jsx/issues/53
+						loc := p.lexer.Loc()
+						p.lexer.NextInsideJSXElement()
+						flags |= js_ast.PropertyWasShorthand
+						value = p.parseJSXElement(loc)
+
+						// The call to parseJSXElement() above doesn't consume the last
+						// TGreaterThan because the caller knows what Next() function to call.
+						// Use NextJSXElementChild() here since the next token is inside a JSX
+						// element.
+						p.lexer.NextInsideJSXElement()
+					} else {
+						// Use Expect() not ExpectInsideJSXElement() so we can parse expression tokens
+						p.lexer.Expect(js_lexer.TOpenBrace)
+						value = p.parseExpr(js_ast.LLowest)
+						p.lexer.ExpectInsideJSXElement(js_lexer.TCloseBrace)
+					}
+				}
+
+				// Add a property
+				properties = append(properties, js_ast.Property{
+					Loc:        keyRange.Loc,
+					Key:        key,
+					ValueOrNil: value,
+					Flags:      flags,
+				})
+
+			case js_lexer.TOpenBrace:
+				// Use Next() not ExpectInsideJSXElement() so we can parse "..."
+				p.lexer.Next()
+				dotLoc := p.saveExprCommentsHere()
+				p.lexer.Expect(js_lexer.TDotDotDot)
+				value := p.parseExpr(js_ast.LComma)
+				properties = append(properties, js_ast.Property{
+					Kind:       js_ast.PropertySpread,
+					Loc:        dotLoc,
+					ValueOrNil: value,
+				})
+
+				// Use NextInsideJSXElement() not Next() so we can parse ">>" as ">"
+				p.lexer.NextInsideJSXElement()
+
+			default:
+				break parseAttributes
+			}
+		}
+
+		// Check for and warn about duplicate attributes
+		if len(properties) > 1 && !p.suppressWarningsAboutWeirdCode {
+			keys := make(map[string]logger.Loc)
+			for _, property := range properties {
+				if property.Kind != js_ast.PropertySpread {
+					if str, ok := property.Key.Data.(*js_ast.EString); ok {
+						key := helpers.UTF16ToString(str.Value)
+						if prevLoc, ok := keys[key]; ok {
+							r := js_lexer.RangeOfIdentifier(p.source, property.Key.Loc)
+							p.log.AddIDWithNotes(logger.MsgID_JS_DuplicateObjectKey, logger.Warning, &p.tracker, r,
+								fmt.Sprintf("Duplicate %q attribute in JSX element", key),
+								[]logger.MsgData{p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, prevLoc),
+									fmt.Sprintf("The original %q attribute is here:", key))})
+						}
+						keys[key] = property.Key.Loc
+					}
+				}
+			}
+		}
+	}
+
+	// People sometimes try to use the output of "JSON.stringify()" as a JSX
+	// attribute when automatically-generating JSX code. Doing so is incorrect
+	// because JSX strings work like XML instead of like JS (since JSX is XML-in-
+	// JS). Specifically, using a backslash before a quote does not cause it to
+	// be escaped:
+	//
+	//   JSX ends the "content" attribute here and sets "content" to 'some so-called \\'
+	//                                          v
+	//         <Button content="some so-called \"button text\"" />
+	//                                                      ^
+	//       There is no "=" after the JSX attribute "text", so we expect a ">"
+	//
+	// This code special-cases this error to provide a less obscure error message.
+	if p.lexer.Token == js_lexer.TSyntaxError && p.lexer.Raw() == "\\" && previousStringWithBackslashLoc.Start > 0 {
+		msg := logger.Msg{Kind: logger.Error, Data: p.tracker.MsgData(p.lexer.Range(),
+			"Unexpected backslash in JSX element")}
+
+		// Option 1: Suggest using an XML escape
+		jsEscape := p.source.TextForRange(p.lexer.PreviousBackslashQuoteInJSX)
+		xmlEscape := ""
+		if jsEscape == "\\\"" {
+			xmlEscape = "&quot;"
+		} else if jsEscape == "\\'" {
+			xmlEscape = "&apos;"
+		}
+		if xmlEscape != "" {
+			data := p.tracker.MsgData(p.lexer.PreviousBackslashQuoteInJSX,
+				"Quoted JSX attributes use XML-style escapes instead of JavaScript-style escapes:")
+			data.Location.Suggestion = xmlEscape
+			msg.Notes = append(msg.Notes, data)
+		}
+
+		// Option 2: Suggest using a JavaScript string
+		if stringRange := p.source.RangeOfString(previousStringWithBackslashLoc); stringRange.Len > 0 {
+			data := p.tracker.MsgData(stringRange,
+				"Consider using a JavaScript string inside {...} instead of a quoted JSX attribute:")
+			data.Location.Suggestion = fmt.Sprintf("{%s}", p.source.TextForRange(stringRange))
+			msg.Notes = append(msg.Notes, data)
+		}
+
+		p.log.AddMsg(msg)
+		panic(js_lexer.LexerPanic{})
+	}
+
+	// A slash here is a self-closing element
+	if p.lexer.Token == js_lexer.TSlash {
+		// Use NextInsideJSXElement() not Next() so we can parse ">>" as ">"
+		closeLoc := p.lexer.Loc()
+		p.lexer.NextInsideJSXElement()
+		if p.lexer.Token != js_lexer.TGreaterThan {
+			p.lexer.Expected(js_lexer.TGreaterThan)
+		}
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EJSXElement{
+			TagOrNil:        startTagOrNil,
+			Properties:      properties,
+			CloseLoc:        closeLoc,
+			IsTagSingleLine: isSingleLine,
+		}}
+	}
+
+	// Attempt to provide a better error message for people incorrectly trying to
+	// use arrow functions in TSX (which doesn't work because they are JSX elements)
+	if p.options.ts.Parse && len(properties) == 0 && startText != "" && p.lexer.Token == js_lexer.TGreaterThan &&
+		strings.HasPrefix(p.source.Contents[p.lexer.Loc().Start:], ">(") {
+		badArrowInTSXRange := p.lexer.BadArrowInTSXRange
+		badArrowInTSXSuggestion := p.lexer.BadArrowInTSXSuggestion
+
+		p.lexer.CouldBeBadArrowInTSX++
+		p.lexer.BadArrowInTSXRange = logger.Range{Loc: loc, Len: p.lexer.Range().End() - loc.Start}
+		p.lexer.BadArrowInTSXSuggestion = fmt.Sprintf("<%s,>", startText)
+
+		defer func() {
+			p.lexer.CouldBeBadArrowInTSX--
+			p.lexer.BadArrowInTSXRange = badArrowInTSXRange
+			p.lexer.BadArrowInTSXSuggestion = badArrowInTSXSuggestion
+		}()
+	}
+
+	// Use ExpectJSXElementChild() so we parse child strings
+	p.lexer.ExpectJSXElementChild(js_lexer.TGreaterThan)
+
+	// Parse the children of this element
+	nullableChildren := []js_ast.Expr{}
+	for {
+		switch p.lexer.Token {
+		case js_lexer.TStringLiteral:
+			if p.options.jsx.Preserve {
+				nullableChildren = append(nullableChildren, js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EJSXText{Raw: p.lexer.Raw()}})
+			} else if str := p.lexer.StringLiteral(); len(str) > 0 {
+				nullableChildren = append(nullableChildren, js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EString{Value: str}})
+			} else {
+				// Skip this token if it turned out to be empty after trimming
+			}
+			p.lexer.NextJSXElementChild()
+
+		case js_lexer.TOpenBrace:
+			// Use Next() instead of NextJSXElementChild() here since the next token is an expression
+			p.lexer.Next()
+
+			// The expression is optional, and may be absent
+			if p.lexer.Token == js_lexer.TCloseBrace {
+				// Save comments even for absent expressions
+				nullableChildren = append(nullableChildren, js_ast.Expr{Loc: p.saveExprCommentsHere(), Data: nil})
+			} else {
+				if p.lexer.Token == js_lexer.TDotDotDot {
+					// TypeScript preserves "..." before JSX child expressions here.
+					// Babel gives the error "Spread children are not supported in React"
+					// instead, so it should be safe to support this TypeScript-specific
+					// behavior. Note that TypeScript's behavior changed in TypeScript 4.5.
+					// Before that, the "..." was omitted instead of being preserved.
+					itemLoc := p.lexer.Loc()
+					p.markSyntaxFeature(compat.RestArgument, p.lexer.Range())
+					p.lexer.Next()
+					nullableChildren = append(nullableChildren, js_ast.Expr{Loc: itemLoc, Data: &js_ast.ESpread{Value: p.parseExpr(js_ast.LLowest)}})
+				} else {
+					nullableChildren = append(nullableChildren, p.parseExpr(js_ast.LLowest))
+				}
+			}
+
+			// Use ExpectJSXElementChild() so we parse child strings
+			p.lexer.ExpectJSXElementChild(js_lexer.TCloseBrace)
+
+		case js_lexer.TLessThan:
+			lessThanLoc := p.lexer.Loc()
+			p.lexer.NextInsideJSXElement()
+
+			if p.lexer.Token != js_lexer.TSlash {
+				// This is a child element
+				nullableChildren = append(nullableChildren, p.parseJSXElement(lessThanLoc))
+
+				// The call to parseJSXElement() above doesn't consume the last
+				// TGreaterThan because the caller knows what Next() function to call.
+				// Use NextJSXElementChild() here since the next token is an element
+				// child.
+				p.lexer.NextJSXElementChild()
+				continue
+			}
+
+			// This is the closing element
+			p.lexer.NextInsideJSXElement()
+			endRange, endText, _ := p.parseJSXTag()
+			if startText != endText {
+				startTag := tagOrFragmentHelpText(startText)
+				endTag := tagOrFragmentHelpText(endText)
+				msg := logger.Msg{
+					Kind:  logger.Error,
+					Data:  p.tracker.MsgData(endRange, fmt.Sprintf("Unexpected closing %s does not match opening %s", endTag, startTag)),
+					Notes: []logger.MsgData{p.tracker.MsgData(startRange, fmt.Sprintf("The opening %s is here:", startTag))},
+				}
+				msg.Data.Location.Suggestion = startText
+				p.log.AddMsg(msg)
+			}
+			if p.lexer.Token != js_lexer.TGreaterThan {
+				p.lexer.Expected(js_lexer.TGreaterThan)
+			}
+
+			return js_ast.Expr{Loc: loc, Data: &js_ast.EJSXElement{
+				TagOrNil:         startTagOrNil,
+				Properties:       properties,
+				NullableChildren: nullableChildren,
+				CloseLoc:         lessThanLoc,
+				IsTagSingleLine:  isSingleLine,
+			}}
+
+		case js_lexer.TEndOfFile:
+			startTag := tagOrFragmentHelpText(startText)
+			msg := logger.Msg{
+				Kind:  logger.Error,
+				Data:  p.tracker.MsgData(p.lexer.Range(), fmt.Sprintf("Unexpected end of file before a closing %s", startTag)),
+				Notes: []logger.MsgData{p.tracker.MsgData(startRange, fmt.Sprintf("The opening %s is here:", startTag))},
+			}
+			msg.Data.Location.Suggestion = fmt.Sprintf("</%s>", startText)
+			p.log.AddMsg(msg)
+			panic(js_lexer.LexerPanic{})
+
+		default:
+			p.lexer.Unexpected()
+		}
+	}
+}
+
+func (p *parser) parseTemplateParts(includeRaw bool) (parts []js_ast.TemplatePart, legacyOctalLoc logger.Loc) {
+	// Allow "in" inside template literals
+	oldAllowIn := p.allowIn
+	p.allowIn = true
+
+	for {
+		p.lexer.Next()
+		value := p.parseExpr(js_ast.LLowest)
+		tailLoc := p.lexer.Loc()
+		p.lexer.RescanCloseBraceAsTemplateToken()
+		if includeRaw {
+			tailCooked, tailRaw := p.lexer.CookedAndRawTemplateContents()
+			parts = append(parts, js_ast.TemplatePart{
+				Value:      value,
+				TailLoc:    tailLoc,
+				TailCooked: tailCooked,
+				TailRaw:    tailRaw,
+			})
+		} else {
+			parts = append(parts, js_ast.TemplatePart{
+				Value:      value,
+				TailLoc:    tailLoc,
+				TailCooked: p.lexer.StringLiteral(),
+			})
+			if p.lexer.LegacyOctalLoc.Start > tailLoc.Start {
+				legacyOctalLoc = p.lexer.LegacyOctalLoc
+			}
+		}
+		if p.lexer.Token == js_lexer.TTemplateTail {
+			p.lexer.Next()
+			break
+		}
+	}
+
+	p.allowIn = oldAllowIn
+
+	return parts, legacyOctalLoc
+}
+
// parseAndDeclareDecls parses a comma-separated declaration list for a "var",
// "let", "const", or "using" statement. Each binding is declared into the
// current scope with the given symbol kind, TypeScript-only syntax ("!"
// definite assignment assertions and ": type" annotations) is skipped, and
// any "=" initializer is parsed. A "@__NO_SIDE_EFFECTS__" comment on the
// statement is propagated onto a function or arrow initializer, but only for
// the first declaration and only when the kind is "const" (matching Rollup).
func (p *parser) parseAndDeclareDecls(kind ast.SymbolKind, opts parseStmtOpts) []js_ast.Decl {
	decls := []js_ast.Decl{}

	for {
		// Forbid "let let" and "const let" but not "var let"
		if (kind == ast.SymbolOther || kind == ast.SymbolConst) && p.lexer.IsContextualKeyword("let") {
			p.log.AddError(&p.tracker, p.lexer.Range(), "Cannot use \"let\" as an identifier here:")
		}

		var valueOrNil js_ast.Expr
		local := p.parseBinding(parseBindingOpts{isUsingStmt: opts.isUsingStmt})
		p.declareBinding(kind, local, opts)

		// Skip over types
		if p.options.ts.Parse {
			// "let foo!"
			isDefiniteAssignmentAssertion := p.lexer.Token == js_lexer.TExclamation && !p.lexer.HasNewlineBefore
			if isDefiniteAssignmentAssertion {
				p.lexer.Next()
			}

			// "let foo: number" (a "!" assertion requires a following type annotation)
			if isDefiniteAssignmentAssertion || p.lexer.Token == js_lexer.TColon {
				p.lexer.Expect(js_lexer.TColon)
				p.skipTypeScriptType(js_ast.LLowest)
			}
		}

		if p.lexer.Token == js_lexer.TEquals {
			p.lexer.Next()
			valueOrNil = p.parseExpr(js_ast.LComma)

			// Rollup (the tool that invented the "@__NO_SIDE_EFFECTS__" comment) only
			// applies this to the first declaration, and only when it's a "const".
			// For more info see: https://github.com/rollup/rollup/pull/5024/files
			if !p.options.ignoreDCEAnnotations && kind == ast.SymbolConst {
				switch e := valueOrNil.Data.(type) {
				case *js_ast.EArrow:
					if opts.hasNoSideEffectsComment {
						e.HasNoSideEffectsComment = true
					}
					if e.HasNoSideEffectsComment && !opts.isTypeScriptDeclare {
						if b, ok := local.Data.(*js_ast.BIdentifier); ok {
							p.symbols[b.Ref.InnerIndex].Flags |= ast.CallCanBeUnwrappedIfUnused
						}
					}

				case *js_ast.EFunction:
					if opts.hasNoSideEffectsComment {
						e.Fn.HasNoSideEffectsComment = true
					}
					if e.Fn.HasNoSideEffectsComment && !opts.isTypeScriptDeclare {
						if b, ok := local.Data.(*js_ast.BIdentifier); ok {
							p.symbols[b.Ref.InnerIndex].Flags |= ast.CallCanBeUnwrappedIfUnused
						}
					}
				}

				// Only apply this to the first declaration
				opts.hasNoSideEffectsComment = false
			}
		}

		decls = append(decls, js_ast.Decl{Binding: local, ValueOrNil: valueOrNil})

		if p.lexer.Token != js_lexer.TComma {
			break
		}
		p.lexer.Next()
	}

	return decls
}
+
+func (p *parser) requireInitializers(kind js_ast.LocalKind, decls []js_ast.Decl) {
+	for _, d := range decls {
+		if d.ValueOrNil.Data == nil {
+			what := "constant"
+			if kind == js_ast.LocalUsing {
+				what = "declaration"
+			}
+			if id, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+				r := js_lexer.RangeOfIdentifier(p.source, d.Binding.Loc)
+				p.log.AddError(&p.tracker, r,
+					fmt.Sprintf("The %s %q must be initialized", what, p.symbols[id.Ref.InnerIndex].OriginalName))
+			} else {
+				p.log.AddError(&p.tracker, logger.Range{Loc: d.Binding.Loc},
+					fmt.Sprintf("This %s must be initialized", what))
+			}
+		}
+	}
+}
+
+func (p *parser) forbidInitializers(decls []js_ast.Decl, loopType string, isVar bool) {
+	if len(decls) > 1 {
+		p.log.AddError(&p.tracker, logger.Range{Loc: decls[0].Binding.Loc},
+			fmt.Sprintf("for-%s loops must have a single declaration", loopType))
+	} else if len(decls) == 1 && decls[0].ValueOrNil.Data != nil {
+		if isVar {
+			if _, ok := decls[0].Binding.Data.(*js_ast.BIdentifier); ok {
+				// This is a weird special case. Initializers are allowed in "var"
+				// statements with identifier bindings.
+				return
+			}
+		}
+		p.log.AddError(&p.tracker, logger.Range{Loc: decls[0].ValueOrNil.Loc},
+			fmt.Sprintf("for-%s loop variables cannot have an initializer", loopType))
+	}
+}
+
+func (p *parser) parseClauseAlias(kind string) js_lexer.MaybeSubstring {
+	loc := p.lexer.Loc()
+
+	// The alias may now be a string (see https://github.com/tc39/ecma262/pull/2154)
+	if p.lexer.Token == js_lexer.TStringLiteral {
+		r := p.source.RangeOfString(loc)
+		alias, problem, ok := helpers.UTF16ToStringWithValidation(p.lexer.StringLiteral())
+		if !ok {
+			p.log.AddError(&p.tracker, r,
+				fmt.Sprintf("This %s alias is invalid because it contains the unpaired Unicode surrogate U+%X", kind, problem))
+		}
+		return js_lexer.MaybeSubstring{String: alias}
+	}
+
+	// The alias may be a keyword
+	if !p.lexer.IsIdentifierOrKeyword() {
+		p.lexer.Expect(js_lexer.TIdentifier)
+	}
+
+	alias := p.lexer.Identifier
+	p.checkForUnrepresentableIdentifier(loc, alias.String)
+	return alias
+}
+
// parseImportClause parses a named import clause, i.e. the "{...}" part of
// "import { a, b as c } from 'path'". It returns the parsed clause items and
// whether the whole clause was written on a single line (used to preserve
// formatting when printing). In TypeScript mode, type-only entries such as
// "import { type xx as yy }" are parsed but produce no clause item.
func (p *parser) parseImportClause() ([]js_ast.ClauseItem, bool) {
	items := []js_ast.ClauseItem{}
	p.lexer.Expect(js_lexer.TOpenBrace)
	isSingleLine := !p.lexer.HasNewlineBefore

	for p.lexer.Token != js_lexer.TCloseBrace {
		// Remember whether the name was a real identifier (keywords need an "as" alias)
		isIdentifier := p.lexer.Token == js_lexer.TIdentifier
		aliasLoc := p.lexer.Loc()
		alias := p.parseClauseAlias("import")
		name := ast.LocRef{Loc: aliasLoc, Ref: p.storeNameInRef(alias)}
		originalName := alias
		p.lexer.Next()

		// "import { type xx } from 'mod'"
		// "import { type xx as yy } from 'mod'"
		// "import { type 'xx' as yy } from 'mod'"
		// "import { type as } from 'mod'"
		// "import { type as as } from 'mod'"
		// "import { type as as as } from 'mod'"
		if p.options.ts.Parse && alias.String == "type" && p.lexer.Token != js_lexer.TComma && p.lexer.Token != js_lexer.TCloseBrace {
			if p.lexer.IsContextualKeyword("as") {
				p.lexer.Next()
				if p.lexer.IsContextualKeyword("as") {
					originalName = p.lexer.Identifier
					name = ast.LocRef{Loc: p.lexer.Loc(), Ref: p.storeNameInRef(originalName)}
					p.lexer.Next()

					if p.lexer.Token == js_lexer.TIdentifier {
						// "import { type as as as } from 'mod'"
						// "import { type as as foo } from 'mod'"
						p.lexer.Next()
					} else {
						// "import { type as as } from 'mod'"
						items = append(items, js_ast.ClauseItem{
							Alias:        alias.String,
							AliasLoc:     aliasLoc,
							Name:         name,
							OriginalName: originalName.String,
						})
					}
				} else if p.lexer.Token == js_lexer.TIdentifier {
					// "import { type as xxx } from 'mod'"
					originalName = p.lexer.Identifier
					name = ast.LocRef{Loc: p.lexer.Loc(), Ref: p.storeNameInRef(originalName)}
					p.lexer.Expect(js_lexer.TIdentifier)

					// Reject forbidden names
					if isEvalOrArguments(originalName.String) {
						r := js_lexer.RangeOfIdentifier(p.source, name.Loc)
						p.log.AddError(&p.tracker, r, fmt.Sprintf("Cannot use %q as an identifier here:", originalName.String))
					}

					items = append(items, js_ast.ClauseItem{
						Alias:        alias.String,
						AliasLoc:     aliasLoc,
						Name:         name,
						OriginalName: originalName.String,
					})
				}
			} else {
				isIdentifier := p.lexer.Token == js_lexer.TIdentifier

				// "import { type xx } from 'mod'"
				// "import { type xx as yy } from 'mod'"
				// "import { type if as yy } from 'mod'"
				// "import { type 'xx' as yy } from 'mod'"
				// These are type-only imports, so no clause item is produced
				p.parseClauseAlias("import")
				p.lexer.Next()

				if p.lexer.IsContextualKeyword("as") {
					p.lexer.Next()
					p.lexer.Expect(js_lexer.TIdentifier)
				} else if !isIdentifier {
					// An import where the name is a keyword must have an alias
					p.lexer.ExpectedString("\"as\"")
				}
			}
		} else {
			if p.lexer.IsContextualKeyword("as") {
				p.lexer.Next()
				originalName = p.lexer.Identifier
				name = ast.LocRef{Loc: p.lexer.Loc(), Ref: p.storeNameInRef(originalName)}
				p.lexer.Expect(js_lexer.TIdentifier)
			} else if !isIdentifier {
				// An import where the name is a keyword must have an alias
				p.lexer.ExpectedString("\"as\"")
			}

			// Reject forbidden names
			if isEvalOrArguments(originalName.String) {
				r := js_lexer.RangeOfIdentifier(p.source, name.Loc)
				p.log.AddError(&p.tracker, r, fmt.Sprintf("Cannot use %q as an identifier here:", originalName.String))
			}

			items = append(items, js_ast.ClauseItem{
				Alias:        alias.String,
				AliasLoc:     aliasLoc,
				Name:         name,
				OriginalName: originalName.String,
			})
		}

		if p.lexer.Token != js_lexer.TComma {
			break
		}
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		p.lexer.Next()
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
	}

	if p.lexer.HasNewlineBefore {
		isSingleLine = false
	}
	p.lexer.Expect(js_lexer.TCloseBrace)
	return items, isSingleLine
}
+
// parseExportClause parses the "{...}" part of an export statement. Keywords
// are tentatively accepted as names because they are valid in re-exports
// ("export { default } from 'path'"); the location of the first non-identifier
// name is recorded and an error is raised afterwards if no "from" follows.
// Returns the clause items and whether the clause was on a single line. In
// TypeScript mode, type-only entries like "export { type xx }" are parsed but
// produce no clause item.
func (p *parser) parseExportClause() ([]js_ast.ClauseItem, bool) {
	items := []js_ast.ClauseItem{}
	firstNonIdentifierLoc := logger.Loc{}
	p.lexer.Expect(js_lexer.TOpenBrace)
	isSingleLine := !p.lexer.HasNewlineBefore

	for p.lexer.Token != js_lexer.TCloseBrace {
		alias := p.parseClauseAlias("export")
		aliasLoc := p.lexer.Loc()
		name := ast.LocRef{Loc: aliasLoc, Ref: p.storeNameInRef(alias)}
		originalName := alias

		// The name can actually be a keyword if we're really an "export from"
		// statement. However, we won't know until later. Allow keywords as
		// identifiers for now and throw an error later if there's no "from".
		//
		//   // This is fine
		//   export { default } from 'path'
		//
		//   // This is a syntax error
		//   export { default }
		//
		if p.lexer.Token != js_lexer.TIdentifier && firstNonIdentifierLoc.Start == 0 {
			firstNonIdentifierLoc = p.lexer.Loc()
		}
		p.lexer.Next()

		if p.options.ts.Parse && alias.String == "type" && p.lexer.Token != js_lexer.TComma && p.lexer.Token != js_lexer.TCloseBrace {
			if p.lexer.IsContextualKeyword("as") {
				p.lexer.Next()
				if p.lexer.IsContextualKeyword("as") {
					alias = p.parseClauseAlias("export")
					aliasLoc = p.lexer.Loc()
					p.lexer.Next()

					if p.lexer.Token != js_lexer.TComma && p.lexer.Token != js_lexer.TCloseBrace {
						// "export { type as as as }"
						// "export { type as as foo }"
						// "export { type as as 'foo' }"
						p.parseClauseAlias("export")
						p.lexer.Next()
					} else {
						// "export { type as as }"
						items = append(items, js_ast.ClauseItem{
							Alias:        alias.String,
							AliasLoc:     aliasLoc,
							Name:         name,
							OriginalName: originalName.String,
						})
					}
				} else if p.lexer.Token != js_lexer.TComma && p.lexer.Token != js_lexer.TCloseBrace {
					// "export { type as xxx }"
					// "export { type as 'xxx' }"
					alias = p.parseClauseAlias("export")
					aliasLoc = p.lexer.Loc()
					p.lexer.Next()

					items = append(items, js_ast.ClauseItem{
						Alias:        alias.String,
						AliasLoc:     aliasLoc,
						Name:         name,
						OriginalName: originalName.String,
					})
				}
			} else {
				// The name can actually be a keyword if we're really an "export from"
				// statement. However, we won't know until later. Allow keywords as
				// identifiers for now and throw an error later if there's no "from".
				//
				//   // This is fine
				//   export { type default } from 'path'
				//
				//   // This is a syntax error
				//   export { type default }
				//
				if p.lexer.Token != js_lexer.TIdentifier && firstNonIdentifierLoc.Start == 0 {
					firstNonIdentifierLoc = p.lexer.Loc()
				}

				// "export { type xx }"
				// "export { type xx as yy }"
				// "export { type xx as if }"
				// "export { type default } from 'path'"
				// "export { type default as if } from 'path'"
				// "export { type xx as 'yy' }"
				// "export { type 'xx' } from 'mod'"
				// These are type-only exports, so no clause item is produced
				p.parseClauseAlias("export")
				p.lexer.Next()

				if p.lexer.IsContextualKeyword("as") {
					p.lexer.Next()
					p.parseClauseAlias("export")
					p.lexer.Next()
				}
			}
		} else {
			if p.lexer.IsContextualKeyword("as") {
				p.lexer.Next()
				alias = p.parseClauseAlias("export")
				aliasLoc = p.lexer.Loc()
				p.lexer.Next()
			}

			items = append(items, js_ast.ClauseItem{
				Alias:        alias.String,
				AliasLoc:     aliasLoc,
				Name:         name,
				OriginalName: originalName.String,
			})
		}

		if p.lexer.Token != js_lexer.TComma {
			break
		}
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		p.lexer.Next()
		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
	}

	if p.lexer.HasNewlineBefore {
		isSingleLine = false
	}
	p.lexer.Expect(js_lexer.TCloseBrace)

	// Throw an error here if we found a keyword earlier and this isn't an
	// "export from" statement after all
	if firstNonIdentifierLoc.Start != 0 && !p.lexer.IsContextualKeyword("from") {
		r := js_lexer.RangeOfIdentifier(p.source, firstNonIdentifierLoc)
		p.log.AddError(&p.tracker, r, fmt.Sprintf("Expected identifier but found %q", p.source.TextForRange(r)))
		panic(js_lexer.LexerPanic{})
	}

	return items, isSingleLine
}
+
// parseBindingOpts holds options for parseBinding. Destructuring patterns
// are rejected in "using" statements, so that case is tracked here.
type parseBindingOpts struct {
	isUsingStmt bool
}
+
// parseBinding parses a binding target: a plain identifier, an array
// destructuring pattern ("[a, b = 1, ...rest]"), or an object destructuring
// pattern ("{a, b: c}"). When opts.isUsingStmt is set, destructuring patterns
// are not allowed, so those cases break out and fall through to the
// TIdentifier error at the bottom.
func (p *parser) parseBinding(opts parseBindingOpts) js_ast.Binding {
	loc := p.lexer.Loc()

	switch p.lexer.Token {
	case js_lexer.TIdentifier:
		name := p.lexer.Identifier

		// Forbid invalid identifiers
		if (p.fnOrArrowDataParse.await != allowIdent && name.String == "await") ||
			(p.fnOrArrowDataParse.yield != allowIdent && name.String == "yield") {
			p.log.AddError(&p.tracker, p.lexer.Range(), fmt.Sprintf("Cannot use %q as an identifier here:", name.String))
		}

		ref := p.storeNameInRef(name)
		p.lexer.Next()
		return js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: ref}}

	case js_lexer.TOpenBracket:
		// Array destructuring is not allowed in a "using" statement
		if opts.isUsingStmt {
			break
		}
		p.markSyntaxFeature(compat.Destructuring, p.lexer.Range())
		p.lexer.Next()
		isSingleLine := !p.lexer.HasNewlineBefore
		items := []js_ast.ArrayBinding{}
		hasSpread := false

		// "in" expressions are allowed
		oldAllowIn := p.allowIn
		p.allowIn = true

		for p.lexer.Token != js_lexer.TCloseBracket {
			itemLoc := p.saveExprCommentsHere()

			if p.lexer.Token == js_lexer.TComma {
				// A bare comma is a hole in the pattern ("[a, , b]")
				binding := js_ast.Binding{Loc: itemLoc, Data: js_ast.BMissingShared}
				items = append(items, js_ast.ArrayBinding{
					Binding: binding,
					Loc:     itemLoc,
				})
			} else {
				if p.lexer.Token == js_lexer.TDotDotDot {
					p.lexer.Next()
					hasSpread = true

					// This was a bug in the ES2015 spec that was fixed in ES2016
					if p.lexer.Token != js_lexer.TIdentifier {
						p.markSyntaxFeature(compat.NestedRestBinding, p.lexer.Range())
					}
				}

				p.saveExprCommentsHere()
				binding := p.parseBinding(parseBindingOpts{})

				var defaultValueOrNil js_ast.Expr
				if !hasSpread && p.lexer.Token == js_lexer.TEquals {
					p.lexer.Next()
					defaultValueOrNil = p.parseExpr(js_ast.LComma)
				}

				items = append(items, js_ast.ArrayBinding{
					Binding:           binding,
					DefaultValueOrNil: defaultValueOrNil,
					Loc:               itemLoc,
				})

				// Commas after spread elements are not allowed
				if hasSpread && p.lexer.Token == js_lexer.TComma {
					p.log.AddError(&p.tracker, p.lexer.Range(), "Unexpected \",\" after rest pattern")
					panic(js_lexer.LexerPanic{})
				}
			}

			if p.lexer.Token != js_lexer.TComma {
				break
			}
			if p.lexer.HasNewlineBefore {
				isSingleLine = false
			}
			p.lexer.Next()
			if p.lexer.HasNewlineBefore {
				isSingleLine = false
			}
		}

		p.allowIn = oldAllowIn

		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBracketLoc := p.saveExprCommentsHere()
		p.lexer.Expect(js_lexer.TCloseBracket)
		return js_ast.Binding{Loc: loc, Data: &js_ast.BArray{
			Items:           items,
			HasSpread:       hasSpread,
			IsSingleLine:    isSingleLine,
			CloseBracketLoc: closeBracketLoc,
		}}

	case js_lexer.TOpenBrace:
		// Object destructuring is not allowed in a "using" statement
		if opts.isUsingStmt {
			break
		}
		p.markSyntaxFeature(compat.Destructuring, p.lexer.Range())
		p.lexer.Next()
		isSingleLine := !p.lexer.HasNewlineBefore
		properties := []js_ast.PropertyBinding{}

		// "in" expressions are allowed
		oldAllowIn := p.allowIn
		p.allowIn = true

		for p.lexer.Token != js_lexer.TCloseBrace {
			p.saveExprCommentsHere()
			property := p.parsePropertyBinding()
			properties = append(properties, property)

			// Commas after spread elements are not allowed
			if property.IsSpread && p.lexer.Token == js_lexer.TComma {
				p.log.AddError(&p.tracker, p.lexer.Range(), "Unexpected \",\" after rest pattern")
				panic(js_lexer.LexerPanic{})
			}

			if p.lexer.Token != js_lexer.TComma {
				break
			}
			if p.lexer.HasNewlineBefore {
				isSingleLine = false
			}
			p.lexer.Next()
			if p.lexer.HasNewlineBefore {
				isSingleLine = false
			}
		}

		p.allowIn = oldAllowIn

		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBraceLoc := p.saveExprCommentsHere()
		p.lexer.Expect(js_lexer.TCloseBrace)
		return js_ast.Binding{Loc: loc, Data: &js_ast.BObject{
			Properties:    properties,
			IsSingleLine:  isSingleLine,
			CloseBraceLoc: closeBraceLoc,
		}}
	}

	// Anything else (or a destructuring pattern in a "using" statement) is an error
	p.lexer.Expect(js_lexer.TIdentifier)
	return js_ast.Binding{}
}
+
// parseFn parses a function's argument list, optional TypeScript return type,
// and body (if present). It is shared by function statements and expressions,
// class methods, and object literal methods. "data" carries parsing context
// such as whether "await"/"yield" are keywords here and whether a missing
// body is allowed (TypeScript declarations and overloads). Returns the parsed
// function and whether a body was actually present.
func (p *parser) parseFn(
	name *ast.LocRef,
	classKeyword logger.Range,
	decoratorContext decoratorContextFlags,
	data fnOrArrowDataParse,
) (fn js_ast.Fn, hadBody bool) {
	fn.Name = name
	fn.HasRestArg = false
	fn.IsAsync = data.await == allowExpr
	fn.IsGenerator = data.yield == allowExpr
	fn.ArgumentsRef = ast.InvalidRef
	fn.OpenParenLoc = p.lexer.Loc()
	p.lexer.Expect(js_lexer.TOpenParen)

	// Await and yield are not allowed in function arguments
	oldFnOrArrowData := p.fnOrArrowDataParse
	if data.await == allowExpr {
		p.fnOrArrowDataParse.await = forbidAll
	} else {
		p.fnOrArrowDataParse.await = allowIdent
	}
	if data.yield == allowExpr {
		p.fnOrArrowDataParse.yield = forbidAll
	} else {
		p.fnOrArrowDataParse.yield = allowIdent
	}

	// Don't suggest inserting "async" before anything if "await" is found
	p.fnOrArrowDataParse.needsAsyncLoc.Start = -1

	// If "super" is allowed in the body, it's allowed in the arguments
	p.fnOrArrowDataParse.allowSuperCall = data.allowSuperCall
	p.fnOrArrowDataParse.allowSuperProperty = data.allowSuperProperty

	for p.lexer.Token != js_lexer.TCloseParen {
		// Skip over "this" type annotations
		if p.options.ts.Parse && p.lexer.Token == js_lexer.TThis {
			p.lexer.Next()
			if p.lexer.Token == js_lexer.TColon {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
			}
			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
			continue
		}

		var decorators []js_ast.Decorator
		if data.decoratorScope != nil {
			oldAwait := p.fnOrArrowDataParse.await
			oldNeedsAsyncLoc := p.fnOrArrowDataParse.needsAsyncLoc

			// While TypeScript parameter decorators are expressions, they are not
			// evaluated where they exist in the code. They are moved to after the
			// class declaration and evaluated there instead. Specifically this
			// TypeScript code:
			//
			//   class Foo {
			//     foo(@bar() baz) {}
			//   }
			//
			// becomes this JavaScript code:
			//
			//   class Foo {
			//     foo(baz) {}
			//   }
			//   __decorate([
			//     __param(0, bar())
			//   ], Foo.prototype, "foo", null);
			//
			// One consequence of this is that whether "await" is allowed or not
			// depends on whether the class declaration itself is inside an "async"
			// function or not. The TypeScript compiler allows code that does this:
			//
			//   async function fn(foo) {
			//     class Foo {
			//       foo(@bar(await foo) baz) {}
			//     }
			//     return Foo
			//   }
			//
			// because that becomes the following valid JavaScript:
			//
			//   async function fn(foo) {
			//     class Foo {
			//       foo(baz) {}
			//     }
			//     __decorate([
			//       __param(0, bar(await foo))
			//     ], Foo.prototype, "foo", null);
			//     return Foo;
			//   }
			//
			if oldFnOrArrowData.await == allowExpr {
				p.fnOrArrowDataParse.await = allowExpr
			} else {
				p.fnOrArrowDataParse.needsAsyncLoc = oldFnOrArrowData.needsAsyncLoc
			}

			decorators = p.parseDecorators(data.decoratorScope, classKeyword, decoratorContext|decoratorInFnArgs)

			p.fnOrArrowDataParse.await = oldAwait
			p.fnOrArrowDataParse.needsAsyncLoc = oldNeedsAsyncLoc
		}

		if !fn.HasRestArg && p.lexer.Token == js_lexer.TDotDotDot {
			p.markSyntaxFeature(compat.RestArgument, p.lexer.Range())
			p.lexer.Next()
			fn.HasRestArg = true
		}

		isTypeScriptCtorField := false
		isIdentifier := p.lexer.Token == js_lexer.TIdentifier
		text := p.lexer.Identifier.String
		arg := p.parseBinding(parseBindingOpts{})

		if p.options.ts.Parse {
			// Skip over TypeScript accessibility modifiers, which turn this argument
			// into a class field when used inside a class constructor. This is known
			// as a "parameter property" in TypeScript.
			if isIdentifier && data.isConstructor {
				for p.lexer.Token == js_lexer.TIdentifier || p.lexer.Token == js_lexer.TOpenBrace || p.lexer.Token == js_lexer.TOpenBracket {
					if text != "public" && text != "private" && text != "protected" && text != "readonly" && text != "override" {
						break
					}
					isTypeScriptCtorField = true

					// TypeScript requires an identifier binding
					if p.lexer.Token != js_lexer.TIdentifier {
						p.lexer.Expect(js_lexer.TIdentifier)
					}
					text = p.lexer.Identifier.String

					// Re-parse the binding (the current binding is the TypeScript keyword)
					arg = p.parseBinding(parseBindingOpts{})
				}
			}

			// "function foo(a?) {}"
			if p.lexer.Token == js_lexer.TQuestion {
				p.lexer.Next()
			}

			// "function foo(a: any) {}"
			if p.lexer.Token == js_lexer.TColon {
				p.lexer.Next()
				p.skipTypeScriptType(js_ast.LLowest)
			}
		}

		p.declareBinding(ast.SymbolHoisted, arg, parseStmtOpts{})

		var defaultValueOrNil js_ast.Expr
		if !fn.HasRestArg && p.lexer.Token == js_lexer.TEquals {
			p.markSyntaxFeature(compat.DefaultArgument, p.lexer.Range())
			p.lexer.Next()
			defaultValueOrNil = p.parseExpr(js_ast.LComma)
		}

		fn.Args = append(fn.Args, js_ast.Arg{
			Decorators:   decorators,
			Binding:      arg,
			DefaultOrNil: defaultValueOrNil,

			// We need to track this because it affects code generation
			IsTypeScriptCtorField: isTypeScriptCtorField,
		})

		if p.lexer.Token != js_lexer.TComma {
			break
		}
		if fn.HasRestArg {
			// JavaScript does not allow a comma after a rest argument
			if data.isTypeScriptDeclare {
				// TypeScript does allow a comma after a rest argument in a "declare" context
				p.lexer.Next()
			} else {
				p.lexer.Expect(js_lexer.TCloseParen)
			}
			break
		}
		p.lexer.Next()
	}

	// Reserve the special name "arguments" in this scope. This ensures that it
	// shadows any variable called "arguments" in any parent scopes. But only do
	// this if it wasn't already declared above because arguments are allowed to
	// be called "arguments", in which case the real "arguments" is inaccessible.
	if _, ok := p.currentScope.Members["arguments"]; !ok {
		fn.ArgumentsRef = p.declareSymbol(ast.SymbolArguments, fn.OpenParenLoc, "arguments")
		p.symbols[fn.ArgumentsRef.InnerIndex].Flags |= ast.MustNotBeRenamed
	}

	p.lexer.Expect(js_lexer.TCloseParen)
	p.fnOrArrowDataParse = oldFnOrArrowData

	// "function foo(): any {}"
	if p.options.ts.Parse && p.lexer.Token == js_lexer.TColon {
		p.lexer.Next()
		p.skipTypeScriptReturnType()
	}

	// "function foo(): any;"
	if data.allowMissingBodyForTypeScript && p.lexer.Token != js_lexer.TOpenBrace {
		p.lexer.ExpectOrInsertSemicolon()
		return
	}

	fn.Body = p.parseFnBody(data)
	hadBody = true
	return
}
+
// fnKind distinguishes function statements from function expressions, which
// have slightly different name validation rules (see validateFunctionName).
type fnKind uint8

const (
	fnStmt fnKind = iota // function declaration statement
	fnExpr               // function expression
)
+
+func (p *parser) validateFunctionName(fn js_ast.Fn, kind fnKind) {
+	// Prevent the function name from being the same as a function-specific keyword
+	if fn.Name != nil {
+		if fn.IsAsync && p.symbols[fn.Name.Ref.InnerIndex].OriginalName == "await" {
+			p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, fn.Name.Loc),
+				"An async function cannot be named \"await\"")
+		} else if fn.IsGenerator && p.symbols[fn.Name.Ref.InnerIndex].OriginalName == "yield" && kind == fnExpr {
+			p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, fn.Name.Loc),
+				"A generator function expression cannot be named \"yield\"")
+		}
+	}
+}
+
+func (p *parser) validateDeclaredSymbolName(loc logger.Loc, name string) {
+	if js_lexer.StrictModeReservedWords[name] {
+		p.markStrictModeFeature(reservedWord, js_lexer.RangeOfIdentifier(p.source, loc), name)
+	} else if isEvalOrArguments(name) {
+		p.markStrictModeFeature(evalOrArguments, js_lexer.RangeOfIdentifier(p.source, loc), name)
+	}
+}
+
// parseClassStmt parses a class statement starting at the "class" keyword.
// It handles an optional class name (for "export default class"), TypeScript
// type parameters, and TypeScript "declare class" statements, which discard
// their scope and return a shared marker statement instead of a real class.
func (p *parser) parseClassStmt(loc logger.Loc, opts parseStmtOpts) js_ast.Stmt {
	var name *ast.LocRef
	classKeyword := p.lexer.Range()
	if p.lexer.Token == js_lexer.TClass {
		p.markSyntaxFeature(compat.Class, classKeyword)
		p.lexer.Next()
	} else {
		p.lexer.Expected(js_lexer.TClass)
	}

	// Parse the class name unless it's optional and absent. In TypeScript,
	// "implements" after "class" starts the heritage clause, not a name.
	if !opts.isNameOptional || (p.lexer.Token == js_lexer.TIdentifier && (!p.options.ts.Parse || p.lexer.Identifier.String != "implements")) {
		nameLoc := p.lexer.Loc()
		nameText := p.lexer.Identifier.String
		p.lexer.Expect(js_lexer.TIdentifier)
		if p.fnOrArrowDataParse.await != allowIdent && nameText == "await" {
			p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, nameLoc), "Cannot use \"await\" as an identifier here:")
		}
		name = &ast.LocRef{Loc: nameLoc, Ref: ast.InvalidRef}
		if !opts.isTypeScriptDeclare {
			name.Ref = p.declareSymbol(ast.SymbolClass, nameLoc, nameText)
		}
	}

	// Even anonymous classes can have TypeScript type parameters
	if p.options.ts.Parse {
		p.skipTypeScriptTypeParameters(allowInOutVarianceAnnotations | allowConstModifier)
	}

	classOpts := parseClassOpts{
		isTypeScriptDeclare: opts.isTypeScriptDeclare,
	}
	if opts.deferredDecorators != nil {
		classOpts.decorators = opts.deferredDecorators.decorators
	}
	scopeIndex := p.pushScopeForParsePass(js_ast.ScopeClassName, loc)
	class := p.parseClass(classKeyword, name, classOpts)

	if opts.isTypeScriptDeclare {
		p.popAndDiscardScope(scopeIndex)

		if opts.isNamespaceScope && opts.isExport {
			p.hasNonLocalExportDeclareInsideNamespace = true
		}

		// Remember that this was a "declare class" so we can allow decorators on it
		return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptSharedWasDeclareClass}
	}

	p.popScope()
	return js_ast.Stmt{Loc: loc, Data: &js_ast.SClass{Class: class, IsExport: opts.isExport}}
}
+
// parseClassExpr parses a "class" expression, using decorators that were
// already parsed before it. The optional name is created via newSymbol rather
// than declared in the enclosing scope.
func (p *parser) parseClassExpr(decorators []js_ast.Decorator) js_ast.Expr {
	classKeyword := p.lexer.Range()
	p.markSyntaxFeature(compat.Class, classKeyword)
	p.lexer.Expect(js_lexer.TClass)
	var name *ast.LocRef

	opts := parseClassOpts{
		decorators:       decorators,
		decoratorContext: decoratorInClassExpr,
	}
	// The class name gets its own scope so the body can refer to it
	p.pushScopeForParsePass(js_ast.ScopeClassName, classKeyword.Loc)

	// Parse an optional class name. In TypeScript, "implements" here begins a
	// heritage clause and is not a class name.
	if p.lexer.Token == js_lexer.TIdentifier {
		if nameText := p.lexer.Identifier.String; !p.options.ts.Parse || nameText != "implements" {
			if p.fnOrArrowDataParse.await != allowIdent && nameText == "await" {
				p.log.AddError(&p.tracker, p.lexer.Range(), "Cannot use \"await\" as an identifier here:")
			}
			name = &ast.LocRef{Loc: p.lexer.Loc(), Ref: p.newSymbol(ast.SymbolOther, nameText)}
			p.lexer.Next()
		}
	}

	// Even anonymous classes can have TypeScript type parameters
	if p.options.ts.Parse {
		p.skipTypeScriptTypeParameters(allowInOutVarianceAnnotations | allowConstModifier)
	}

	class := p.parseClass(classKeyword, name, opts)

	p.popScope()
	return js_ast.Expr{Loc: classKeyword.Loc, Data: &js_ast.EClass{Class: class}}
}
+
// parseClassOpts carries per-class context into parseClass.
type parseClassOpts struct {
	decorators          []js_ast.Decorator    // decorators attached to the class itself
	decoratorContext    decoratorContextFlags // syntactic position, for decorator validation
	isTypeScriptDeclare bool                  // this is a TypeScript "declare class"
}
+
// parseClass parses the remainder of a class after the keyword, name, and
// type parameters. By the time we call this, the identifier and type
// parameters have already been parsed. We need to start parsing from the
// "extends" clause.
func (p *parser) parseClass(classKeyword logger.Range, name *ast.LocRef, classOpts parseClassOpts) js_ast.Class {
	var extendsOrNil js_ast.Expr

	if p.lexer.Token == js_lexer.TExtends {
		p.lexer.Next()
		extendsOrNil = p.parseExpr(js_ast.LNew)

		// TypeScript's type argument parser inside expressions backtracks if the
		// first token after the end of the type parameter list is "{", so the
		// parsed expression above will have backtracked if there are any type
		// arguments. This means we have to re-parse for any type arguments here.
		// This seems kind of wasteful to me but it's what the official compiler
		// does and it probably doesn't have that high of a performance overhead
		// because "extends" clauses aren't that frequent, so it should be ok.
		if p.options.ts.Parse {
			p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
		}
	}

	// TypeScript "implements" clauses are types only; skip the comma-separated list
	if p.options.ts.Parse && p.lexer.IsContextualKeyword("implements") {
		p.lexer.Next()
		for {
			p.skipTypeScriptType(js_ast.LLowest)
			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}
	}

	bodyLoc := p.lexer.Loc()
	p.lexer.Expect(js_lexer.TOpenBrace)
	properties := []js_ast.Property{}
	hasPropertyDecorator := false

	// Allow "in" and private fields inside class bodies
	oldAllowIn := p.allowIn
	oldAllowPrivateIdentifiers := p.allowPrivateIdentifiers
	p.allowIn = true
	p.allowPrivateIdentifiers = true

	// A scope is needed for private identifiers
	scopeIndex := p.pushScopeForParsePass(js_ast.ScopeClassBody, bodyLoc)

	opts := propertyOpts{
		isClass:          true,
		decoratorScope:   p.currentScope,
		decoratorContext: classOpts.decoratorContext,
		classHasExtends:  extendsOrNil.Data != nil,
		classKeyword:     classKeyword,
	}
	hasConstructor := false

	for p.lexer.Token != js_lexer.TCloseBrace {
		// Stray semicolons between class members are allowed
		if p.lexer.Token == js_lexer.TSemicolon {
			p.lexer.Next()
			continue
		}

		// Parse decorators for this property. Note: this inner "scopeIndex"
		// shadows the class-body scope index above on purpose; it records the
		// scope count before the member so type-only members can be discarded.
		firstDecoratorLoc := p.lexer.Loc()
		scopeIndex := len(p.scopesInOrder)
		opts.decorators = p.parseDecorators(p.currentScope, classKeyword, opts.decoratorContext)
		if len(opts.decorators) > 0 {
			hasPropertyDecorator = true
		}

		// This property may turn out to be a type in TypeScript, which should be ignored
		if property, ok := p.parseProperty(p.saveExprCommentsHere(), js_ast.PropertyField, opts, nil); ok {
			properties = append(properties, property)

			// Forbid decorators on class constructors
			if key, ok := property.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(key.Value, "constructor") {
				if len(opts.decorators) > 0 {
					p.log.AddError(&p.tracker, logger.Range{Loc: firstDecoratorLoc},
						"Decorators are not allowed on class constructors")
				}
				// Only a non-static, non-computed method named "constructor" is
				// the actual constructor (e.g. "static constructor() {}" is not)
				if property.Kind.IsMethodDefinition() && !property.Flags.Has(js_ast.PropertyIsStatic) && !property.Flags.Has(js_ast.PropertyIsComputed) {
					if hasConstructor {
						p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, property.Key.Loc),
							"Classes cannot contain more than one constructor")
					}
					hasConstructor = true
				}
			}
		} else if !classOpts.isTypeScriptDeclare && len(opts.decorators) > 0 {
			// Decorators on a type-only member are invalid outside "declare class"
			p.log.AddError(&p.tracker, logger.Range{Loc: firstDecoratorLoc, Len: 1}, "Decorators are not valid here")
			p.discardScopesUpTo(scopeIndex)
		}
	}

	// Discard the private identifier scope inside a TypeScript "declare class"
	if classOpts.isTypeScriptDeclare {
		p.popAndDiscardScope(scopeIndex)
	} else {
		p.popScope()
	}

	p.allowIn = oldAllowIn
	p.allowPrivateIdentifiers = oldAllowPrivateIdentifiers

	closeBraceLoc := p.saveExprCommentsHere()
	p.lexer.Expect(js_lexer.TCloseBrace)

	// TypeScript has legacy behavior that uses assignment semantics instead of
	// define semantics for class fields when "useDefineForClassFields" is enabled
	// (in which case TypeScript behaves differently than JavaScript, which is
	// arguably "wrong").
	//
	// This legacy behavior exists because TypeScript added class fields to
	// TypeScript before they were added to JavaScript. They decided to go with
	// assignment semantics for whatever reason. Later on TC39 decided to go with
	// define semantics for class fields instead. This behaves differently if the
	// base class has a setter with the same name.
	//
	// The value of "useDefineForClassFields" defaults to false when it's not
	// specified and the target is earlier than "ES2022" since the class field
	// language feature was added in ES2022. However, TypeScript's "target"
	// setting currently defaults to "ES3" which unfortunately means that the
	// "useDefineForClassFields" setting defaults to false (i.e. to "wrong").
	//
	// We default "useDefineForClassFields" to true (i.e. to "correct") instead.
	// This is partially because our target defaults to "esnext", and partially
	// because this is a legacy behavior that no one should be using anymore.
	// Users that want the wrong behavior can either set "useDefineForClassFields"
	// to false in "tsconfig.json" explicitly, or set TypeScript's "target" to
	// "ES2021" or earlier in their in "tsconfig.json" file.
	useDefineForClassFields := !p.options.ts.Parse || p.options.ts.Config.UseDefineForClassFields == config.True ||
		(p.options.ts.Config.UseDefineForClassFields == config.Unspecified && p.options.ts.Config.Target != config.TSTargetBelowES2022)

	return js_ast.Class{
		ClassKeyword:  classKeyword,
		Decorators:    classOpts.decorators,
		Name:          name,
		ExtendsOrNil:  extendsOrNil,
		BodyLoc:       bodyLoc,
		Properties:    properties,
		CloseBraceLoc: closeBraceLoc,

		// Always lower standard decorators if they are present and TypeScript's
		// "useDefineForClassFields" setting is false even if the configured target
		// environment supports decorators. This setting changes the behavior of
		// class fields, and so we must lower decorators so they behave correctly.
		ShouldLowerStandardDecorators: (len(classOpts.decorators) > 0 || hasPropertyDecorator) &&
			((!p.options.ts.Parse && p.options.unsupportedJSFeatures.Has(compat.Decorators)) ||
				(p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators != config.True &&
					(p.options.unsupportedJSFeatures.Has(compat.Decorators) || !useDefineForClassFields))),

		UseDefineForClassFields: useDefineForClassFields,
	}
}
+
+func (p *parser) parseLabelName() *ast.LocRef {
+	if p.lexer.Token != js_lexer.TIdentifier || p.lexer.HasNewlineBefore {
+		return nil
+	}
+
+	name := ast.LocRef{Loc: p.lexer.Loc(), Ref: p.storeNameInRef(p.lexer.Identifier)}
+	p.lexer.Next()
+	return &name
+}
+
// parsePath parses the module specifier of an import/export statement plus an
// optional trailing import attributes clause ("with { ... }" or the legacy
// "assert { ... }"). It returns the specifier's source range and text, the
// parsed attribute entries (or nil if absent), and flags derived from the
// attributes (currently just AssertTypeJSON).
func (p *parser) parsePath() (logger.Range, string, *ast.ImportAssertOrWith, ast.ImportRecordFlags) {
	var flags ast.ImportRecordFlags
	pathRange := p.lexer.Range()
	pathText := helpers.UTF16ToString(p.lexer.StringLiteral())
	// The specifier may also be a substitution-free template literal
	if p.lexer.Token == js_lexer.TNoSubstitutionTemplateLiteral {
		p.lexer.Next()
	} else {
		p.lexer.Expect(js_lexer.TStringLiteral)
	}

	// See https://github.com/tc39/proposal-import-attributes for more info
	// Note: the newline restriction only applies to "assert", not "with"
	var assertOrWith *ast.ImportAssertOrWith
	if p.lexer.Token == js_lexer.TWith || (!p.lexer.HasNewlineBefore && p.lexer.IsContextualKeyword("assert")) {
		// "import './foo.json' assert { type: 'json' }"
		// "import './foo.json' with { type: 'json' }"
		var entries []ast.AssertOrWithEntry
		duplicates := make(map[string]logger.Range)
		keyword := ast.WithKeyword
		if p.lexer.Token != js_lexer.TWith {
			keyword = ast.AssertKeyword
		}
		keywordLoc := p.saveExprCommentsHere()
		p.lexer.Next()
		openBraceLoc := p.saveExprCommentsHere()
		p.lexer.Expect(js_lexer.TOpenBrace)

		for p.lexer.Token != js_lexer.TCloseBrace {
			// Parse the key, which may be an identifier, keyword, or string literal
			keyLoc := p.saveExprCommentsHere()
			preferQuotedKey := false
			var key []uint16
			var keyText string
			if p.lexer.IsIdentifierOrKeyword() {
				keyText = p.lexer.Identifier.String
				key = helpers.StringToUTF16(keyText)
			} else if p.lexer.Token == js_lexer.TStringLiteral {
				key = p.lexer.StringLiteral()
				keyText = helpers.UTF16ToString(key)
				// Keep the author's quotes unless minifying
				preferQuotedKey = !p.options.minifySyntax
			} else {
				p.lexer.Expect(js_lexer.TIdentifier)
			}
			// Duplicate keys are an error; point at the first occurrence
			if prevRange, ok := duplicates[keyText]; ok {
				what := "attribute"
				if keyword == ast.AssertKeyword {
					what = "assertion"
				}
				p.log.AddErrorWithNotes(&p.tracker, p.lexer.Range(), fmt.Sprintf("Duplicate import %s %q", what, keyText),
					[]logger.MsgData{p.tracker.MsgData(prevRange, fmt.Sprintf("The first %q was here:", keyText))})
			}
			duplicates[keyText] = p.lexer.Range()
			p.lexer.Next()
			p.lexer.Expect(js_lexer.TColon)

			// Parse the value, which must be a string literal
			valueLoc := p.saveExprCommentsHere()
			value := p.lexer.StringLiteral()
			p.lexer.Expect(js_lexer.TStringLiteral)

			entries = append(entries, ast.AssertOrWithEntry{
				Key:             key,
				KeyLoc:          keyLoc,
				Value:           value,
				ValueLoc:        valueLoc,
				PreferQuotedKey: preferQuotedKey,
			})

			// Using "assert: { type: 'json' }" triggers special behavior
			if keyword == ast.AssertKeyword && helpers.UTF16EqualsString(key, "type") && helpers.UTF16EqualsString(value, "json") {
				flags |= ast.AssertTypeJSON
			}

			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}

		closeBraceLoc := p.saveExprCommentsHere()
		p.lexer.Expect(js_lexer.TCloseBrace)
		if keyword == ast.AssertKeyword {
			p.maybeWarnAboutAssertKeyword(keywordLoc)
		}
		assertOrWith = &ast.ImportAssertOrWith{
			Entries:            entries,
			Keyword:            keyword,
			KeywordLoc:         keywordLoc,
			InnerOpenBraceLoc:  openBraceLoc,
			InnerCloseBraceLoc: closeBraceLoc,
		}
	}

	return pathRange, pathText, assertOrWith, flags
}
+
+// Let people know if they probably should be using "with" instead of "assert"
+func (p *parser) maybeWarnAboutAssertKeyword(loc logger.Loc) {
+	if p.options.unsupportedJSFeatures.Has(compat.ImportAssertions) && !p.options.unsupportedJSFeatures.Has(compat.ImportAttributes) {
+		where := config.PrettyPrintTargetEnvironment(p.options.originalTargetEnv, p.options.unsupportedJSFeatureOverridesMask)
+		msg := logger.Msg{
+			Kind:  logger.Warning,
+			Data:  p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, loc), "The \"assert\" keyword is not supported in "+where),
+			Notes: []logger.MsgData{{Text: "Did you mean to use \"with\" instead of \"assert\"?"}},
+		}
+		msg.Data.Location.Suggestion = "with"
+		p.log.AddMsgID(logger.MsgID_JS_AssertToWith, msg)
+	}
+}
+
// parseFnStmt parses a function declaration. This assumes the "function"
// token (and any leading "async") has already been parsed. TypeScript-only
// forms ("declare function", bodiless overload signatures) return a type-only
// placeholder statement and declare no symbol.
func (p *parser) parseFnStmt(loc logger.Loc, opts parseStmtOpts, isAsync bool, asyncRange logger.Range) js_ast.Stmt {
	isGenerator := p.lexer.Token == js_lexer.TAsterisk
	hasError := false
	if isAsync {
		hasError = p.markAsyncFn(asyncRange, isGenerator)
	}
	if isGenerator {
		// Avoid reporting both an async-fn error and a generator error for
		// the same "async function*"
		if !hasError {
			p.markSyntaxFeature(compat.Generator, p.lexer.Range())
		}
		p.lexer.Next()
	}

	switch opts.lexicalDecl {
	case lexicalDeclForbid:
		p.forbidLexicalDecl(loc)

	// Allow certain function statements in certain single-statement contexts
	case lexicalDeclAllowFnInsideIf, lexicalDeclAllowFnInsideLabel:
		if opts.isTypeScriptDeclare || isGenerator || isAsync {
			p.forbidLexicalDecl(loc)
		}
	}

	var name *ast.LocRef
	var nameText string

	// The name is optional for "export default function() {}" pseudo-statements
	if !opts.isNameOptional || p.lexer.Token == js_lexer.TIdentifier {
		nameLoc := p.lexer.Loc()
		nameText = p.lexer.Identifier.String
		if !isAsync && p.fnOrArrowDataParse.await != allowIdent && nameText == "await" {
			p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, nameLoc), "Cannot use \"await\" as an identifier here:")
		}
		p.lexer.Expect(js_lexer.TIdentifier)
		// The symbol is declared later, after we know whether a body is present
		name = &ast.LocRef{Loc: nameLoc, Ref: ast.InvalidRef}
	}

	// Even anonymous functions can have TypeScript type parameters
	if p.options.ts.Parse {
		p.skipTypeScriptTypeParameters(allowConstModifier)
	}

	// Introduce a fake block scope for function declarations inside if statements
	var ifStmtScopeIndex int
	hasIfScope := opts.lexicalDecl == lexicalDeclAllowFnInsideIf
	if hasIfScope {
		ifStmtScopeIndex = p.pushScopeForParsePass(js_ast.ScopeBlock, loc)
	}

	scopeIndex := p.pushScopeForParsePass(js_ast.ScopeFunctionArgs, p.lexer.Loc())

	// "await" and "yield" become expression keywords inside async/generator bodies
	await := allowIdent
	yield := allowIdent
	if isAsync {
		await = allowExpr
	}
	if isGenerator {
		yield = allowExpr
	}

	fn, hadBody := p.parseFn(name, logger.Range{}, 0, fnOrArrowDataParse{
		needsAsyncLoc:       loc,
		asyncRange:          asyncRange,
		await:               await,
		yield:               yield,
		isTypeScriptDeclare: opts.isTypeScriptDeclare,

		// Only allow omitting the body if we're parsing TypeScript
		allowMissingBodyForTypeScript: p.options.ts.Parse,
	})

	// Don't output anything if it's just a forward declaration of a function
	if opts.isTypeScriptDeclare || !hadBody {
		p.popAndDiscardScope(scopeIndex)

		// Balance the fake block scope introduced above
		if hasIfScope {
			p.popAndDiscardScope(ifStmtScopeIndex)
		}

		if opts.isTypeScriptDeclare && opts.isNamespaceScope && opts.isExport {
			p.hasNonLocalExportDeclareInsideNamespace = true
		}

		return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
	}

	p.popScope()

	// Only declare the function after we know if it had a body or not. Otherwise
	// TypeScript code such as this will double-declare the symbol:
	//
	//     function foo(): void;
	//     function foo(): void {}
	//
	if name != nil {
		kind := ast.SymbolHoistedFunction
		if isGenerator || isAsync {
			kind = ast.SymbolGeneratorOrAsyncFunction
		}
		name.Ref = p.declareSymbol(kind, name.Loc, nameText)
	}

	// Balance the fake block scope introduced above
	if hasIfScope {
		p.popScope()
	}

	fn.HasIfScope = hasIfScope
	p.validateFunctionName(fn, fnStmt)
	// Honor "@__NO_SIDE_EFFECTS__" comments unless DCE annotations are ignored
	if opts.hasNoSideEffectsComment && !p.options.ignoreDCEAnnotations {
		fn.HasNoSideEffectsComment = true
		if name != nil && !opts.isTypeScriptDeclare {
			p.symbols[name.Ref.InnerIndex].Flags |= ast.CallCanBeUnwrappedIfUnused
		}
	}
	return js_ast.Stmt{Loc: loc, Data: &js_ast.SFunction{Fn: fn, IsExport: opts.isExport}}
}
+
// deferredDecorators holds decorators that were parsed before the statement
// they apply to, so the statement parser can attach them once it knows what
// it's parsing (see the "opts.deferredDecorators" uses in parseStmt/parseClassStmt).
type deferredDecorators struct {
	decorators []js_ast.Decorator
}
+
// decoratorContextFlags describes the syntactic position a decorator was
// encountered in, so parseDecorators can reject positions that are invalid
// for the active decorator dialect.
type decoratorContextFlags uint8

const (
	// The decorator appears in expression position before a class expression.
	// Note: the first constant now carries an explicit type, matching the
	// sibling "fnKind" and "lexicalDecl" enums; the values are unchanged.
	decoratorBeforeClassExpr decoratorContextFlags = 1 << iota

	// The decorator appears on a class expression (not a class declaration)
	decoratorInClassExpr

	// The decorator appears on a function or method parameter
	decoratorInFnArgs
)
+
// parseDecorators parses zero or more "@decorator" annotations and validates
// that decorators are legal in this position for the active dialect (plain
// JavaScript vs. TypeScript experimental decorators). The decorator
// expressions are parsed in decoratorScope — the scope enclosing the class —
// rather than the current scope, because that's where generated decorator
// code is inserted.
func (p *parser) parseDecorators(decoratorScope *js_ast.Scope, classKeyword logger.Range, context decoratorContextFlags) (decorators []js_ast.Decorator) {
	if p.lexer.Token == js_lexer.TAt {
		if p.options.ts.Parse {
			if p.options.ts.Config.ExperimentalDecorators == config.True {
				if (context & decoratorInClassExpr) != 0 {
					p.lexer.AddRangeErrorWithNotes(p.lexer.Range(), "TypeScript experimental decorators can only be used with class declarations",
						[]logger.MsgData{p.tracker.MsgData(classKeyword, "This is a class expression, not a class declaration:")})
				} else if (context & decoratorBeforeClassExpr) != 0 {
					p.log.AddError(&p.tracker, p.lexer.Range(), "TypeScript experimental decorators cannot be used in expression position")
				}
			} else {
				// NOTE(review): the "ExperimentalDecorators != config.True" check below
				// is redundant — we're already in the branch where the "== config.True"
				// test above failed
				if (context&decoratorInFnArgs) != 0 && p.options.ts.Config.ExperimentalDecorators != config.True {
					p.log.AddErrorWithNotes(&p.tracker, p.lexer.Range(), "Parameter decorators only work when experimental decorators are enabled", []logger.MsgData{{
						Text: "You can enable experimental decorators by adding \"experimentalDecorators\": true to your \"tsconfig.json\" file.",
					}})
				}
			}
		} else {
			if (context & decoratorInFnArgs) != 0 {
				p.log.AddError(&p.tracker, p.lexer.Range(), "Parameter decorators are not allowed in JavaScript")
			}
		}
	}

	// TypeScript decorators cause us to temporarily revert to the scope that
	// encloses the class declaration, since that's where the generated code
	// for TypeScript decorators will be inserted.
	oldScope := p.currentScope
	p.currentScope = decoratorScope

	for p.lexer.Token == js_lexer.TAt {
		atLoc := p.lexer.Loc()
		p.lexer.Next()

		var value js_ast.Expr
		if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True {
			// TypeScript's experimental decorator syntax is more permissive than
			// JavaScript. Parse a new/call expression with "exprFlagDecorator" so
			// we ignore EIndex expressions, since they may be part of a computed
			// property:
			//
			//   class Foo {
			//     @foo ['computed']() {}
			//   }
			//
			// This matches the behavior of the TypeScript compiler.
			p.parseExperimentalDecoratorNesting++
			value = p.parseExprWithFlags(js_ast.LNew, exprFlagDecorator)
			p.parseExperimentalDecoratorNesting--
		} else {
			// JavaScript's decorator syntax is more restrictive. Parse it using a
			// special parser that doesn't allow normal expressions (e.g. "?.").
			value = p.parseDecorator()
		}
		decorators = append(decorators, js_ast.Decorator{
			Value:            value,
			AtLoc:            atLoc,
			OmitNewlineAfter: !p.lexer.HasNewlineBefore,
		})
	}

	// Avoid "popScope" because this decorator scope is not hierarchical
	p.currentScope = oldScope
	return decorators
}
+
// parseDecorator parses a single standard-JavaScript decorator after the "@"
// token: either a parenthesized arbitrary expression, or a restricted chain of
// member accesses ending in an optional call. Disallowed forms ("?.", or a
// member access after a call) are reported with a suggestion to wrap the whole
// decorator in parentheses.
func (p *parser) parseDecorator() js_ast.Expr {
	// "@(any expression)" is always allowed
	if p.lexer.Token == js_lexer.TOpenParen {
		p.lexer.Next()
		value := p.parseExpr(js_ast.LLowest)
		p.lexer.Expect(js_lexer.TCloseParen)
		return value
	}

	name := p.lexer.Identifier
	nameRange := p.lexer.Range()
	p.lexer.Expect(js_lexer.TIdentifier)

	// Forbid invalid identifiers
	if (p.fnOrArrowDataParse.await != allowIdent && name.String == "await") ||
		(p.fnOrArrowDataParse.yield != allowIdent && name.String == "yield") {
		p.log.AddError(&p.tracker, nameRange, fmt.Sprintf("Cannot use %q as an identifier here:", name.String))
	}

	memberExpr := js_ast.Expr{Loc: nameRange.Loc, Data: &js_ast.EIdentifier{Ref: p.storeNameInRef(name)}}

	// Custom error reporting for error recovery: only the first violation is
	// recorded, and wrapRange grows to cover the whole decorator expression so
	// the note can suggest wrapping it in parentheses
	var syntaxError logger.MsgData
	wrapRange := nameRange

loop:
	for {
		switch p.lexer.Token {
		case js_lexer.TExclamation:
			// Skip over TypeScript non-null assertions
			if p.lexer.HasNewlineBefore {
				break loop
			}
			if !p.options.ts.Parse {
				p.lexer.Unexpected()
			}
			wrapRange.Len = p.lexer.Range().End() - wrapRange.Loc.Start
			p.lexer.Next()

		case js_lexer.TDot, js_lexer.TQuestionDot:
			// The grammar for "DecoratorMemberExpression" currently forbids "?."
			if p.lexer.Token == js_lexer.TQuestionDot && syntaxError.Location == nil {
				syntaxError = p.tracker.MsgData(p.lexer.Range(), "JavaScript decorator syntax does not allow \"?.\" here")
			}

			p.lexer.Next()
			wrapRange.Len = p.lexer.Range().End() - wrapRange.Loc.Start

			// The member may be a private identifier ("@foo.#bar")
			if p.lexer.Token == js_lexer.TPrivateIdentifier {
				name := p.lexer.Identifier
				memberExpr.Data = &js_ast.EIndex{
					Target: memberExpr,
					Index:  js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EPrivateIdentifier{Ref: p.storeNameInRef(name)}},
				}
				p.reportPrivateNameUsage(name.String)
				p.lexer.Next()
			} else {
				memberExpr.Data = &js_ast.EDot{
					Target:  memberExpr,
					Name:    p.lexer.Identifier.String,
					NameLoc: p.lexer.Loc(),
				}
				p.lexer.Expect(js_lexer.TIdentifier)
			}

		case js_lexer.TOpenParen:
			args, closeParenLoc, isMultiLine := p.parseCallArgs()
			memberExpr.Data = &js_ast.ECall{
				Target:        memberExpr,
				Args:          args,
				CloseParenLoc: closeParenLoc,
				IsMultiLine:   isMultiLine,
				Kind:          js_ast.TargetWasOriginallyPropertyAccess,
			}
			wrapRange.Len = closeParenLoc.Start + 1 - wrapRange.Loc.Start

			// The grammar for "DecoratorCallExpression" currently forbids anything after it
			if p.lexer.Token == js_lexer.TDot {
				if syntaxError.Location == nil {
					syntaxError = p.tracker.MsgData(p.lexer.Range(), "JavaScript decorator syntax does not allow \".\" after a call expression")
				}
				continue
			}
			break loop

		default:
			// "@x<y>"
			// "@x.y<z>"
			if !p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{}) {
				break loop
			}
		}
	}

	// Suggest that non-decorator expressions be wrapped in parentheses
	if syntaxError.Location != nil {
		var notes []logger.MsgData
		// Only offer the suggestion when the replacement fits on one line
		if text := p.source.TextForRange(wrapRange); !strings.ContainsRune(text, '\n') {
			note := p.tracker.MsgData(wrapRange, "Wrap this decorator in parentheses to allow arbitrary expressions:")
			note.Location.Suggestion = fmt.Sprintf("(%s)", text)
			notes = []logger.MsgData{note}
		}
		p.log.AddMsg(logger.Msg{
			Kind:  logger.Error,
			Data:  syntaxError,
			Notes: notes,
		})
	}

	return memberExpr
}
+
// lexicalDecl controls which lexically-scoped declarations are allowed at the
// current statement position (see parseFnStmt's handling of opts.lexicalDecl).
type lexicalDecl uint8

const (
	lexicalDeclForbid             lexicalDecl = iota // no lexical declarations allowed here
	lexicalDeclAllowAll                              // any declaration is allowed
	lexicalDeclAllowFnInsideIf                       // plain function statements allowed as an "if" branch body
	lexicalDeclAllowFnInsideLabel                    // plain function statements allowed after a label
)
+
// parseStmtOpts carries context flags that alter which statements are legal
// and how they are parsed at the current position.
type parseStmtOpts struct {
	deferredDecorators      *deferredDecorators // decorators parsed before the statement they apply to
	lexicalDecl             lexicalDecl         // which lexical declarations are allowed here
	isModuleScope           bool                // parsing at the top level of a module
	isNamespaceScope        bool                // parsing directly inside a TypeScript namespace
	isExport                bool                // statement is preceded by "export"
	isExportDefault         bool                // statement is preceded by "export default"
	isNameOptional          bool // For "export default" pseudo-statements
	isTypeScriptDeclare     bool // statement is under a TypeScript "declare"
	isForLoopInit           bool // presumably set while parsing a "for" initializer — not used in this chunk, confirm at call sites
	isForAwaitLoopInit      bool // presumably set while parsing a "for await" initializer — confirm at call sites
	allowDirectivePrologue  bool // presumably allows directives like "use strict" — confirm at call sites
	hasNoSideEffectsComment bool // a no-side-effects comment precedes this statement (set in parseStmt)
	isUsingStmt             bool // presumably set while parsing a "using" declaration — confirm at call sites
}
+
+func (p *parser) parseStmt(opts parseStmtOpts) js_ast.Stmt {
+	loc := p.lexer.Loc()
+
+	if (p.lexer.HasCommentBefore & js_lexer.NoSideEffectsCommentBefore) != 0 {
+		opts.hasNoSideEffectsComment = true
+	}
+
+	// Do not attach any leading comments to the next expression
+	p.lexer.CommentsBeforeToken = p.lexer.CommentsBeforeToken[:0]
+
+	switch p.lexer.Token {
+	case js_lexer.TSemicolon:
+		p.lexer.Next()
+		return js_ast.Stmt{Loc: loc, Data: js_ast.SEmptyShared}
+
+	case js_lexer.TExport:
+		previousExportKeyword := p.esmExportKeyword
+		if opts.isModuleScope {
+			p.esmExportKeyword = p.lexer.Range()
+		} else if !opts.isNamespaceScope {
+			p.lexer.Unexpected()
+		}
+		p.lexer.Next()
+
+		switch p.lexer.Token {
+		case js_lexer.TClass, js_lexer.TConst, js_lexer.TFunction, js_lexer.TVar, js_lexer.TAt:
+			opts.isExport = true
+			return p.parseStmt(opts)
+
+		case js_lexer.TImport:
+			// "export import foo = bar"
+			if p.options.ts.Parse && (opts.isModuleScope || opts.isNamespaceScope) {
+				opts.isExport = true
+				return p.parseStmt(opts)
+			}
+
+			p.lexer.Unexpected()
+			return js_ast.Stmt{}
+
+		case js_lexer.TEnum:
+			if !p.options.ts.Parse {
+				p.lexer.Unexpected()
+			}
+			opts.isExport = true
+			return p.parseStmt(opts)
+
+		case js_lexer.TIdentifier:
+			if p.lexer.IsContextualKeyword("let") {
+				opts.isExport = true
+				return p.parseStmt(opts)
+			}
+
+			if p.lexer.IsContextualKeyword("as") {
+				// "export as namespace ns;"
+				p.lexer.Next()
+				p.lexer.ExpectContextualKeyword("namespace")
+				p.lexer.Expect(js_lexer.TIdentifier)
+				p.lexer.ExpectOrInsertSemicolon()
+				return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+			}
+
+			if p.lexer.IsContextualKeyword("async") {
+				// "export async function foo() {}"
+				asyncRange := p.lexer.Range()
+				p.lexer.Next()
+				if p.lexer.HasNewlineBefore {
+					p.log.AddError(&p.tracker, logger.Range{Loc: logger.Loc{Start: asyncRange.End()}},
+						"Unexpected newline after \"async\"")
+					panic(js_lexer.LexerPanic{})
+				}
+				p.lexer.Expect(js_lexer.TFunction)
+				opts.isExport = true
+				return p.parseFnStmt(loc, opts, true /* isAsync */, asyncRange)
+			}
+
+			if p.options.ts.Parse {
+				switch p.lexer.Identifier.String {
+				case "type":
+					// "export type foo = ..."
+					typeRange := p.lexer.Range()
+					p.lexer.Next()
+					if p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace && p.lexer.Token != js_lexer.TAsterisk {
+						p.log.AddError(&p.tracker, logger.Range{Loc: logger.Loc{Start: typeRange.End()}},
+							"Unexpected newline after \"type\"")
+						panic(js_lexer.LexerPanic{})
+					}
+					p.skipTypeScriptTypeStmt(parseStmtOpts{isModuleScope: opts.isModuleScope, isExport: true})
+					return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+
+				case "namespace", "abstract", "module", "interface":
+					// "export namespace Foo {}"
+					// "export abstract class Foo {}"
+					// "export module Foo {}"
+					// "export interface Foo {}"
+					opts.isExport = true
+					return p.parseStmt(opts)
+
+				case "declare":
+					// "export declare class Foo {}"
+					opts.isExport = true
+					opts.lexicalDecl = lexicalDeclAllowAll
+					opts.isTypeScriptDeclare = true
+					return p.parseStmt(opts)
+				}
+			}
+
+			p.lexer.Unexpected()
+			return js_ast.Stmt{}
+
+		case js_lexer.TDefault:
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+			}
+
+			defaultLoc := p.lexer.Loc()
+			p.lexer.Next()
+
+			// Also pick up comments after the "default" keyword
+			if (p.lexer.HasCommentBefore & js_lexer.NoSideEffectsCommentBefore) != 0 {
+				opts.hasNoSideEffectsComment = true
+			}
+
+			// The default name is lazily generated only if no other name is present
+			createDefaultName := func() ast.LocRef {
+				// This must be named "default" for when "--keep-names" is active
+				defaultName := ast.LocRef{Loc: defaultLoc, Ref: p.newSymbol(ast.SymbolOther, "default")}
+				p.currentScope.Generated = append(p.currentScope.Generated, defaultName.Ref)
+				return defaultName
+			}
+
+			// "export default async function() {}"
+			// "export default async function foo() {}"
+			if p.lexer.IsContextualKeyword("async") {
+				asyncRange := p.lexer.Range()
+				p.lexer.Next()
+
+				if p.lexer.Token == js_lexer.TFunction && !p.lexer.HasNewlineBefore {
+					p.lexer.Next()
+					stmt := p.parseFnStmt(loc, parseStmtOpts{
+						isNameOptional:          true,
+						lexicalDecl:             lexicalDeclAllowAll,
+						hasNoSideEffectsComment: opts.hasNoSideEffectsComment,
+					}, true /* isAsync */, asyncRange)
+					if _, ok := stmt.Data.(*js_ast.STypeScript); ok {
+						return stmt // This was just a type annotation
+					}
+
+					// Use the statement name if present, since it's a better name
+					var defaultName ast.LocRef
+					if s, ok := stmt.Data.(*js_ast.SFunction); ok && s.Fn.Name != nil {
+						defaultName = ast.LocRef{Loc: defaultLoc, Ref: s.Fn.Name.Ref}
+					} else {
+						defaultName = createDefaultName()
+					}
+
+					return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportDefault{DefaultName: defaultName, Value: stmt}}
+				}
+
+				defaultName := createDefaultName()
+				expr := p.parseSuffix(p.parseAsyncPrefixExpr(asyncRange, js_ast.LComma, 0), js_ast.LComma, nil, 0)
+				p.lexer.ExpectOrInsertSemicolon()
+				return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportDefault{
+					DefaultName: defaultName, Value: js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}}}}
+			}
+
+			// "export default class {}"
+			// "export default class Foo {}"
+			// "export default @x class {}"
+			// "export default @x class Foo {}"
+			// "export default function() {}"
+			// "export default function foo() {}"
+			// "export default interface Foo {}"
+			// "export default interface + 1"
+			if p.lexer.Token == js_lexer.TFunction || p.lexer.Token == js_lexer.TClass || p.lexer.Token == js_lexer.TAt ||
+				(p.options.ts.Parse && p.lexer.IsContextualKeyword("interface")) {
+				stmt := p.parseStmt(parseStmtOpts{
+					deferredDecorators:      opts.deferredDecorators,
+					isNameOptional:          true,
+					isExportDefault:         true,
+					lexicalDecl:             lexicalDeclAllowAll,
+					hasNoSideEffectsComment: opts.hasNoSideEffectsComment,
+				})
+
+				// Use the statement name if present, since it's a better name
+				var defaultName ast.LocRef
+				switch s := stmt.Data.(type) {
+				case *js_ast.STypeScript, *js_ast.SExpr:
+					return stmt // Handle the "interface" case above
+				case *js_ast.SFunction:
+					if s.Fn.Name != nil {
+						defaultName = ast.LocRef{Loc: defaultLoc, Ref: s.Fn.Name.Ref}
+					} else {
+						defaultName = createDefaultName()
+					}
+				case *js_ast.SClass:
+					if s.Class.Name != nil {
+						defaultName = ast.LocRef{Loc: defaultLoc, Ref: s.Class.Name.Ref}
+					} else {
+						defaultName = createDefaultName()
+					}
+				default:
+					panic("Internal error")
+				}
+				return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportDefault{DefaultName: defaultName, Value: stmt}}
+			}
+
+			isIdentifier := p.lexer.Token == js_lexer.TIdentifier
+			name := p.lexer.Identifier.String
+			expr := p.parseExpr(js_ast.LComma)
+
+			// "export default abstract class {}"
+			// "export default abstract class Foo {}"
+			if p.options.ts.Parse && isIdentifier && name == "abstract" && !p.lexer.HasNewlineBefore {
+				if _, ok := expr.Data.(*js_ast.EIdentifier); ok && p.lexer.Token == js_lexer.TClass {
+					stmt := p.parseClassStmt(loc, parseStmtOpts{
+						deferredDecorators: opts.deferredDecorators,
+						isNameOptional:     true,
+					})
+
+					// Use the statement name if present, since it's a better name
+					var defaultName ast.LocRef
+					if s, ok := stmt.Data.(*js_ast.SClass); ok && s.Class.Name != nil {
+						defaultName = ast.LocRef{Loc: defaultLoc, Ref: s.Class.Name.Ref}
+					} else {
+						defaultName = createDefaultName()
+					}
+
+					return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportDefault{DefaultName: defaultName, Value: stmt}}
+				}
+			}
+
+			p.lexer.ExpectOrInsertSemicolon()
+			defaultName := createDefaultName()
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportDefault{
+				DefaultName: defaultName, Value: js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}}}}
+
+		case js_lexer.TAsterisk:
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+			}
+
+			p.lexer.Next()
+			var namespaceRef ast.Ref
+			var alias *js_ast.ExportStarAlias
+			var pathRange logger.Range
+			var pathText string
+			var assertOrWith *ast.ImportAssertOrWith
+			var flags ast.ImportRecordFlags
+
+			if p.lexer.IsContextualKeyword("as") {
+				// "export * as ns from 'path'"
+				p.lexer.Next()
+				name := p.parseClauseAlias("export")
+				namespaceRef = p.storeNameInRef(name)
+				alias = &js_ast.ExportStarAlias{Loc: p.lexer.Loc(), OriginalName: name.String}
+				p.lexer.Next()
+				p.lexer.ExpectContextualKeyword("from")
+				pathRange, pathText, assertOrWith, flags = p.parsePath()
+			} else {
+				// "export * from 'path'"
+				p.lexer.ExpectContextualKeyword("from")
+				pathRange, pathText, assertOrWith, flags = p.parsePath()
+				name := js_ast.GenerateNonUniqueNameFromPath(pathText) + "_star"
+				namespaceRef = p.storeNameInRef(js_lexer.MaybeSubstring{String: name})
+			}
+			importRecordIndex := p.addImportRecord(ast.ImportStmt, pathRange, pathText, assertOrWith, flags)
+
+			// Export-star statements anywhere in the file disable top-level const
+			// local prefix because import cycles can be used to trigger TDZ
+			p.currentScope.IsAfterConstLocalPrefix = true
+
+			p.lexer.ExpectOrInsertSemicolon()
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportStar{
+				NamespaceRef:      namespaceRef,
+				Alias:             alias,
+				ImportRecordIndex: importRecordIndex,
+			}}
+
+		case js_lexer.TOpenBrace:
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+			}
+
+			items, isSingleLine := p.parseExportClause()
+			if p.lexer.IsContextualKeyword("from") {
+				// "export {} from 'path'"
+				p.lexer.Next()
+				pathLoc, pathText, assertOrWith, flags := p.parsePath()
+				importRecordIndex := p.addImportRecord(ast.ImportStmt, pathLoc, pathText, assertOrWith, flags)
+				name := "import_" + js_ast.GenerateNonUniqueNameFromPath(pathText)
+				namespaceRef := p.storeNameInRef(js_lexer.MaybeSubstring{String: name})
+
+				// Export clause statements anywhere in the file disable top-level const
+				// local prefix because import cycles can be used to trigger TDZ
+				p.currentScope.IsAfterConstLocalPrefix = true
+
+				p.lexer.ExpectOrInsertSemicolon()
+				return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportFrom{
+					Items:             items,
+					NamespaceRef:      namespaceRef,
+					ImportRecordIndex: importRecordIndex,
+					IsSingleLine:      isSingleLine,
+				}}
+			}
+
+			p.lexer.ExpectOrInsertSemicolon()
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportClause{Items: items, IsSingleLine: isSingleLine}}
+
+		case js_lexer.TEquals:
+			// "export = value;"
+			p.esmExportKeyword = previousExportKeyword // This wasn't an ESM export statement after all
+			if p.options.ts.Parse {
+				p.lexer.Next()
+				value := p.parseExpr(js_ast.LLowest)
+				p.lexer.ExpectOrInsertSemicolon()
+				return js_ast.Stmt{Loc: loc, Data: &js_ast.SExportEquals{Value: value}}
+			}
+			p.lexer.Unexpected()
+			return js_ast.Stmt{}
+
+		default:
+			p.lexer.Unexpected()
+			return js_ast.Stmt{}
+		}
+
+	case js_lexer.TFunction:
+		p.lexer.Next()
+		return p.parseFnStmt(loc, opts, false /* isAsync */, logger.Range{})
+
+	case js_lexer.TEnum:
+		if !p.options.ts.Parse {
+			p.lexer.Unexpected()
+		}
+		return p.parseTypeScriptEnumStmt(loc, opts)
+
+	case js_lexer.TAt:
+		// Parse decorators before class statements, which are potentially exported
+		scopeIndex := len(p.scopesInOrder)
+		decorators := p.parseDecorators(p.currentScope, logger.Range{}, 0)
+
+		// "@x export @y class Foo {}"
+		if opts.deferredDecorators != nil {
+			p.log.AddError(&p.tracker, logger.Range{Loc: loc, Len: 1}, "Decorators are not valid here")
+			p.discardScopesUpTo(scopeIndex)
+			return p.parseStmt(opts)
+		}
+
+		// If this turns out to be a "declare class" statement, we need to undo the
+		// scopes that were potentially pushed while parsing the decorator arguments.
+		// That can look like any one of the following:
+		//
+		//   "@decorator declare class Foo {}"
+		//   "@decorator declare abstract class Foo {}"
+		//   "@decorator export declare class Foo {}"
+		//   "@decorator export declare abstract class Foo {}"
+		//
+		opts.deferredDecorators = &deferredDecorators{
+			decorators: decorators,
+		}
+
+		stmt := p.parseStmt(opts)
+
+		// Check for valid decorator targets
+		switch s := stmt.Data.(type) {
+		case *js_ast.SClass:
+			return stmt
+
+		case *js_ast.SExportDefault:
+			switch s.Value.Data.(type) {
+			case *js_ast.SClass:
+				return stmt
+			}
+
+		case *js_ast.STypeScript:
+			if s.WasDeclareClass {
+				// If this is a type declaration, discard any scopes that were pushed
+				// while parsing decorators. Unlike with the class statements above,
+				// these scopes won't end up being visited during the upcoming visit
+				// pass because type declarations aren't visited at all.
+				p.discardScopesUpTo(scopeIndex)
+				return stmt
+			}
+		}
+
+		// Forbid decorators on anything other than a class statement
+		p.log.AddError(&p.tracker, logger.Range{Loc: loc, Len: 1}, "Decorators are not valid here")
+		stmt.Data = js_ast.STypeScriptShared
+		p.discardScopesUpTo(scopeIndex)
+		return stmt
+
+	case js_lexer.TClass:
+		if opts.lexicalDecl != lexicalDeclAllowAll {
+			p.forbidLexicalDecl(loc)
+		}
+		return p.parseClassStmt(loc, opts)
+
+	case js_lexer.TVar:
+		p.lexer.Next()
+		decls := p.parseAndDeclareDecls(ast.SymbolHoisted, opts)
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+			Kind:     js_ast.LocalVar,
+			Decls:    decls,
+			IsExport: opts.isExport,
+		}}
+
+	case js_lexer.TConst:
+		if opts.lexicalDecl != lexicalDeclAllowAll {
+			p.forbidLexicalDecl(loc)
+		}
+		p.markSyntaxFeature(compat.ConstAndLet, p.lexer.Range())
+		p.lexer.Next()
+
+		if p.options.ts.Parse && p.lexer.Token == js_lexer.TEnum {
+			return p.parseTypeScriptEnumStmt(loc, opts)
+		}
+
+		decls := p.parseAndDeclareDecls(ast.SymbolConst, opts)
+		p.lexer.ExpectOrInsertSemicolon()
+		if !opts.isTypeScriptDeclare {
+			p.requireInitializers(js_ast.LocalConst, decls)
+		}
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+			Kind:     js_ast.LocalConst,
+			Decls:    decls,
+			IsExport: opts.isExport,
+		}}
+
+	case js_lexer.TIf:
+		p.lexer.Next()
+		p.lexer.Expect(js_lexer.TOpenParen)
+		test := p.parseExpr(js_ast.LLowest)
+		p.lexer.Expect(js_lexer.TCloseParen)
+		isSingleLineYes := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+		yes := p.parseStmt(parseStmtOpts{lexicalDecl: lexicalDeclAllowFnInsideIf})
+		var noOrNil js_ast.Stmt
+		var isSingleLineNo bool
+		if p.lexer.Token == js_lexer.TElse {
+			p.lexer.Next()
+			isSingleLineNo = !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+			noOrNil = p.parseStmt(parseStmtOpts{lexicalDecl: lexicalDeclAllowFnInsideIf})
+		}
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SIf{Test: test, Yes: yes, NoOrNil: noOrNil, IsSingleLineYes: isSingleLineYes, IsSingleLineNo: isSingleLineNo}}
+
+	case js_lexer.TDo:
+		p.lexer.Next()
+		body := p.parseStmt(parseStmtOpts{})
+		p.lexer.Expect(js_lexer.TWhile)
+		p.lexer.Expect(js_lexer.TOpenParen)
+		test := p.parseExpr(js_ast.LLowest)
+		p.lexer.Expect(js_lexer.TCloseParen)
+
+		// This is a weird corner case where automatic semicolon insertion applies
+		// even without a newline present
+		if p.lexer.Token == js_lexer.TSemicolon {
+			p.lexer.Next()
+		}
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SDoWhile{Body: body, Test: test}}
+
+	case js_lexer.TWhile:
+		p.lexer.Next()
+		p.lexer.Expect(js_lexer.TOpenParen)
+		test := p.parseExpr(js_ast.LLowest)
+		p.lexer.Expect(js_lexer.TCloseParen)
+		isSingleLineBody := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+		body := p.parseStmt(parseStmtOpts{})
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SWhile{Test: test, Body: body, IsSingleLineBody: isSingleLineBody}}
+
+	case js_lexer.TWith:
+		p.lexer.Next()
+		p.lexer.Expect(js_lexer.TOpenParen)
+		test := p.parseExpr(js_ast.LLowest)
+		bodyLoc := p.lexer.Loc()
+		p.lexer.Expect(js_lexer.TCloseParen)
+
+		// Push a scope so we make sure to prevent any bare identifiers referenced
+		// within the body from being renamed. Renaming them might change the
+		// semantics of the code.
+		p.pushScopeForParsePass(js_ast.ScopeWith, bodyLoc)
+		isSingleLineBody := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+		body := p.parseStmt(parseStmtOpts{})
+		p.popScope()
+
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SWith{Value: test, BodyLoc: bodyLoc, Body: body, IsSingleLineBody: isSingleLineBody}}
+
+	case js_lexer.TSwitch:
+		p.lexer.Next()
+		p.lexer.Expect(js_lexer.TOpenParen)
+		test := p.parseExpr(js_ast.LLowest)
+		p.lexer.Expect(js_lexer.TCloseParen)
+
+		bodyLoc := p.lexer.Loc()
+		p.pushScopeForParsePass(js_ast.ScopeBlock, bodyLoc)
+		defer p.popScope()
+
+		p.lexer.Expect(js_lexer.TOpenBrace)
+		cases := []js_ast.Case{}
+		foundDefault := false
+
+		for p.lexer.Token != js_lexer.TCloseBrace {
+			var value js_ast.Expr
+			body := []js_ast.Stmt{}
+			caseLoc := p.saveExprCommentsHere()
+
+			if p.lexer.Token == js_lexer.TDefault {
+				if foundDefault {
+					p.log.AddError(&p.tracker, p.lexer.Range(), "Multiple default clauses are not allowed")
+					panic(js_lexer.LexerPanic{})
+				}
+				foundDefault = true
+				p.lexer.Next()
+				p.lexer.Expect(js_lexer.TColon)
+			} else {
+				p.lexer.Expect(js_lexer.TCase)
+				value = p.parseExpr(js_ast.LLowest)
+				p.lexer.Expect(js_lexer.TColon)
+			}
+
+		caseBody:
+			for {
+				switch p.lexer.Token {
+				case js_lexer.TCloseBrace, js_lexer.TCase, js_lexer.TDefault:
+					break caseBody
+
+				default:
+					body = append(body, p.parseStmt(parseStmtOpts{lexicalDecl: lexicalDeclAllowAll}))
+				}
+			}
+
+			cases = append(cases, js_ast.Case{ValueOrNil: value, Body: body, Loc: caseLoc})
+		}
+
+		closeBraceLoc := p.lexer.Loc()
+		p.lexer.Expect(js_lexer.TCloseBrace)
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SSwitch{
+			Test:          test,
+			Cases:         cases,
+			BodyLoc:       bodyLoc,
+			CloseBraceLoc: closeBraceLoc,
+		}}
+
+	case js_lexer.TTry:
+		p.lexer.Next()
+		blockLoc := p.lexer.Loc()
+		p.lexer.Expect(js_lexer.TOpenBrace)
+		p.pushScopeForParsePass(js_ast.ScopeBlock, loc)
+		body := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{})
+		p.popScope()
+		closeBraceLoc := p.lexer.Loc()
+		p.lexer.Next()
+
+		var catch *js_ast.Catch = nil
+		var finally *js_ast.Finally = nil
+
+		if p.lexer.Token == js_lexer.TCatch {
+			catchLoc := p.lexer.Loc()
+			p.pushScopeForParsePass(js_ast.ScopeCatchBinding, catchLoc)
+			p.lexer.Next()
+			var bindingOrNil js_ast.Binding
+
+			// The catch binding is optional, and can be omitted
+			if p.lexer.Token == js_lexer.TOpenBrace {
+				if p.options.unsupportedJSFeatures.Has(compat.OptionalCatchBinding) {
+					// Generate a new symbol for the catch binding for older browsers
+					ref := p.newSymbol(ast.SymbolOther, "e")
+					p.currentScope.Generated = append(p.currentScope.Generated, ref)
+					bindingOrNil = js_ast.Binding{Loc: p.lexer.Loc(), Data: &js_ast.BIdentifier{Ref: ref}}
+				}
+			} else {
+				p.lexer.Expect(js_lexer.TOpenParen)
+				bindingOrNil = p.parseBinding(parseBindingOpts{})
+
+				// Skip over types
+				if p.options.ts.Parse && p.lexer.Token == js_lexer.TColon {
+					p.lexer.Expect(js_lexer.TColon)
+					p.skipTypeScriptType(js_ast.LLowest)
+				}
+
+				p.lexer.Expect(js_lexer.TCloseParen)
+
+				// Bare identifiers are a special case
+				kind := ast.SymbolOther
+				if _, ok := bindingOrNil.Data.(*js_ast.BIdentifier); ok {
+					kind = ast.SymbolCatchIdentifier
+				}
+				p.declareBinding(kind, bindingOrNil, parseStmtOpts{})
+			}
+
+			blockLoc := p.lexer.Loc()
+			p.lexer.Expect(js_lexer.TOpenBrace)
+
+			p.pushScopeForParsePass(js_ast.ScopeBlock, blockLoc)
+			stmts := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{})
+			p.popScope()
+
+			closeBraceLoc := p.lexer.Loc()
+			p.lexer.Next()
+			catch = &js_ast.Catch{Loc: catchLoc, BindingOrNil: bindingOrNil, BlockLoc: blockLoc, Block: js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc}}
+			p.popScope()
+		}
+
+		if p.lexer.Token == js_lexer.TFinally || catch == nil {
+			finallyLoc := p.lexer.Loc()
+			p.pushScopeForParsePass(js_ast.ScopeBlock, finallyLoc)
+			p.lexer.Expect(js_lexer.TFinally)
+			p.lexer.Expect(js_lexer.TOpenBrace)
+			stmts := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{})
+			closeBraceLoc := p.lexer.Loc()
+			p.lexer.Next()
+			finally = &js_ast.Finally{Loc: finallyLoc, Block: js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc}}
+			p.popScope()
+		}
+
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.STry{
+			BlockLoc: blockLoc,
+			Block:    js_ast.SBlock{Stmts: body, CloseBraceLoc: closeBraceLoc},
+			Catch:    catch,
+			Finally:  finally,
+		}}
+
+	case js_lexer.TFor:
+		p.pushScopeForParsePass(js_ast.ScopeBlock, loc)
+		defer p.popScope()
+
+		p.lexer.Next()
+
+		// "for await (let x of y) {}"
+		var awaitRange logger.Range
+		if p.lexer.IsContextualKeyword("await") {
+			awaitRange = p.lexer.Range()
+			if p.fnOrArrowDataParse.await != allowExpr {
+				p.log.AddError(&p.tracker, awaitRange, "Cannot use \"await\" outside an async function")
+				awaitRange = logger.Range{}
+			} else {
+				didGenerateError := false
+				if p.fnOrArrowDataParse.isTopLevel {
+					p.topLevelAwaitKeyword = awaitRange
+				}
+				if !didGenerateError && p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) && p.options.unsupportedJSFeatures.Has(compat.Generator) {
+					// If for-await loops aren't supported, then we only support lowering
+					// if either async/await or generators is supported. Otherwise we
+					// cannot lower for-await loops.
+					p.markSyntaxFeature(compat.ForAwait, awaitRange)
+				}
+			}
+			p.lexer.Next()
+		}
+
+		p.lexer.Expect(js_lexer.TOpenParen)
+
+		var initOrNil js_ast.Stmt
+		var testOrNil js_ast.Expr
+		var updateOrNil js_ast.Expr
+
+		// "in" expressions aren't allowed here
+		p.allowIn = false
+
+		var badLetRange logger.Range
+		if p.lexer.IsContextualKeyword("let") {
+			badLetRange = p.lexer.Range()
+		}
+		decls := []js_ast.Decl{}
+		initLoc := p.lexer.Loc()
+		isVar := false
+		switch p.lexer.Token {
+		case js_lexer.TVar:
+			isVar = true
+			p.lexer.Next()
+			decls = p.parseAndDeclareDecls(ast.SymbolHoisted, parseStmtOpts{})
+			initOrNil = js_ast.Stmt{Loc: initLoc, Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: decls}}
+
+		case js_lexer.TConst:
+			p.markSyntaxFeature(compat.ConstAndLet, p.lexer.Range())
+			p.lexer.Next()
+			decls = p.parseAndDeclareDecls(ast.SymbolConst, parseStmtOpts{})
+			initOrNil = js_ast.Stmt{Loc: initLoc, Data: &js_ast.SLocal{Kind: js_ast.LocalConst, Decls: decls}}
+
+		case js_lexer.TSemicolon:
+
+		default:
+			var expr js_ast.Expr
+			var stmt js_ast.Stmt
+			expr, stmt, decls = p.parseExprOrLetOrUsingStmt(parseStmtOpts{
+				lexicalDecl:        lexicalDeclAllowAll,
+				isForLoopInit:      true,
+				isForAwaitLoopInit: awaitRange.Len > 0,
+			})
+			if stmt.Data != nil {
+				badLetRange = logger.Range{}
+				initOrNil = stmt
+			} else {
+				initOrNil = js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}}
+			}
+		}
+
+		// "in" expressions are allowed again
+		p.allowIn = true
+
+		// Detect for-of loops
+		if p.lexer.IsContextualKeyword("of") || awaitRange.Len > 0 {
+			if badLetRange.Len > 0 {
+				p.log.AddError(&p.tracker, badLetRange, "\"let\" must be wrapped in parentheses to be used as an expression here:")
+			}
+			if awaitRange.Len > 0 && !p.lexer.IsContextualKeyword("of") {
+				if initOrNil.Data != nil {
+					p.lexer.ExpectedString("\"of\"")
+				} else {
+					p.lexer.Unexpected()
+				}
+			}
+			p.forbidInitializers(decls, "of", false)
+			p.markSyntaxFeature(compat.ForOf, p.lexer.Range())
+			p.lexer.Next()
+			value := p.parseExpr(js_ast.LComma)
+			p.lexer.Expect(js_lexer.TCloseParen)
+			isSingleLineBody := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+			body := p.parseStmt(parseStmtOpts{})
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SForOf{Await: awaitRange, Init: initOrNil, Value: value, Body: body, IsSingleLineBody: isSingleLineBody}}
+		}
+
+		// Detect for-in loops
+		if p.lexer.Token == js_lexer.TIn {
+			p.forbidInitializers(decls, "in", isVar)
+			if len(decls) == 1 {
+				if local, ok := initOrNil.Data.(*js_ast.SLocal); ok {
+					if local.Kind == js_ast.LocalUsing {
+						p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, initOrNil.Loc), "\"using\" declarations are not allowed here")
+					} else if local.Kind == js_ast.LocalAwaitUsing {
+						p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, initOrNil.Loc), "\"await using\" declarations are not allowed here")
+					}
+				}
+			}
+			p.lexer.Next()
+			value := p.parseExpr(js_ast.LLowest)
+			p.lexer.Expect(js_lexer.TCloseParen)
+			isSingleLineBody := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+			body := p.parseStmt(parseStmtOpts{})
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SForIn{Init: initOrNil, Value: value, Body: body, IsSingleLineBody: isSingleLineBody}}
+		}
+
+		p.lexer.Expect(js_lexer.TSemicolon)
+
+		// "await using" declarations are only allowed in for-of loops
+		if local, ok := initOrNil.Data.(*js_ast.SLocal); ok && local.Kind == js_ast.LocalAwaitUsing {
+			p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, initOrNil.Loc), "\"await using\" declarations are not allowed here")
+		}
+
+		// Only require "const" statement initializers when we know we're a normal for loop
+		if local, ok := initOrNil.Data.(*js_ast.SLocal); ok && (local.Kind == js_ast.LocalConst || local.Kind == js_ast.LocalUsing) {
+			p.requireInitializers(local.Kind, decls)
+		}
+
+		if p.lexer.Token != js_lexer.TSemicolon {
+			testOrNil = p.parseExpr(js_ast.LLowest)
+		}
+
+		p.lexer.Expect(js_lexer.TSemicolon)
+
+		if p.lexer.Token != js_lexer.TCloseParen {
+			updateOrNil = p.parseExpr(js_ast.LLowest)
+		}
+
+		p.lexer.Expect(js_lexer.TCloseParen)
+		isSingleLineBody := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+		body := p.parseStmt(parseStmtOpts{})
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SFor{
+			InitOrNil:        initOrNil,
+			TestOrNil:        testOrNil,
+			UpdateOrNil:      updateOrNil,
+			Body:             body,
+			IsSingleLineBody: isSingleLineBody,
+		}}
+
+	case js_lexer.TImport:
+		previousImportStatementKeyword := p.esmImportStatementKeyword
+		p.esmImportStatementKeyword = p.lexer.Range()
+		p.lexer.Next()
+		stmt := js_ast.SImport{}
+		wasOriginallyBareImport := false
+
+		// "export import foo = bar"
+		// "import foo = bar" in a namespace
+		if (opts.isExport || (opts.isNamespaceScope && !opts.isTypeScriptDeclare)) && p.lexer.Token != js_lexer.TIdentifier {
+			p.lexer.Expected(js_lexer.TIdentifier)
+		}
+
+	syntaxBeforePath:
+		switch p.lexer.Token {
+		case js_lexer.TOpenParen, js_lexer.TDot:
+			// "import('path')"
+			// "import.meta"
+			p.esmImportStatementKeyword = previousImportStatementKeyword // This wasn't an ESM import statement after all
+			expr := p.parseSuffix(p.parseImportExpr(loc, js_ast.LLowest), js_ast.LLowest, nil, 0)
+			p.lexer.ExpectOrInsertSemicolon()
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}}
+
+		case js_lexer.TStringLiteral, js_lexer.TNoSubstitutionTemplateLiteral:
+			// "import 'path'"
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+				return js_ast.Stmt{}
+			}
+
+			wasOriginallyBareImport = true
+
+		case js_lexer.TAsterisk:
+			// "import * as ns from 'path'"
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+				return js_ast.Stmt{}
+			}
+
+			p.lexer.Next()
+			p.lexer.ExpectContextualKeyword("as")
+			stmt.NamespaceRef = p.storeNameInRef(p.lexer.Identifier)
+			starLoc := p.lexer.Loc()
+			stmt.StarNameLoc = &starLoc
+			p.lexer.Expect(js_lexer.TIdentifier)
+			p.lexer.ExpectContextualKeyword("from")
+
+		case js_lexer.TOpenBrace:
+			// "import {item1, item2} from 'path'"
+			if !opts.isModuleScope && (!opts.isNamespaceScope || !opts.isTypeScriptDeclare) {
+				p.lexer.Unexpected()
+				return js_ast.Stmt{}
+			}
+
+			items, isSingleLine := p.parseImportClause()
+			stmt.Items = &items
+			stmt.IsSingleLine = isSingleLine
+			p.lexer.ExpectContextualKeyword("from")
+
+		case js_lexer.TIdentifier:
+			// "import defaultItem from 'path'"
+			// "import foo = bar"
+			if !opts.isModuleScope && !opts.isNamespaceScope {
+				p.lexer.Unexpected()
+				return js_ast.Stmt{}
+			}
+
+			defaultName := p.lexer.Identifier
+			stmt.DefaultName = &ast.LocRef{Loc: p.lexer.Loc(), Ref: p.storeNameInRef(defaultName)}
+			p.lexer.Next()
+
+			if p.options.ts.Parse {
+				// Skip over type-only imports
+				if defaultName.String == "type" {
+					switch p.lexer.Token {
+					case js_lexer.TIdentifier:
+						nameSubstring := p.lexer.Identifier
+						nameLoc := p.lexer.Loc()
+						p.lexer.Next()
+						if p.lexer.Token == js_lexer.TEquals {
+							// "import type foo = require('bar');"
+							// "import type foo = bar.baz;"
+							opts.isTypeScriptDeclare = true
+							return p.parseTypeScriptImportEqualsStmt(loc, opts, nameLoc, nameSubstring.String)
+						} else if p.lexer.Token == js_lexer.TStringLiteral && nameSubstring.String == "from" {
+							// "import type from 'bar';"
+							break syntaxBeforePath
+						} else {
+							// "import type foo from 'bar';"
+							p.lexer.ExpectContextualKeyword("from")
+							p.parsePath()
+							p.lexer.ExpectOrInsertSemicolon()
+							return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+						}
+
+					case js_lexer.TAsterisk:
+						// "import type * as foo from 'bar';"
+						p.lexer.Next()
+						p.lexer.ExpectContextualKeyword("as")
+						p.lexer.Expect(js_lexer.TIdentifier)
+						p.lexer.ExpectContextualKeyword("from")
+						p.parsePath()
+						p.lexer.ExpectOrInsertSemicolon()
+						return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+
+					case js_lexer.TOpenBrace:
+						// "import type {foo} from 'bar';"
+						p.parseImportClause()
+						p.lexer.ExpectContextualKeyword("from")
+						p.parsePath()
+						p.lexer.ExpectOrInsertSemicolon()
+						return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+					}
+				}
+
+				// Parse TypeScript import assignment statements
+				if p.lexer.Token == js_lexer.TEquals || opts.isExport || (opts.isNamespaceScope && !opts.isTypeScriptDeclare) {
+					p.esmImportStatementKeyword = previousImportStatementKeyword // This wasn't an ESM import statement after all
+					return p.parseTypeScriptImportEqualsStmt(loc, opts, stmt.DefaultName.Loc, defaultName.String)
+				}
+			}
+
+			if p.lexer.Token == js_lexer.TComma {
+				p.lexer.Next()
+				switch p.lexer.Token {
+				case js_lexer.TAsterisk:
+					// "import defaultItem, * as ns from 'path'"
+					p.lexer.Next()
+					p.lexer.ExpectContextualKeyword("as")
+					stmt.NamespaceRef = p.storeNameInRef(p.lexer.Identifier)
+					starLoc := p.lexer.Loc()
+					stmt.StarNameLoc = &starLoc
+					p.lexer.Expect(js_lexer.TIdentifier)
+
+				case js_lexer.TOpenBrace:
+					// "import defaultItem, {item1, item2} from 'path'"
+					items, isSingleLine := p.parseImportClause()
+					stmt.Items = &items
+					stmt.IsSingleLine = isSingleLine
+
+				default:
+					p.lexer.Unexpected()
+				}
+			}
+
+			p.lexer.ExpectContextualKeyword("from")
+
+		default:
+			p.lexer.Unexpected()
+			return js_ast.Stmt{}
+		}
+
+		pathLoc, pathText, assertOrWith, flags := p.parsePath()
+		p.lexer.ExpectOrInsertSemicolon()
+
+		// If TypeScript's "preserveValueImports": true setting is active, TypeScript's
+		// "importsNotUsedAsValues": "preserve" setting is NOT active, and the import
+		// clause is present and empty (or is non-empty but filled with type-only
+		// items), then the import statement should still be removed entirely to match
+		// the behavior of the TypeScript compiler:
+		//
+		//   // Keep these
+		//   import 'x'
+		//   import { y } from 'x'
+		//   import { y, type z } from 'x'
+		//
+		//   // Remove these
+		//   import {} from 'x'
+		//   import { type y } from 'x'
+		//
+		//   // Remove the items from these
+		//   import d, {} from 'x'
+		//   import d, { type y } from 'x'
+		//
+		if p.options.ts.Parse && p.options.ts.Config.UnusedImportFlags() == config.TSUnusedImport_KeepValues && stmt.Items != nil && len(*stmt.Items) == 0 {
+			if stmt.DefaultName == nil {
+				return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+			}
+			stmt.Items = nil
+		}
+
+		if wasOriginallyBareImport {
+			flags |= ast.WasOriginallyBareImport
+		}
+		stmt.ImportRecordIndex = p.addImportRecord(ast.ImportStmt, pathLoc, pathText, assertOrWith, flags)
+
+		if stmt.StarNameLoc != nil {
+			name := p.loadNameFromRef(stmt.NamespaceRef)
+			stmt.NamespaceRef = p.declareSymbol(ast.SymbolImport, *stmt.StarNameLoc, name)
+		} else {
+			// Generate a symbol for the namespace
+			name := "import_" + js_ast.GenerateNonUniqueNameFromPath(pathText)
+			stmt.NamespaceRef = p.newSymbol(ast.SymbolOther, name)
+			p.currentScope.Generated = append(p.currentScope.Generated, stmt.NamespaceRef)
+		}
+		itemRefs := make(map[string]ast.LocRef)
+
+		// Link the default item to the namespace
+		if stmt.DefaultName != nil {
+			name := p.loadNameFromRef(stmt.DefaultName.Ref)
+			ref := p.declareSymbol(ast.SymbolImport, stmt.DefaultName.Loc, name)
+			p.isImportItem[ref] = true
+			stmt.DefaultName.Ref = ref
+		}
+
+		// Link each import item to the namespace
+		if stmt.Items != nil {
+			for i, item := range *stmt.Items {
+				name := p.loadNameFromRef(item.Name.Ref)
+				ref := p.declareSymbol(ast.SymbolImport, item.Name.Loc, name)
+				p.checkForUnrepresentableIdentifier(item.AliasLoc, item.Alias)
+				p.isImportItem[ref] = true
+				(*stmt.Items)[i].Name.Ref = ref
+				itemRefs[item.Alias] = ast.LocRef{Loc: item.Name.Loc, Ref: ref}
+			}
+		}
+
+		// Track the items for this namespace
+		p.importItemsForNamespace[stmt.NamespaceRef] = namespaceImportItems{
+			entries:           itemRefs,
+			importRecordIndex: stmt.ImportRecordIndex,
+		}
+
+		// Import statements anywhere in the file disable top-level const
+		// local prefix because import cycles can be used to trigger TDZ
+		p.currentScope.IsAfterConstLocalPrefix = true
+		return js_ast.Stmt{Loc: loc, Data: &stmt}
+
+	case js_lexer.TBreak:
+		p.lexer.Next()
+		name := p.parseLabelName()
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SBreak{Label: name}}
+
+	case js_lexer.TContinue:
+		p.lexer.Next()
+		name := p.parseLabelName()
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SContinue{Label: name}}
+
+	case js_lexer.TReturn:
+		if p.fnOrArrowDataParse.isReturnDisallowed {
+			p.log.AddError(&p.tracker, p.lexer.Range(), "A return statement cannot be used here:")
+		}
+		p.lexer.Next()
+		var value js_ast.Expr
+		if p.lexer.Token != js_lexer.TSemicolon &&
+			!p.lexer.HasNewlineBefore &&
+			p.lexer.Token != js_lexer.TCloseBrace &&
+			p.lexer.Token != js_lexer.TEndOfFile {
+			value = p.parseExpr(js_ast.LLowest)
+		}
+		p.latestReturnHadSemicolon = p.lexer.Token == js_lexer.TSemicolon
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SReturn{ValueOrNil: value}}
+
+	case js_lexer.TThrow:
+		p.lexer.Next()
+		if p.lexer.HasNewlineBefore {
+			endLoc := logger.Loc{Start: loc.Start + 5}
+			p.log.AddError(&p.tracker, logger.Range{Loc: endLoc},
+				"Unexpected newline after \"throw\"")
+			return js_ast.Stmt{Loc: loc, Data: &js_ast.SThrow{Value: js_ast.Expr{Loc: endLoc, Data: js_ast.ENullShared}}}
+		}
+		expr := p.parseExpr(js_ast.LLowest)
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SThrow{Value: expr}}
+
+	case js_lexer.TDebugger:
+		p.lexer.Next()
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: js_ast.SDebuggerShared}
+
+	case js_lexer.TOpenBrace:
+		p.pushScopeForParsePass(js_ast.ScopeBlock, loc)
+		defer p.popScope()
+
+		p.lexer.Next()
+		stmts := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{})
+		closeBraceLoc := p.lexer.Loc()
+		p.lexer.Next()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc}}
+
+	default:
+		isIdentifier := p.lexer.Token == js_lexer.TIdentifier
+		nameRange := p.lexer.Range()
+		name := p.lexer.Identifier.String
+
+		// Parse either an async function, an async expression, or a normal expression
+		var expr js_ast.Expr
+		if isIdentifier && p.lexer.Raw() == "async" {
+			p.lexer.Next()
+			if p.lexer.Token == js_lexer.TFunction && !p.lexer.HasNewlineBefore {
+				p.lexer.Next()
+				return p.parseFnStmt(nameRange.Loc, opts, true /* isAsync */, nameRange)
+			}
+			expr = p.parseSuffix(p.parseAsyncPrefixExpr(nameRange, js_ast.LLowest, 0), js_ast.LLowest, nil, 0)
+		} else {
+			var stmt js_ast.Stmt
+			expr, stmt, _ = p.parseExprOrLetOrUsingStmt(opts)
+			if stmt.Data != nil {
+				p.lexer.ExpectOrInsertSemicolon()
+				return stmt
+			}
+		}
+
+		if isIdentifier {
+			if ident, ok := expr.Data.(*js_ast.EIdentifier); ok {
+				if p.lexer.Token == js_lexer.TColon && opts.deferredDecorators == nil {
+					p.pushScopeForParsePass(js_ast.ScopeLabel, loc)
+					defer p.popScope()
+
+					// Parse a labeled statement
+					p.lexer.Next()
+					name := ast.LocRef{Loc: expr.Loc, Ref: ident.Ref}
+					nestedOpts := parseStmtOpts{}
+					if opts.lexicalDecl == lexicalDeclAllowAll || opts.lexicalDecl == lexicalDeclAllowFnInsideLabel {
+						nestedOpts.lexicalDecl = lexicalDeclAllowFnInsideLabel
+					}
+					isSingleLineStmt := !p.lexer.HasNewlineBefore && p.lexer.Token != js_lexer.TOpenBrace
+					stmt := p.parseStmt(nestedOpts)
+					return js_ast.Stmt{Loc: loc, Data: &js_ast.SLabel{Name: name, Stmt: stmt, IsSingleLineStmt: isSingleLineStmt}}
+				}
+
+				if p.options.ts.Parse {
+					switch name {
+					case "type":
+						if !p.lexer.HasNewlineBefore && p.lexer.Token == js_lexer.TIdentifier {
+							// "type Foo = any"
+							p.skipTypeScriptTypeStmt(parseStmtOpts{isModuleScope: opts.isModuleScope})
+							return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+						}
+
+					case "namespace", "module":
+						// "namespace Foo {}"
+						// "module Foo {}"
+						// "declare module 'fs' {}"
+						// "declare module 'fs';"
+						if !p.lexer.HasNewlineBefore && (opts.isModuleScope || opts.isNamespaceScope) && (p.lexer.Token == js_lexer.TIdentifier ||
+							(p.lexer.Token == js_lexer.TStringLiteral && opts.isTypeScriptDeclare)) {
+							return p.parseTypeScriptNamespaceStmt(loc, opts)
+						}
+
+					case "interface":
+						// "interface Foo {}"
+						// "export default interface Foo {}"
+						// "export default interface \n Foo {}"
+						if !p.lexer.HasNewlineBefore || opts.isExportDefault {
+							p.skipTypeScriptInterfaceStmt(parseStmtOpts{isModuleScope: opts.isModuleScope})
+							return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+						}
+
+						// "interface \n Foo {}"
+						// "export interface \n Foo {}"
+						if opts.isExport {
+							p.log.AddError(&p.tracker, nameRange, "Unexpected \"interface\"")
+							panic(js_lexer.LexerPanic{})
+						}
+
+					case "abstract":
+						if !p.lexer.HasNewlineBefore && p.lexer.Token == js_lexer.TClass {
+							return p.parseClassStmt(loc, opts)
+						}
+
+					case "global":
+						// "declare module 'fs' { global { namespace NodeJS {} } }"
+						if opts.isNamespaceScope && opts.isTypeScriptDeclare && p.lexer.Token == js_lexer.TOpenBrace {
+							p.lexer.Next()
+							p.parseStmtsUpTo(js_lexer.TCloseBrace, opts)
+							p.lexer.Next()
+							return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+						}
+
+					case "declare":
+						if !p.lexer.HasNewlineBefore {
+							opts.lexicalDecl = lexicalDeclAllowAll
+							opts.isTypeScriptDeclare = true
+
+							// "declare global { ... }"
+							if p.lexer.IsContextualKeyword("global") {
+								p.lexer.Next()
+								p.lexer.Expect(js_lexer.TOpenBrace)
+								p.parseStmtsUpTo(js_lexer.TCloseBrace, opts)
+								p.lexer.Next()
+								return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+							}
+
+							// "declare const x: any"
+							scopeIndex := len(p.scopesInOrder)
+							oldLexer := p.lexer
+							stmt := p.parseStmt(opts)
+							typeDeclarationData := js_ast.STypeScriptShared
+							switch s := stmt.Data.(type) {
+							case *js_ast.SEmpty:
+								return js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}}
+
+							case *js_ast.STypeScript:
+								// Type declarations are expected. Propagate the "declare class"
+								// status in case our caller is a decorator that needs to know
+								// this was a "declare class" statement.
+								typeDeclarationData = s
+
+							case *js_ast.SLocal:
+								// This is also a type declaration (but doesn't use "STypeScript"
+								// because we need to be able to handle namespace exports below)
+
+							default:
+								// Anything that we don't expect is a syntax error. For example,
+								// we consider this a syntax error:
+								//
+								//   declare let declare: any, foo: any
+								//   declare foo
+								//
+								// Strangely TypeScript allows this code starting with version
+								// 4.4, but I assume this is a bug. This bug was reported here:
+								// https://github.com/microsoft/TypeScript/issues/54602
+								p.lexer = oldLexer
+								p.lexer.Unexpected()
+							}
+							p.discardScopesUpTo(scopeIndex)
+
+							// Unlike almost all uses of "declare", statements that use
+							// "export declare" with "var/let/const" inside a namespace affect
+							// code generation. They cause any declared bindings to be
+							// considered exports of the namespace. Identifier references to
+							// those names must be converted into property accesses off the
+							// namespace object:
+							//
+							//   namespace ns {
+							//     export declare const x
+							//     export function y() { return x }
+							//   }
+							//
+							//   (ns as any).x = 1
+							//   console.log(ns.y())
+							//
+							// In this example, "return x" must be replaced with "return ns.x".
+							// This is handled by replacing each "export declare" statement
+							// inside a namespace with an "export var" statement containing all
+							// of the declared bindings. That "export var" statement will later
+							// cause identifiers to be transformed into property accesses.
+							if opts.isNamespaceScope && opts.isExport {
+								var decls []js_ast.Decl
+								if s, ok := stmt.Data.(*js_ast.SLocal); ok {
+									js_ast.ForEachIdentifierBindingInDecls(s.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
+										decls = append(decls, js_ast.Decl{Binding: js_ast.Binding{Loc: loc, Data: b}})
+									})
+								}
+								if len(decls) > 0 {
+									return js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+										Kind:     js_ast.LocalVar,
+										IsExport: true,
+										Decls:    decls,
+									}}
+								}
+							}
+
+							return js_ast.Stmt{Loc: loc, Data: typeDeclarationData}
+						}
+					}
+				}
+			}
+		}
+
+		p.lexer.ExpectOrInsertSemicolon()
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}}
+	}
+}
+
+// addImportRecord records a new import ("import", "require", "import()",
+// etc.) discovered during parsing and returns its index into
+// "p.importRecords" so AST nodes can refer back to it.
+func (p *parser) addImportRecord(kind ast.ImportKind, pathRange logger.Range, text string, assertOrWith *ast.ImportAssertOrWith, flags ast.ImportRecordFlags) uint32 {
+	record := ast.ImportRecord{
+		Kind:         kind,
+		Range:        pathRange,
+		Path:         logger.Path{Text: text},
+		AssertOrWith: assertOrWith,
+		Flags:        flags,
+	}
+	p.importRecords = append(p.importRecords, record)
+	return uint32(len(p.importRecords) - 1)
+}
+
+// parseFnBody parses a braced function body ("{ ... }") and returns it as an
+// AST node. The per-function parse state is swapped in for the duration of
+// the body and restored afterwards. Note that "in" expressions are always
+// allowed inside a function body, even when the body lexically appears
+// inside a for-loop initializer (where "allowIn" would otherwise be false).
+func (p *parser) parseFnBody(data fnOrArrowDataParse) js_ast.FnBody {
+	// Save state scoped to the enclosing function/arrow
+	oldFnOrArrowData := p.fnOrArrowDataParse
+	oldAllowIn := p.allowIn
+	p.fnOrArrowDataParse = data
+	p.allowIn = true
+
+	loc := p.lexer.Loc()
+	p.pushScopeForParsePass(js_ast.ScopeFunctionBody, loc)
+	defer p.popScope()
+
+	p.lexer.Expect(js_lexer.TOpenBrace)
+	// Function bodies may begin with a directive prologue (e.g. "use strict")
+	stmts := p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{
+		allowDirectivePrologue: true,
+	})
+	closeBraceLoc := p.lexer.Loc()
+	p.lexer.Next()
+
+	// Restore the saved state
+	p.allowIn = oldAllowIn
+	p.fnOrArrowDataParse = oldFnOrArrowData
+	return js_ast.FnBody{Loc: loc, Block: js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc}}
+}
+
+// forbidLexicalDecl reports an error for a lexical declaration that appears
+// where only a plain statement is allowed (e.g. as the un-braced body of an
+// "if" statement).
+func (p *parser) forbidLexicalDecl(loc logger.Loc) {
+	p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, loc),
+		"Cannot use a declaration in a single-statement context")
+}
+
+// parseStmtsUpTo parses statements until the "end" token (e.g. "}") is
+// reached; the "end" token itself is not consumed. Along the way it
+// preserves statement-level legal comments, skips TypeScript type-only
+// statements, processes a leading directive prologue ("use strict",
+// "use asm"), and warns about return values lost to automatic semicolon
+// insertion.
+func (p *parser) parseStmtsUpTo(end js_lexer.T, opts parseStmtOpts) []js_ast.Stmt {
+	stmts := []js_ast.Stmt{}
+	returnWithoutSemicolonStart := int32(-1)
+	opts.lexicalDecl = lexicalDeclAllowAll
+	isDirectivePrologue := opts.allowDirectivePrologue
+
+	for {
+		// Preserve some statement-level comments
+		comments := p.lexer.LegalCommentsBeforeToken
+		if len(comments) > 0 {
+			for _, comment := range comments {
+				stmts = append(stmts, js_ast.Stmt{
+					Loc: comment.Loc,
+					Data: &js_ast.SComment{
+						Text:           p.source.CommentTextWithoutIndent(comment),
+						IsLegalComment: true,
+					},
+				})
+			}
+		}
+
+		if p.lexer.Token == end {
+			break
+		}
+
+		stmt := p.parseStmt(opts)
+
+		// Skip TypeScript types entirely
+		if p.options.ts.Parse {
+			if _, ok := stmt.Data.(*js_ast.STypeScript); ok {
+				continue
+			}
+		}
+
+		// Parse one or more directives at the beginning
+		if isDirectivePrologue {
+			isDirectivePrologue = false
+			if expr, ok := stmt.Data.(*js_ast.SExpr); ok {
+				if str, ok := expr.Value.Data.(*js_ast.EString); ok && !str.PreferTemplate {
+					stmt.Data = &js_ast.SDirective{Value: str.Value, LegacyOctalLoc: str.LegacyOctalLoc}
+					// Only keep scanning for directives while each statement
+					// is itself a string-literal expression statement
+					isDirectivePrologue = true
+
+					if helpers.UTF16EqualsString(str.Value, "use strict") {
+						// Track "use strict" directives
+						p.currentScope.StrictMode = js_ast.ExplicitStrictMode
+						p.currentScope.UseStrictLoc = expr.Value.Loc
+
+						// Inside a function, strict mode actually propagates from the child
+						// scope to the parent scope:
+						//
+						//   // This is a syntax error
+						//   function fn(arguments) {
+						//     "use strict";
+						//   }
+						//
+						if p.currentScope.Kind == js_ast.ScopeFunctionBody &&
+							p.currentScope.Parent.Kind == js_ast.ScopeFunctionArgs &&
+							p.currentScope.Parent.StrictMode == js_ast.SloppyMode {
+							p.currentScope.Parent.StrictMode = js_ast.ExplicitStrictMode
+							p.currentScope.Parent.UseStrictLoc = expr.Value.Loc
+						}
+					} else if helpers.UTF16EqualsString(str.Value, "use asm") {
+						// Deliberately remove "use asm" directives. The asm.js subset of
+						// JavaScript has complicated validation rules that are triggered
+						// by this directive. This parser is not designed with asm.js in
+						// mind and round-tripping asm.js code through esbuild will very
+						// likely cause it to no longer validate as asm.js. When this
+						// happens, V8 prints a warning and people don't like seeing the
+						// warning.
+						//
+						// We deliberately do not attempt to preserve the validity of
+						// asm.js code because it's a complicated legacy format and it's
+						// obsolete now that WebAssembly exists. By removing this directive
+						// it will just become normal JavaScript, which will work fine and
+						// won't generate a warning (but will run slower). We don't generate
+						// a warning ourselves in this case because there isn't necessarily
+						// anything easy and actionable that the user can do to fix this.
+						stmt.Data = &js_ast.SEmpty{}
+					}
+				}
+			}
+		}
+
+		stmts = append(stmts, stmt)
+
+		// Warn about ASI and return statements. Here's an example of code with
+		// this problem: https://github.com/rollup/rollup/issues/3729
+		if !p.suppressWarningsAboutWeirdCode {
+			if s, ok := stmt.Data.(*js_ast.SReturn); ok && s.ValueOrNil.Data == nil && !p.latestReturnHadSemicolon {
+				returnWithoutSemicolonStart = stmt.Loc.Start
+			} else {
+				if returnWithoutSemicolonStart != -1 {
+					if _, ok := stmt.Data.(*js_ast.SExpr); ok {
+						// "+ 6" skips past the "return" keyword itself
+						p.log.AddID(logger.MsgID_JS_SemicolonAfterReturn, logger.Warning, &p.tracker, logger.Range{Loc: logger.Loc{Start: returnWithoutSemicolonStart + 6}},
+							"The following expression is not returned because of an automatically-inserted semicolon")
+					}
+				}
+				returnWithoutSemicolonStart = -1
+			}
+		}
+	}
+
+	return stmts
+}
+
+// generateTempRefArg controls how a temporary created by "generateTempRef"
+// gets declared.
+type generateTempRefArg uint8
+
+const (
+	// Declare the temporary at the top of the enclosing function
+	tempRefNeedsDeclare generateTempRefArg = iota
+
+	// The caller takes responsibility for declaring the temporary
+	tempRefNoDeclare
+
+	// This is used when the generated temporary may a) be used inside of a loop
+	// body and b) may be used inside of a closure. In that case we can't use
+	// "var" for the temporary and we can't declare the temporary at the top of
+	// the enclosing function. Instead, we need to use "let" and we need to
+	// declare the temporary in the enclosing block (so it's inside of the loop
+	// body).
+	tempRefNeedsDeclareMayBeCapturedInsideLoop
+)
+
+// generateTempRef allocates a fresh temporary symbol for lowering code. The
+// "declare" argument selects where (or whether) a declaration for the
+// temporary is emitted; "optionalName" overrides the auto-generated
+// minified-style name ("_a", "_b", ...) when non-empty.
+func (p *parser) generateTempRef(declare generateTempRefArg, optionalName string) ast.Ref {
+	scope := p.currentScope
+
+	// Unless the temporary must stay in the enclosing block (the
+	// loop-capture case), hoist it to the nearest hoisting scope
+	if declare != tempRefNeedsDeclareMayBeCapturedInsideLoop {
+		for !scope.Kind.StopsHoisting() {
+			scope = scope.Parent
+		}
+	}
+
+	if optionalName == "" {
+		optionalName = "_" + ast.DefaultNameMinifierJS.NumberToMinifiedName(p.tempRefCount)
+		p.tempRefCount++
+	}
+	ref := p.newSymbol(ast.SymbolOther, optionalName)
+
+	if declare == tempRefNeedsDeclareMayBeCapturedInsideLoop && !scope.Kind.StopsHoisting() {
+		// Declare with "let" in the enclosing block (see the comment on
+		// "tempRefNeedsDeclareMayBeCapturedInsideLoop")
+		p.tempLetsToDeclare = append(p.tempLetsToDeclare, ref)
+	} else if declare != tempRefNoDeclare {
+		// Declare with "var" at the top of the enclosing function
+		p.tempRefsToDeclare = append(p.tempRefsToDeclare, tempRef{ref: ref})
+	}
+
+	scope.Generated = append(scope.Generated, ref)
+	return ref
+}
+
+// generateTopLevelTempRef allocates a fresh module-level temporary symbol.
+// The symbol is recorded as generated in the module scope and registered
+// for declaration at the top level of the module.
+func (p *parser) generateTopLevelTempRef() ast.Ref {
+	name := "_" + ast.DefaultNameMinifierJS.NumberToMinifiedName(p.topLevelTempRefCount)
+	p.topLevelTempRefCount++
+	ref := p.newSymbol(ast.SymbolOther, name)
+	p.moduleScope.Generated = append(p.moduleScope.Generated, ref)
+	p.topLevelTempRefsToDeclare = append(p.topLevelTempRefsToDeclare, tempRef{ref: ref})
+	return ref
+}
+
+// pushScopeForVisitPass re-enters the next scope that was created during the
+// parse pass. The visit pass must encounter scopes in exactly the order they
+// were recorded; any mismatch is an internal error and panics.
+func (p *parser) pushScopeForVisitPass(kind js_ast.ScopeKind, loc logger.Loc) {
+	next := p.scopesInOrder[0]
+
+	// Sanity-check that the visit pass is still in sync with the parse pass
+	if next.loc != loc || next.scope.Kind != kind {
+		panic(fmt.Sprintf("Expected scope (%d, %d) in %s, found scope (%d, %d)",
+			kind, loc.Start,
+			p.source.PrettyPath,
+			next.scope.Kind, next.loc.Start))
+	}
+
+	p.scopesInOrder = p.scopesInOrder[1:]
+	p.currentScope = next.scope
+	p.scopesForCurrentPart = append(p.scopesForCurrentPart, next.scope)
+}
+
+// findSymbolResult is the result of resolving an identifier reference with
+// "findSymbol".
+type findSymbolResult struct {
+	ref               ast.Ref    // the resolved (or newly-created unbound) symbol
+	declareLoc        logger.Loc // where the symbol was declared, if known
+	isInsideWithScope bool       // true if the lookup passed through a "with" body
+}
+
+// findSymbol resolves the identifier "name" referenced at "loc" by walking
+// the scope chain outward from the current scope. Symbols may come from a
+// scope's members or from an enclosing TypeScript namespace's exports. If no
+// declaration is found anywhere, an "unbound" symbol is created in the
+// module scope so all references to the same undeclared name share one
+// symbol. Every successful lookup counts as a usage of the symbol.
+func (p *parser) findSymbol(loc logger.Loc, name string) findSymbolResult {
+	var ref ast.Ref
+	var declareLoc logger.Loc
+	isInsideWithScope := false
+	didForbidArguments := false
+	s := p.currentScope
+
+	for {
+		// Track if we're inside a "with" statement body
+		if s.Kind == js_ast.ScopeWith {
+			isInsideWithScope = true
+		}
+
+		// Forbid referencing "arguments" inside class bodies
+		if s.ForbidArguments && name == "arguments" && !didForbidArguments {
+			r := js_lexer.RangeOfIdentifier(p.source, loc)
+			p.log.AddError(&p.tracker, r, fmt.Sprintf("Cannot access %q here:", name))
+			// Only report this once per reference, even if several enclosing
+			// scopes forbid "arguments"
+			didForbidArguments = true
+		}
+
+		// Is the symbol a member of this scope?
+		if member, ok := s.Members[name]; ok {
+			ref = member.Ref
+			declareLoc = member.Loc
+			break
+		}
+
+		// Is the symbol a member of this scope's TypeScript namespace?
+		if tsNamespace := s.TSNamespace; tsNamespace != nil {
+			if member, ok := tsNamespace.ExportedMembers[name]; ok && tsNamespace.IsEnumScope == member.IsEnumValue {
+				// If this is an identifier from a sibling TypeScript namespace, then we're
+				// going to have to generate a property access instead of a simple reference.
+				// Lazily-generate an identifier that represents this property access.
+				cache := tsNamespace.LazilyGeneratedProperyAccesses
+				if cache == nil {
+					cache = make(map[string]ast.Ref)
+					tsNamespace.LazilyGeneratedProperyAccesses = cache
+				}
+				ref, ok = cache[name]
+				if !ok {
+					ref = p.newSymbol(ast.SymbolOther, name)
+					p.symbols[ref.InnerIndex].NamespaceAlias = &ast.NamespaceAlias{
+						NamespaceRef: tsNamespace.ArgRef,
+						Alias:        name,
+					}
+					cache[name] = ref
+				}
+				declareLoc = member.Loc
+				break
+			}
+		}
+
+		s = s.Parent
+		if s == nil {
+			// Allocate an "unbound" symbol
+			p.checkForUnrepresentableIdentifier(loc, name)
+			ref = p.newSymbol(ast.SymbolUnbound, name)
+			declareLoc = loc
+			// Store it on the module scope (with a sentinel location) so
+			// later references to the same undeclared name reuse it
+			p.moduleScope.Members[name] = js_ast.ScopeMember{Ref: ref, Loc: logger.Loc{Start: -1}}
+			break
+		}
+	}
+
+	// If we had to pass through a "with" statement body to get to the symbol
+	// declaration, then this reference could potentially also refer to a
+	// property on the target object of the "with" statement. We must not rename
+	// it or we risk changing the behavior of the code.
+	if isInsideWithScope {
+		p.symbols[ref.InnerIndex].Flags |= ast.MustNotBeRenamed
+	}
+
+	// Track how many times we've referenced this symbol
+	p.recordUsage(ref)
+	return findSymbolResult{ref, declareLoc, isInsideWithScope}
+}
+
+// findLabelSymbol resolves a label reference ("break foo" / "continue foo")
+// against the innermost enclosing label scopes. Label lookup stops at
+// function boundaries since labels don't cross them. If no matching label
+// exists, an error is reported and an "unbound" symbol is returned so that
+// parsing can continue.
+func (p *parser) findLabelSymbol(loc logger.Loc, name string) (ref ast.Ref, isLoop bool, ok bool) {
+	scope := p.currentScope
+	for scope != nil && !scope.Kind.StopsHoisting() {
+		if scope.Kind == js_ast.ScopeLabel && p.symbols[scope.Label.Ref.InnerIndex].OriginalName == name {
+			ref = scope.Label.Ref
+			isLoop = scope.LabelStmtIsLoop
+			ok = true
+
+			// Track how many times we've referenced this symbol
+			p.recordUsage(ref)
+			return
+		}
+		scope = scope.Parent
+	}
+
+	p.log.AddError(&p.tracker, js_lexer.RangeOfIdentifier(p.source, loc),
+		fmt.Sprintf("There is no containing label named %q", name))
+
+	// Allocate an "unbound" symbol so the reference still resolves to something
+	ref = p.newSymbol(ast.SymbolUnbound, name)
+
+	// Track how many times we've referenced this symbol
+	p.recordUsage(ref)
+	return
+}
+
+// findIdentifiers flattens a (possibly destructuring) binding into a list of
+// declarations, one per identifier it contains. This is used to keep hoisted
+// "var" names alive when trimming dead code.
+func findIdentifiers(binding js_ast.Binding, identifiers []js_ast.Decl) []js_ast.Decl {
+	switch data := binding.Data.(type) {
+	case *js_ast.BIdentifier:
+		return append(identifiers, js_ast.Decl{Binding: binding})
+
+	case *js_ast.BArray:
+		// Recurse into each array destructuring element
+		for _, item := range data.Items {
+			identifiers = findIdentifiers(item.Binding, identifiers)
+		}
+
+	case *js_ast.BObject:
+		// Recurse into each object destructuring property value
+		for _, prop := range data.Properties {
+			identifiers = findIdentifiers(prop.Value, identifiers)
+		}
+	}
+
+	return identifiers
+}
+
+// Dead-branch trimming: everything inside a dead branch can be dropped
+// except hoisted declarations ("var" and "function"), which affect the
+// parent scope. For example:
+//
+//	function foo() {
+//	  if (false) { var x; }
+//	  x = 1;
+//	}
+//
+// Dropping the whole branch would incorrectly turn "x = 1" into an
+// assignment to a global variable, so the "var x" must survive.
+func shouldKeepStmtInDeadControlFlow(stmt js_ast.Stmt) bool {
+	switch s := stmt.Data.(type) {
+	case *js_ast.SEmpty, *js_ast.SExpr, *js_ast.SThrow, *js_ast.SReturn,
+		*js_ast.SBreak, *js_ast.SContinue, *js_ast.SClass, *js_ast.SDebugger:
+		// These never affect the parent scope and can be removed entirely
+		return false
+
+	case *js_ast.SLocal:
+		// Only "var" declarations hoist; "let"/"const" can be removed
+		if s.Kind != js_ast.LocalVar {
+			return false
+		}
+
+		// Strip initializers and destructuring, keeping just the identifiers
+		kept := []js_ast.Decl{}
+		for _, decl := range s.Decls {
+			kept = findIdentifiers(decl.Binding, kept)
+		}
+		if len(kept) == 0 {
+			return false
+		}
+		s.Decls = kept
+		return true
+
+	case *js_ast.SBlock:
+		// Keep the block only if some child must be kept
+		for _, child := range s.Stmts {
+			if shouldKeepStmtInDeadControlFlow(child) {
+				return true
+			}
+		}
+		return false
+
+	case *js_ast.SIf:
+		if shouldKeepStmtInDeadControlFlow(s.Yes) {
+			return true
+		}
+		return s.NoOrNil.Data != nil && shouldKeepStmtInDeadControlFlow(s.NoOrNil)
+
+	case *js_ast.SWhile:
+		return shouldKeepStmtInDeadControlFlow(s.Body)
+
+	case *js_ast.SDoWhile:
+		return shouldKeepStmtInDeadControlFlow(s.Body)
+
+	case *js_ast.SFor:
+		if s.InitOrNil.Data != nil && shouldKeepStmtInDeadControlFlow(s.InitOrNil) {
+			return true
+		}
+		return shouldKeepStmtInDeadControlFlow(s.Body)
+
+	case *js_ast.SForIn:
+		return shouldKeepStmtInDeadControlFlow(s.Init) || shouldKeepStmtInDeadControlFlow(s.Body)
+
+	case *js_ast.SForOf:
+		return shouldKeepStmtInDeadControlFlow(s.Init) || shouldKeepStmtInDeadControlFlow(s.Body)
+
+	case *js_ast.SLabel:
+		return shouldKeepStmtInDeadControlFlow(s.Stmt)
+
+	default:
+		// Anything else might have hoisted effects, so keep it
+		return true
+	}
+}
+
+// prependTempRefsOpts configures "visitStmtsAndPrependTempRefs". When
+// "fnBodyLoc" is non-nil the statements are a function body, and captured
+// "this"/"arguments" values may need to be materialized at that location.
+type prependTempRefsOpts struct {
+	fnBodyLoc *logger.Loc
+	kind      stmtsKind
+}
+
+// visitStmtsAndPrependTempRefs visits "stmts" and then prepends a single
+// "var" statement declaring any temporaries generated during the visit,
+// plus captured "this"/"arguments" values for function bodies and
+// top-level-only temporaries at module scope. The temp-ref tracking state
+// is saved and restored so nested calls don't interfere with each other.
+func (p *parser) visitStmtsAndPrependTempRefs(stmts []js_ast.Stmt, opts prependTempRefsOpts) []js_ast.Stmt {
+	oldTempRefs := p.tempRefsToDeclare
+	oldTempRefCount := p.tempRefCount
+	p.tempRefsToDeclare = nil
+	p.tempRefCount = 0
+
+	stmts = p.visitStmts(stmts, opts.kind)
+
+	// Prepend values for "this" and "arguments"
+	if opts.fnBodyLoc != nil {
+		// Capture "this"
+		if ref := p.fnOnlyDataVisit.thisCaptureRef; ref != nil {
+			p.tempRefsToDeclare = append(p.tempRefsToDeclare, tempRef{
+				ref:        *ref,
+				valueOrNil: js_ast.Expr{Loc: *opts.fnBodyLoc, Data: js_ast.EThisShared},
+			})
+			p.currentScope.Generated = append(p.currentScope.Generated, *ref)
+		}
+
+		// Capture "arguments"
+		if ref := p.fnOnlyDataVisit.argumentsCaptureRef; ref != nil {
+			p.tempRefsToDeclare = append(p.tempRefsToDeclare, tempRef{
+				ref:        *ref,
+				valueOrNil: js_ast.Expr{Loc: *opts.fnBodyLoc, Data: &js_ast.EIdentifier{Ref: *p.fnOnlyDataVisit.argumentsRef}},
+			})
+			p.currentScope.Generated = append(p.currentScope.Generated, *ref)
+		}
+	}
+
+	// There may also be special top-level-only temporaries to declare
+	if p.currentScope == p.moduleScope && p.topLevelTempRefsToDeclare != nil {
+		p.tempRefsToDeclare = append(p.tempRefsToDeclare, p.topLevelTempRefsToDeclare...)
+		p.topLevelTempRefsToDeclare = nil
+	}
+
+	// Prepend the generated temporary variables to the beginning of the statement list
+	decls := []js_ast.Decl{}
+	for _, temp := range p.tempRefsToDeclare {
+		// Skip temporaries that ended up unused after visiting
+		if p.symbols[temp.ref.InnerIndex].UseCountEstimate > 0 {
+			decls = append(decls, js_ast.Decl{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: temp.ref}}, ValueOrNil: temp.valueOrNil})
+			p.recordDeclaredSymbol(temp.ref)
+		}
+	}
+	if len(decls) > 0 {
+		// Skip past leading directives and comments
+		split := 0
+		for split < len(stmts) {
+			switch stmts[split].Data.(type) {
+			case *js_ast.SComment, *js_ast.SDirective:
+				split++
+				continue
+			}
+			break
+		}
+		// Splice the "var" statement in after the directive prologue
+		stmts = append(
+			append(
+				append(
+					[]js_ast.Stmt{},
+					stmts[:split]...),
+				js_ast.Stmt{Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: decls}}),
+			stmts[split:]...)
+	}
+
+	p.tempRefsToDeclare = oldTempRefs
+	p.tempRefCount = oldTempRefCount
+	return stmts
+}
+
+// stmtsKind identifies the syntactic context of a statement list being
+// visited, which affects how some statements are handled (e.g. "using"
+// declarations are not lowered directly inside a switch body).
+type stmtsKind uint8
+
+const (
+	stmtsNormal stmtsKind = iota
+	stmtsSwitch   // the case clauses of a "switch" statement
+	stmtsLoopBody // the body of a loop statement
+	stmtsFnBody   // the body of a function
+)
+
+// visitStmts runs the visit pass over a statement list: each statement is
+// visited (possibly expanding into several statements), TypeScript
+// "export =" is moved to the end, sloppy-mode block-level function
+// declarations are hoisted and rewritten as variable declarations, "using"
+// declarations are lowered when needed, and (when minifying) dead code is
+// trimmed and the remaining statements are mangled.
+func (p *parser) visitStmts(stmts []js_ast.Stmt, kind stmtsKind) []js_ast.Stmt {
+	// Save the current control-flow liveness. This represents if we are
+	// currently inside an "if (false) { ... }" block.
+	oldIsControlFlowDead := p.isControlFlowDead
+
+	oldTempLetsToDeclare := p.tempLetsToDeclare
+	p.tempLetsToDeclare = nil
+
+	// Visit all statements first
+	visited := make([]js_ast.Stmt, 0, len(stmts))
+	var before []js_ast.Stmt
+	var after []js_ast.Stmt
+	var preprocessedEnums map[int][]js_ast.Stmt
+	if p.scopesInOrderForEnum != nil {
+		// Preprocess TypeScript enums to improve code generation. Otherwise
+		// uses of an enum before that enum has been declared won't be inlined:
+		//
+		//   console.log(Foo.FOO) // We want "FOO" to be inlined here
+		//   const enum Foo { FOO = 0 }
+		//
+		// The TypeScript compiler itself contains code with this pattern, so
+		// it's important to implement this optimization.
+		for i, stmt := range stmts {
+			if _, ok := stmt.Data.(*js_ast.SEnum); ok {
+				if preprocessedEnums == nil {
+					preprocessedEnums = make(map[int][]js_ast.Stmt)
+				}
+				// Visit the enum using its own recorded scope list, then
+				// restore the main scope list for the normal pass below
+				oldScopesInOrder := p.scopesInOrder
+				p.scopesInOrder = p.scopesInOrderForEnum[stmt.Loc]
+				preprocessedEnums[i] = p.visitAndAppendStmt(nil, stmt)
+				p.scopesInOrder = oldScopesInOrder
+			}
+		}
+	}
+	for i, stmt := range stmts {
+		switch s := stmt.Data.(type) {
+		case *js_ast.SExportEquals:
+			// TypeScript "export = value;" becomes "module.exports = value;". This
+			// must happen at the end after everything is parsed because TypeScript
+			// moves this statement to the end when it generates code.
+			after = p.visitAndAppendStmt(after, stmt)
+			continue
+
+		case *js_ast.SFunction:
+			// Manually hoist block-level function declarations to preserve semantics.
+			// This is only done for function declarations that are not generators
+			// or async functions, since this is a backwards-compatibility hack from
+			// Annex B of the JavaScript standard.
+			if !p.currentScope.Kind.StopsHoisting() && p.symbols[int(s.Fn.Name.Ref.InnerIndex)].Kind == ast.SymbolHoistedFunction {
+				before = p.visitAndAppendStmt(before, stmt)
+				continue
+			}
+
+		case *js_ast.SEnum:
+			// Use the result that was already computed in the preprocessing
+			// pass above, and skip over this enum's scopes
+			visited = append(visited, preprocessedEnums[i]...)
+			p.scopesInOrder = p.scopesInOrder[len(p.scopesInOrderForEnum[stmt.Loc]):]
+			continue
+		}
+		visited = p.visitAndAppendStmt(visited, stmt)
+	}
+
+	// This is used for temporary variables that could be captured in a closure,
+	// and therefore need to be generated inside the nearest enclosing block in
+	// case they are generated inside a loop.
+	if len(p.tempLetsToDeclare) > 0 {
+		decls := make([]js_ast.Decl, 0, len(p.tempLetsToDeclare))
+		for _, ref := range p.tempLetsToDeclare {
+			decls = append(decls, js_ast.Decl{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: ref}}})
+		}
+		before = append(before, js_ast.Stmt{Data: &js_ast.SLocal{Kind: js_ast.LocalLet, Decls: decls}})
+	}
+	p.tempLetsToDeclare = oldTempLetsToDeclare
+
+	// Transform block-level function declarations into variable declarations
+	if len(before) > 0 {
+		var letDecls []js_ast.Decl
+		var varDecls []js_ast.Decl
+		var nonFnStmts []js_ast.Stmt
+		fnStmts := make(map[ast.Ref]int)
+		for _, stmt := range before {
+			s, ok := stmt.Data.(*js_ast.SFunction)
+			if !ok {
+				// We may get non-function statements here in certain scenarios such as when "KeepNames" is enabled
+				nonFnStmts = append(nonFnStmts, stmt)
+				continue
+			}
+
+			// This transformation of function declarations in nested scopes is
+			// intended to preserve the hoisting semantics of the original code. In
+			// JavaScript, function hoisting works differently in strict mode vs.
+			// sloppy mode code. We want the code we generate to use the semantics of
+			// the original environment, not the generated environment. However, if
+			// direct "eval" is present then it's not possible to preserve the
+			// semantics because we need two identifiers to do that and direct "eval"
+			// means neither identifier can be renamed to something else. So in that
+			// case we give up and do not preserve the semantics of the original code.
+			if p.currentScope.ContainsDirectEval {
+				if hoistedRef, ok := p.hoistedRefForSloppyModeBlockFn[s.Fn.Name.Ref]; ok {
+					// Merge the two identifiers back into a single one
+					p.symbols[hoistedRef.InnerIndex].Link = s.Fn.Name.Ref
+				}
+				nonFnStmts = append(nonFnStmts, stmt)
+				continue
+			}
+
+			index, ok := fnStmts[s.Fn.Name.Ref]
+			if !ok {
+				index = len(letDecls)
+				fnStmts[s.Fn.Name.Ref] = index
+				letDecls = append(letDecls, js_ast.Decl{Binding: js_ast.Binding{
+					Loc: s.Fn.Name.Loc, Data: &js_ast.BIdentifier{Ref: s.Fn.Name.Ref}}})
+
+				// Also write the function to the hoisted sibling symbol if applicable
+				if hoistedRef, ok := p.hoistedRefForSloppyModeBlockFn[s.Fn.Name.Ref]; ok {
+					p.recordDeclaredSymbol(hoistedRef)
+					p.recordUsage(s.Fn.Name.Ref)
+					varDecls = append(varDecls, js_ast.Decl{
+						Binding:    js_ast.Binding{Loc: s.Fn.Name.Loc, Data: &js_ast.BIdentifier{Ref: hoistedRef}},
+						ValueOrNil: js_ast.Expr{Loc: s.Fn.Name.Loc, Data: &js_ast.EIdentifier{Ref: s.Fn.Name.Ref}},
+					})
+				}
+			}
+
+			// The last function statement for a given symbol wins
+			s.Fn.Name = nil
+			letDecls[index].ValueOrNil = js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EFunction{Fn: s.Fn}}
+		}
+
+		// Reuse memory from "before"
+		before = before[:0]
+		kind := js_ast.LocalLet
+		if p.options.unsupportedJSFeatures.Has(compat.ConstAndLet) {
+			kind = js_ast.LocalVar
+		}
+		if len(letDecls) > 0 {
+			before = append(before, js_ast.Stmt{Loc: letDecls[0].ValueOrNil.Loc, Data: &js_ast.SLocal{Kind: kind, Decls: letDecls}})
+		}
+		if len(varDecls) > 0 {
+			// Potentially relocate "var" declarations to the top level
+			if assign, ok := p.maybeRelocateVarsToTopLevel(varDecls, relocateVarsNormal); ok {
+				if assign.Data != nil {
+					before = append(before, assign)
+				}
+			} else {
+				before = append(before, js_ast.Stmt{Loc: varDecls[0].ValueOrNil.Loc, Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: varDecls}})
+			}
+		}
+		before = append(before, nonFnStmts...)
+		visited = append(before, visited...)
+	}
+
+	// Move TypeScript "export =" statements to the end
+	visited = append(visited, after...)
+
+	// Restore the current control-flow liveness if it was changed inside the
+	// loop above. This is important because the caller will not restore it.
+	p.isControlFlowDead = oldIsControlFlowDead
+
+	// Lower using declarations
+	if kind != stmtsSwitch && p.shouldLowerUsingDeclarations(visited) {
+		ctx := p.lowerUsingDeclarationContext()
+		ctx.scanStmts(p, visited)
+		visited = ctx.finalize(p, visited, p.currentScope.Parent == nil)
+	}
+
+	// Stop now if we're not mangling
+	if !p.options.minifySyntax {
+		return visited
+	}
+
+	// If this is in a dead branch, trim as much dead code as we can
+	if p.isControlFlowDead {
+		end := 0
+		for _, stmt := range visited {
+			if !shouldKeepStmtInDeadControlFlow(stmt) {
+				continue
+			}
+
+			// Merge adjacent var statements
+			if s, ok := stmt.Data.(*js_ast.SLocal); ok && s.Kind == js_ast.LocalVar && end > 0 {
+				prevStmt := visited[end-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SLocal); ok && prevS.Kind == js_ast.LocalVar && s.IsExport == prevS.IsExport {
+					prevS.Decls = append(prevS.Decls, s.Decls...)
+					continue
+				}
+			}
+
+			visited[end] = stmt
+			end++
+		}
+		return visited[:end]
+	}
+
+	return p.mangleStmts(visited, kind)
+}
+
+func (p *parser) mangleStmts(stmts []js_ast.Stmt, kind stmtsKind) []js_ast.Stmt {
+	// Remove inlined constants now that we know whether any of these statements
+	// contained a direct eval() or not. This can't be done earlier when we
+	// encounter the constant because we haven't encountered the eval() yet.
+	// Inlined constants are not removed if they are in a top-level scope or
+	// if they are exported (which could be in a nested TypeScript namespace).
+	if p.currentScope.Parent != nil && !p.currentScope.ContainsDirectEval {
+		for i, stmt := range stmts {
+			switch s := stmt.Data.(type) {
+			case *js_ast.SEmpty, *js_ast.SComment, *js_ast.SDirective, *js_ast.SDebugger, *js_ast.STypeScript:
+				continue
+
+			case *js_ast.SLocal:
+				if !s.IsExport {
+					end := 0
+					for _, d := range s.Decls {
+						if id, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+							if _, ok := p.constValues[id.Ref]; ok && p.symbols[id.Ref.InnerIndex].UseCountEstimate == 0 {
+								continue
+							}
+						}
+						s.Decls[end] = d
+						end++
+					}
+					if end == 0 {
+						stmts[i].Data = js_ast.SEmptyShared
+					} else {
+						s.Decls = s.Decls[:end]
+					}
+				}
+				continue
+			}
+			break
+		}
+	}
+
+	// Merge adjacent statements during mangling
+	result := make([]js_ast.Stmt, 0, len(stmts))
+	isControlFlowDead := false
+	for i, stmt := range stmts {
+		if isControlFlowDead && !shouldKeepStmtInDeadControlFlow(stmt) {
+			// Strip unnecessary statements if the control flow is dead here
+			continue
+		}
+
+		// Inline single-use variable declarations where possible:
+		//
+		//   // Before
+		//   let x = fn();
+		//   return x.y();
+		//
+		//   // After
+		//   return fn().y();
+		//
+		// The declaration must not be exported. We can't just check for the
+		// "export" keyword because something might do "export {id};" later on.
+		// Instead we just ignore all top-level declarations for now. That means
+		// this optimization currently only applies in nested scopes.
+		//
+		// Ignore declarations if the scope is shadowed by a direct "eval" call.
+		// The eval'd code may indirectly reference this symbol and the actual
+		// use count may be greater than 1.
+		if p.currentScope != p.moduleScope && !p.currentScope.ContainsDirectEval {
+			// Keep inlining variables until a failure or until there are none left.
+			// That handles cases like this:
+			//
+			//   // Before
+			//   let x = fn();
+			//   let y = x.prop;
+			//   return y;
+			//
+			//   // After
+			//   return fn().prop;
+			//
+			for len(result) > 0 {
+				// Ignore "var" declarations since those have function-level scope and
+				// we may not have visited all of their uses yet by this point. We
+				// should have visited all the uses of "let" and "const" declarations
+				// by now since they are scoped to this block which we just finished
+				// visiting.
+				if prevS, ok := result[len(result)-1].Data.(*js_ast.SLocal); ok && prevS.Kind != js_ast.LocalVar {
+					last := prevS.Decls[len(prevS.Decls)-1]
+
+					// The binding must be an identifier that is only used once.
+					// Ignore destructuring bindings since that's not the simple case.
+					// Destructuring bindings could potentially execute side-effecting
+					// code which would invalidate reordering.
+					if id, ok := last.Binding.Data.(*js_ast.BIdentifier); ok {
+						// Don't do this if "__name" was called on this symbol. In that
+						// case there is actually more than one use even though it says
+						// there is only one. The "__name" use isn't counted so that
+						// tree shaking still works when names are kept.
+						if symbol := p.symbols[id.Ref.InnerIndex]; symbol.UseCountEstimate == 1 && !symbol.Flags.Has(ast.DidKeepName) {
+							replacement := last.ValueOrNil
+
+							// The variable must be initialized, since we will be substituting
+							// the value into the usage.
+							if replacement.Data == nil {
+								replacement = js_ast.Expr{Loc: last.Binding.Loc, Data: js_ast.EUndefinedShared}
+							}
+
+							// Try to substitute the identifier with the initializer. This will
+							// fail if something with side effects is in between the declaration
+							// and the usage.
+							if p.substituteSingleUseSymbolInStmt(stmt, id.Ref, replacement) {
+								// Remove the previous declaration, since the substitution was
+								// successful.
+								if len(prevS.Decls) == 1 {
+									result = result[:len(result)-1]
+								} else {
+									prevS.Decls = prevS.Decls[:len(prevS.Decls)-1]
+								}
+
+								// Loop back to try again
+								continue
+							}
+						}
+					}
+				}
+
+				// Substitution failed so stop trying
+				break
+			}
+		}
+
+		switch s := stmt.Data.(type) {
+		case *js_ast.SEmpty:
+			// Strip empty statements
+			continue
+
+		case *js_ast.SLocal:
+			// Merge adjacent local statements
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SLocal); ok && s.Kind == prevS.Kind && s.IsExport == prevS.IsExport {
+					prevS.Decls = append(prevS.Decls, s.Decls...)
+					continue
+				}
+			}
+
+		case *js_ast.SExpr:
+			// Merge adjacent expression statements
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					if !s.IsFromClassOrFnThatCanBeRemovedIfUnused {
+						prevS.IsFromClassOrFnThatCanBeRemovedIfUnused = false
+					}
+					prevS.Value = js_ast.JoinWithComma(prevS.Value, s.Value)
+					continue
+				}
+			}
+
+		case *js_ast.SSwitch:
+			// Absorb a previous expression statement
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					s.Test = js_ast.JoinWithComma(prevS.Value, s.Test)
+					result = result[:len(result)-1]
+				}
+			}
+
+		case *js_ast.SIf:
+			// Absorb a previous expression statement
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					s.Test = js_ast.JoinWithComma(prevS.Value, s.Test)
+					result = result[:len(result)-1]
+				}
+			}
+
+			if isJumpStatement(s.Yes.Data) {
+				optimizeImplicitJump := false
+
+				// Absorb a previous if statement
+				if len(result) > 0 {
+					prevStmt := result[len(result)-1]
+					if prevS, ok := prevStmt.Data.(*js_ast.SIf); ok && prevS.NoOrNil.Data == nil && jumpStmtsLookTheSame(prevS.Yes.Data, s.Yes.Data) {
+						// "if (a) break c; if (b) break c;" => "if (a || b) break c;"
+						// "if (a) continue c; if (b) continue c;" => "if (a || b) continue c;"
+						// "if (a) return c; if (b) return c;" => "if (a || b) return c;"
+						// "if (a) throw c; if (b) throw c;" => "if (a || b) throw c;"
+						s.Test = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalOr, prevS.Test, s.Test)
+						result = result[:len(result)-1]
+					}
+				}
+
+				// "while (x) { if (y) continue; z(); }" => "while (x) { if (!y) z(); }"
+				// "while (x) { if (y) continue; else z(); w(); }" => "while (x) { if (!y) { z(); w(); } }" => "for (; x;) !y && (z(), w());"
+				if kind == stmtsLoopBody {
+					if continueS, ok := s.Yes.Data.(*js_ast.SContinue); ok && continueS.Label == nil {
+						optimizeImplicitJump = true
+					}
+				}
+
+				// "let x = () => { if (y) return; z(); };" => "let x = () => { if (!y) z(); };"
+				// "let x = () => { if (y) return; else z(); w(); };" => "let x = () => { if (!y) { z(); w(); } };" => "let x = () => { !y && (z(), w()); };"
+				if kind == stmtsFnBody {
+					if returnS, ok := s.Yes.Data.(*js_ast.SReturn); ok && returnS.ValueOrNil.Data == nil {
+						optimizeImplicitJump = true
+					}
+				}
+
+				if optimizeImplicitJump {
+					var body []js_ast.Stmt
+					if s.NoOrNil.Data != nil {
+						body = append(body, s.NoOrNil)
+					}
+					body = append(body, stmts[i+1:]...)
+
+					// Don't do this transformation if the branch condition could
+					// potentially access symbols declared later on on this scope below.
+					// If so, inverting the branch condition and nesting statements after
+					// this in a block would break that access which is a behavior change.
+					//
+					//   // This transformation is incorrect
+					//   if (a()) return; function a() {}
+					//   if (!a()) { function a() {} }
+					//
+					//   // This transformation is incorrect
+					//   if (a(() => b)) return; let b;
+					//   if (a(() => b)) { let b; }
+					//
+					canMoveBranchConditionOutsideScope := true
+					for _, stmt := range body {
+						if statementCaresAboutScope(stmt) {
+							canMoveBranchConditionOutsideScope = false
+							break
+						}
+					}
+
+					if canMoveBranchConditionOutsideScope {
+						body = p.mangleStmts(body, kind)
+						bodyLoc := s.Yes.Loc
+						if len(body) > 0 {
+							bodyLoc = body[0].Loc
+						}
+						return p.mangleIf(result, stmt.Loc, &js_ast.SIf{
+							Test: p.astHelpers.SimplifyBooleanExpr(js_ast.Not(s.Test)),
+							Yes:  stmtsToSingleStmt(bodyLoc, body, logger.Loc{}),
+						})
+					}
+				}
+
+				if s.NoOrNil.Data != nil {
+					// "if (a) return b; else if (c) return d; else return e;" => "if (a) return b; if (c) return d; return e;"
+					for {
+						result = append(result, stmt)
+						stmt = s.NoOrNil
+						s.NoOrNil = js_ast.Stmt{}
+						var ok bool
+						s, ok = stmt.Data.(*js_ast.SIf)
+						if !ok || !isJumpStatement(s.Yes.Data) || s.NoOrNil.Data == nil {
+							break
+						}
+					}
+					result = appendIfOrLabelBodyPreservingScope(result, stmt)
+					if isJumpStatement(stmt.Data) {
+						isControlFlowDead = true
+					}
+					continue
+				}
+			}
+
+		case *js_ast.SReturn:
+			// Merge return statements with the previous expression statement
+			if len(result) > 0 && s.ValueOrNil.Data != nil {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					result[len(result)-1] = js_ast.Stmt{Loc: prevStmt.Loc,
+						Data: &js_ast.SReturn{ValueOrNil: js_ast.JoinWithComma(prevS.Value, s.ValueOrNil)}}
+					continue
+				}
+			}
+
+			isControlFlowDead = true
+
+		case *js_ast.SThrow:
+			// Merge throw statements with the previous expression statement
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					result[len(result)-1] = js_ast.Stmt{Loc: prevStmt.Loc, Data: &js_ast.SThrow{Value: js_ast.JoinWithComma(prevS.Value, s.Value)}}
+					continue
+				}
+			}
+
+			isControlFlowDead = true
+
+		case *js_ast.SBreak, *js_ast.SContinue:
+			isControlFlowDead = true
+
+		case *js_ast.SFor:
+			if len(result) > 0 {
+				prevStmt := result[len(result)-1]
+				if prevS, ok := prevStmt.Data.(*js_ast.SExpr); ok {
+					// Insert the previous expression into the for loop initializer
+					if s.InitOrNil.Data == nil {
+						result[len(result)-1] = stmt
+						s.InitOrNil = js_ast.Stmt{Loc: prevStmt.Loc, Data: &js_ast.SExpr{Value: prevS.Value}}
+						continue
+					} else if s2, ok := s.InitOrNil.Data.(*js_ast.SExpr); ok {
+						result[len(result)-1] = stmt
+						s.InitOrNil = js_ast.Stmt{Loc: prevStmt.Loc, Data: &js_ast.SExpr{Value: js_ast.JoinWithComma(prevS.Value, s2.Value)}}
+						continue
+					}
+				} else {
+					// Insert the previous variable declaration into the for loop
+					// initializer if it's a "var" declaration, since the scope
+					// doesn't matter due to scope hoisting
+					if s.InitOrNil.Data == nil {
+						if s2, ok := prevStmt.Data.(*js_ast.SLocal); ok && s2.Kind == js_ast.LocalVar && !s2.IsExport {
+							result[len(result)-1] = stmt
+							s.InitOrNil = prevStmt
+							continue
+						}
+					} else {
+						if s2, ok := prevStmt.Data.(*js_ast.SLocal); ok && s2.Kind == js_ast.LocalVar && !s2.IsExport {
+							if s3, ok := s.InitOrNil.Data.(*js_ast.SLocal); ok && s3.Kind == js_ast.LocalVar {
+								result[len(result)-1] = stmt
+								s.InitOrNil.Data = &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: append(s2.Decls, s3.Decls...)}
+								continue
+							}
+						}
+					}
+				}
+			}
+
+		case *js_ast.STry:
+			// Drop an unused identifier binding if the optional catch binding feature is supported
+			if !p.options.unsupportedJSFeatures.Has(compat.OptionalCatchBinding) && s.Catch != nil {
+				if id, ok := s.Catch.BindingOrNil.Data.(*js_ast.BIdentifier); ok {
+					if symbol := p.symbols[id.Ref.InnerIndex]; symbol.UseCountEstimate == 0 {
+						if symbol.Link != ast.InvalidRef {
+							// We cannot transform "try { x() } catch (y) { var y = 1 }" into
+							// "try { x() } catch { var y = 1 }" even though "y" is never used
+							// because the hoisted variable "y" would have different values
+							// after the statement ends due to a strange JavaScript quirk:
+							//
+							//   try { x() } catch (y) { var y = 1 }
+							//   console.log(y) // undefined
+							//
+							//   try { x() } catch { var y = 1 }
+							//   console.log(y) // 1
+							//
+						} else if p.currentScope.ContainsDirectEval {
+							// We cannot transform "try { x() } catch (y) { eval('z = y') }"
+							// into "try { x() } catch { eval('z = y') }" because the variable
+							// "y" is actually still used.
+						} else {
+							// "try { x() } catch (y) {}" => "try { x() } catch {}"
+							s.Catch.BindingOrNil.Data = nil
+						}
+					}
+				}
+			}
+		}
+
+		result = append(result, stmt)
+	}
+
+	// Drop a trailing unconditional jump statement if applicable
+	if len(result) > 0 {
+		switch kind {
+		case stmtsLoopBody:
+			// "while (x) { y(); continue; }" => "while (x) { y(); }"
+			if continueS, ok := result[len(result)-1].Data.(*js_ast.SContinue); ok && continueS.Label == nil {
+				result = result[:len(result)-1]
+			}
+
+		case stmtsFnBody:
+			// "function f() { x(); return; }" => "function f() { x(); }"
+			if returnS, ok := result[len(result)-1].Data.(*js_ast.SReturn); ok && returnS.ValueOrNil.Data == nil {
+				result = result[:len(result)-1]
+			}
+		}
+	}
+
+	// Merge certain statements in reverse order
+	if len(result) >= 2 {
+		lastStmt := result[len(result)-1]
+
+		if lastReturn, ok := lastStmt.Data.(*js_ast.SReturn); ok {
+			// "if (a) return b; if (c) return d; return e;" => "return a ? b : c ? d : e;"
+		returnLoop:
+			for len(result) >= 2 {
+				prevIndex := len(result) - 2
+				prevStmt := result[prevIndex]
+
+				switch prevS := prevStmt.Data.(type) {
+				case *js_ast.SExpr:
+					// This return statement must have a value
+					if lastReturn.ValueOrNil.Data == nil {
+						break returnLoop
+					}
+
+					// "a(); return b;" => "return a(), b;"
+					lastReturn = &js_ast.SReturn{ValueOrNil: js_ast.JoinWithComma(prevS.Value, lastReturn.ValueOrNil)}
+
+					// Merge the last two statements
+					lastStmt = js_ast.Stmt{Loc: prevStmt.Loc, Data: lastReturn}
+					result[prevIndex] = lastStmt
+					result = result[:len(result)-1]
+
+				case *js_ast.SIf:
+					// The previous statement must be an if statement with no else clause
+					if prevS.NoOrNil.Data != nil {
+						break returnLoop
+					}
+
+					// The then clause must be a return
+					prevReturn, ok := prevS.Yes.Data.(*js_ast.SReturn)
+					if !ok {
+						break returnLoop
+					}
+
+					// Handle some or all of the values being undefined
+					left := prevReturn.ValueOrNil
+					right := lastReturn.ValueOrNil
+					if left.Data == nil {
+						// "if (a) return; return b;" => "return a ? void 0 : b;"
+						left = js_ast.Expr{Loc: prevS.Yes.Loc, Data: js_ast.EUndefinedShared}
+					}
+					if right.Data == nil {
+						// "if (a) return a; return;" => "return a ? b : void 0;"
+						right = js_ast.Expr{Loc: lastStmt.Loc, Data: js_ast.EUndefinedShared}
+					}
+
+					// "if (!a) return b; return c;" => "return a ? c : b;"
+					if not, ok := prevS.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
+						prevS.Test = not.Value
+						left, right = right, left
+					}
+
+					if comma, ok := prevS.Test.Data.(*js_ast.EBinary); ok && comma.Op == js_ast.BinOpComma {
+						// "if (a, b) return c; return d;" => "return a, b ? c : d;"
+						lastReturn = &js_ast.SReturn{ValueOrNil: js_ast.JoinWithComma(comma.Left,
+							p.astHelpers.MangleIfExpr(comma.Right.Loc, &js_ast.EIf{Test: comma.Right, Yes: left, No: right}, p.options.unsupportedJSFeatures))}
+					} else {
+						// "if (a) return b; return c;" => "return a ? b : c;"
+						lastReturn = &js_ast.SReturn{ValueOrNil: p.astHelpers.MangleIfExpr(
+							prevS.Test.Loc, &js_ast.EIf{Test: prevS.Test, Yes: left, No: right}, p.options.unsupportedJSFeatures)}
+					}
+
+					// Merge the last two statements
+					lastStmt = js_ast.Stmt{Loc: prevStmt.Loc, Data: lastReturn}
+					result[prevIndex] = lastStmt
+					result = result[:len(result)-1]
+
+				default:
+					break returnLoop
+				}
+			}
+		} else if lastThrow, ok := lastStmt.Data.(*js_ast.SThrow); ok {
+			// "if (a) throw b; if (c) throw d; throw e;" => "throw a ? b : c ? d : e;"
+		throwLoop:
+			for len(result) >= 2 {
+				prevIndex := len(result) - 2
+				prevStmt := result[prevIndex]
+
+				switch prevS := prevStmt.Data.(type) {
+				case *js_ast.SExpr:
+					// "a(); throw b;" => "throw a(), b;"
+					lastThrow = &js_ast.SThrow{Value: js_ast.JoinWithComma(prevS.Value, lastThrow.Value)}
+
+					// Merge the last two statements
+					lastStmt = js_ast.Stmt{Loc: prevStmt.Loc, Data: lastThrow}
+					result[prevIndex] = lastStmt
+					result = result[:len(result)-1]
+
+				case *js_ast.SIf:
+					// The previous statement must be an if statement with no else clause
+					if prevS.NoOrNil.Data != nil {
+						break throwLoop
+					}
+
+					// The then clause must be a throw
+					prevThrow, ok := prevS.Yes.Data.(*js_ast.SThrow)
+					if !ok {
+						break throwLoop
+					}
+
+					left := prevThrow.Value
+					right := lastThrow.Value
+
+					// "if (!a) throw b; throw c;" => "throw a ? c : b;"
+					if not, ok := prevS.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
+						prevS.Test = not.Value
+						left, right = right, left
+					}
+
+					// Merge the last two statements
+					if comma, ok := prevS.Test.Data.(*js_ast.EBinary); ok && comma.Op == js_ast.BinOpComma {
+						// "if (a, b) return c; return d;" => "return a, b ? c : d;"
+						lastThrow = &js_ast.SThrow{Value: js_ast.JoinWithComma(comma.Left,
+							p.astHelpers.MangleIfExpr(comma.Right.Loc, &js_ast.EIf{Test: comma.Right, Yes: left, No: right}, p.options.unsupportedJSFeatures))}
+					} else {
+						// "if (a) return b; return c;" => "return a ? b : c;"
+						lastThrow = &js_ast.SThrow{
+							Value: p.astHelpers.MangleIfExpr(prevS.Test.Loc, &js_ast.EIf{Test: prevS.Test, Yes: left, No: right}, p.options.unsupportedJSFeatures)}
+					}
+					lastStmt = js_ast.Stmt{Loc: prevStmt.Loc, Data: lastThrow}
+					result[prevIndex] = lastStmt
+					result = result[:len(result)-1]
+
+				default:
+					break throwLoop
+				}
+			}
+		}
+	}
+
+	return result
+}
+
+// substituteSingleUseSymbolInStmt tries to inline "replacement" at the single
+// use of the symbol "ref" inside "stmt". Only the one expression position that
+// the statement evaluates first is considered, because anything evaluated
+// later could observe side effects that run in between the declaration and
+// the use. Returns true on success, in which case the caller can remove the
+// now-unused declaration.
+func (p *parser) substituteSingleUseSymbolInStmt(stmt js_ast.Stmt, ref ast.Ref, replacement js_ast.Expr) bool {
+	var expr *js_ast.Expr
+
+	// Pick out the first-evaluated expression slot for each statement kind
+	switch s := stmt.Data.(type) {
+	case *js_ast.SExpr:
+		expr = &s.Value
+	case *js_ast.SThrow:
+		expr = &s.Value
+	case *js_ast.SReturn:
+		expr = &s.ValueOrNil
+	case *js_ast.SIf:
+		expr = &s.Test
+	case *js_ast.SSwitch:
+		expr = &s.Test
+	case *js_ast.SLocal:
+		// Only try substituting into the initializer for the first declaration
+		if first := &s.Decls[0]; first.ValueOrNil.Data != nil {
+			// Make sure there isn't destructuring, which could evaluate code
+			if _, ok := first.Binding.Data.(*js_ast.BIdentifier); ok {
+				expr = &first.ValueOrNil
+			}
+		}
+	}
+
+	if expr != nil {
+		// Only continue trying to insert this replacement into sub-expressions
+		// after the first one if the replacement has no side effects:
+		//
+		//   // Substitution is ok
+		//   let replacement = 123;
+		//   return x + replacement;
+		//
+		//   // Substitution is not ok because "fn()" may change "x"
+		//   let replacement = fn();
+		//   return x + replacement;
+		//
+		//   // Substitution is not ok because "x == x" may change "x" due to "valueOf()" evaluation
+		//   let replacement = [x];
+		//   return (x == x) + replacement;
+		//
+		replacementCanBeRemoved := p.astHelpers.ExprCanBeRemovedIfUnused(replacement)
+
+		if new, status := p.substituteSingleUseSymbolInExpr(*expr, ref, replacement, replacementCanBeRemoved); status == substituteSuccess {
+			*expr = new
+			return true
+		}
+	}
+
+	return false
+}
+
+// substituteStatus reports the outcome of trying to substitute a single-use
+// symbol into an expression tree.
+type substituteStatus uint8
+
+const (
+	// Not found here and no side effects seen: keep scanning later expressions
+	substituteContinue substituteStatus = iota
+	// The symbol's single use was found and replaced
+	substituteSuccess
+	// Hit something with potential side effects: abandon the substitution
+	substituteFailure
+)
+
+// substituteSingleUseSymbolInExpr scans "expr" in evaluation order looking
+// for the single remaining use of "ref" and substitutes "replacement" there.
+// The scan gives up (substituteFailure) whenever continuing would mean
+// reordering the replacement past code that may have side effects, since
+// those side effects could change the replacement's value or be observed out
+// of order. "replacementCanBeRemoved" means the replacement itself has no
+// observable side effects, which permits substitution into branches and past
+// more kinds of expressions.
+func (p *parser) substituteSingleUseSymbolInExpr(
+	expr js_ast.Expr,
+	ref ast.Ref,
+	replacement js_ast.Expr,
+	replacementCanBeRemoved bool,
+) (js_ast.Expr, substituteStatus) {
+	switch e := expr.Data.(type) {
+	case *js_ast.EIdentifier:
+		if e.Ref == ref {
+			// Found the single use: drop the recorded usage and substitute
+			p.ignoreUsage(ref)
+			return replacement, substituteSuccess
+		}
+
+	case *js_ast.ESpread:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Value, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Value = value
+			return expr, status
+		}
+
+	case *js_ast.EAwait:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Value, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Value = value
+			return expr, status
+		}
+
+	case *js_ast.EYield:
+		if e.ValueOrNil.Data != nil {
+			if value, status := p.substituteSingleUseSymbolInExpr(e.ValueOrNil, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.ValueOrNil = value
+				return expr, status
+			}
+		}
+
+	case *js_ast.EImportCall:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Expr, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Expr = value
+			return expr, status
+		}
+
+		// The "import()" expression has side effects but the side effects are
+		// always asynchronous so there is no way for the side effects to modify
+		// the replacement value. So it's ok to reorder the replacement value
+		// past the "import()" expression assuming everything else checks out.
+		if replacementCanBeRemoved && p.astHelpers.ExprCanBeRemovedIfUnused(e.Expr) {
+			return expr, substituteContinue
+		}
+
+	case *js_ast.EUnary:
+		switch e.Op {
+		case js_ast.UnOpPreInc, js_ast.UnOpPostInc, js_ast.UnOpPreDec, js_ast.UnOpPostDec, js_ast.UnOpDelete:
+			// Do not substitute into an assignment position
+
+		default:
+			if value, status := p.substituteSingleUseSymbolInExpr(e.Value, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.Value = value
+				return expr, status
+			}
+		}
+
+	case *js_ast.EDot:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Target, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Target = value
+			return expr, status
+		}
+
+	case *js_ast.EBinary:
+		// Do not substitute into an assignment position
+		if e.Op.BinaryAssignTarget() == js_ast.AssignTargetNone {
+			if value, status := p.substituteSingleUseSymbolInExpr(e.Left, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.Left = value
+				return expr, status
+			}
+		} else if !p.astHelpers.ExprCanBeRemovedIfUnused(e.Left) {
+			// Do not reorder past a side effect in an assignment target, as that may
+			// change the replacement value. For example, "fn()" may change "a" here:
+			//
+			//   let a = 1;
+			//   foo[fn()] = a;
+			//
+			return expr, substituteFailure
+		} else if e.Op.BinaryAssignTarget() == js_ast.AssignTargetUpdate && !replacementCanBeRemoved {
+			// If this is a read-modify-write assignment and the replacement has side
+			// effects, don't reorder it past the assignment target. The assignment
+			// target is being read so it may be changed by the side effect. For
+			// example, "fn()" may change "foo" here:
+			//
+			//   let a = fn();
+			//   foo += a;
+			//
+			return expr, substituteFailure
+		}
+
+		// If we get here then it should be safe to attempt to substitute the
+		// replacement past the left operand into the right operand.
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Right, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Right = value
+			return expr, status
+		}
+
+	case *js_ast.EIf:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Test, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Test = value
+			return expr, status
+		}
+
+		// Do not substitute our unconditionally-executed value into a branch
+		// unless the value itself has no side effects
+		if replacementCanBeRemoved {
+			// Unlike other branches in this function such as "a && b" or "a?.[b]",
+			// the "a ? b : c" form has potential code evaluation along both control
+			// flow paths. Handle this by allowing substitution into either branch.
+			// Side effects in one branch should not prevent the substitution into
+			// the other branch.
+
+			yesValue, yesStatus := p.substituteSingleUseSymbolInExpr(e.Yes, ref, replacement, replacementCanBeRemoved)
+			if yesStatus == substituteSuccess {
+				e.Yes = yesValue
+				return expr, yesStatus
+			}
+
+			noValue, noStatus := p.substituteSingleUseSymbolInExpr(e.No, ref, replacement, replacementCanBeRemoved)
+			if noStatus == substituteSuccess {
+				e.No = noValue
+				return expr, noStatus
+			}
+
+			// Side effects in either branch should stop us from continuing to try to
+			// substitute the replacement after the control flow branches merge again.
+			if yesStatus != substituteContinue || noStatus != substituteContinue {
+				return expr, substituteFailure
+			}
+		}
+
+	case *js_ast.EIndex:
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Target, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Target = value
+			return expr, status
+		}
+
+		// Do not substitute our unconditionally-executed value into a branch
+		// unless the value itself has no side effects
+		if replacementCanBeRemoved || e.OptionalChain == js_ast.OptionalChainNone {
+			if value, status := p.substituteSingleUseSymbolInExpr(e.Index, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.Index = value
+				return expr, status
+			}
+		}
+
+	case *js_ast.ECall:
+		// Don't substitute something into a call target that could change "this"
+		_, isDot := replacement.Data.(*js_ast.EDot)
+		_, isIndex := replacement.Data.(*js_ast.EIndex)
+		if isDot || isIndex {
+			if id, ok := e.Target.Data.(*js_ast.EIdentifier); ok && id.Ref == ref {
+				break
+			}
+		}
+
+		if value, status := p.substituteSingleUseSymbolInExpr(e.Target, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+			e.Target = value
+			return expr, status
+		}
+
+		// Do not substitute our unconditionally-executed value into a branch
+		// unless the value itself has no side effects
+		if replacementCanBeRemoved || e.OptionalChain == js_ast.OptionalChainNone {
+			for i, arg := range e.Args {
+				if value, status := p.substituteSingleUseSymbolInExpr(arg, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+					e.Args[i] = value
+					return expr, status
+				}
+			}
+		}
+
+	case *js_ast.EArray:
+		for i, item := range e.Items {
+			if value, status := p.substituteSingleUseSymbolInExpr(item, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.Items[i] = value
+				return expr, status
+			}
+		}
+
+	case *js_ast.EObject:
+		for i, property := range e.Properties {
+			// Check the key
+			if property.Flags.Has(js_ast.PropertyIsComputed) {
+				if value, status := p.substituteSingleUseSymbolInExpr(property.Key, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+					e.Properties[i].Key = value
+					return expr, status
+				}
+
+				// Stop now because both computed keys and property spread have side effects
+				return expr, substituteFailure
+			}
+
+			// Check the value
+			if property.ValueOrNil.Data != nil {
+				if value, status := p.substituteSingleUseSymbolInExpr(property.ValueOrNil, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+					e.Properties[i].ValueOrNil = value
+					return expr, status
+				}
+			}
+		}
+
+	case *js_ast.ETemplate:
+		if e.TagOrNil.Data != nil {
+			if value, status := p.substituteSingleUseSymbolInExpr(e.TagOrNil, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.TagOrNil = value
+				return expr, status
+			}
+		}
+
+		for i, part := range e.Parts {
+			if value, status := p.substituteSingleUseSymbolInExpr(part.Value, ref, replacement, replacementCanBeRemoved); status != substituteContinue {
+				e.Parts[i].Value = value
+
+				// If we substituted a primitive, merge it into the template
+				if js_ast.IsPrimitiveLiteral(value.Data) {
+					expr = js_ast.InlinePrimitivesIntoTemplate(expr.Loc, e)
+				}
+				return expr, status
+			}
+		}
+	}
+
+	// If both the replacement and this expression have no observable side
+	// effects, then we can reorder the replacement past this expression
+	if replacementCanBeRemoved && p.astHelpers.ExprCanBeRemovedIfUnused(expr) {
+		return expr, substituteContinue
+	}
+
+	// We can always reorder past primitive values
+	if js_ast.IsPrimitiveLiteral(expr.Data) || js_ast.IsPrimitiveLiteral(replacement.Data) {
+		return expr, substituteContinue
+	}
+
+	// Otherwise we should stop trying to substitute past this point
+	return expr, substituteFailure
+}
+
+// visitLoopBody visits the body of a loop statement with the "inside loop"
+// flag set, restoring the previous flag value afterward.
+func (p *parser) visitLoopBody(stmt js_ast.Stmt) js_ast.Stmt {
+	wasInsideLoop := p.fnOrArrowDataVisit.isInsideLoop
+	p.fnOrArrowDataVisit.isInsideLoop = true
+	p.loopBody = stmt.Data
+	visited := p.visitSingleStmt(stmt, stmtsLoopBody)
+	p.fnOrArrowDataVisit.isInsideLoop = wasInsideLoop
+	return visited
+}
+
+// visitSingleStmt visits a statement that grammatically occupies a single
+// statement position (e.g. an "if" branch or a loop body). Visiting can
+// expand the statement into several statements, so the result is collapsed
+// back down to one statement, wrapped in a block if necessary.
+func (p *parser) visitSingleStmt(stmt js_ast.Stmt, kind stmtsKind) js_ast.Stmt {
+	// To reduce stack depth, special-case blocks and process their children directly
+	if block, ok := stmt.Data.(*js_ast.SBlock); ok {
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		block.Stmts = p.visitStmts(block.Stmts, kind)
+		p.popScope()
+		if p.options.minifySyntax {
+			stmt = stmtsToSingleStmt(stmt.Loc, block.Stmts, block.CloseBraceLoc)
+		}
+		return stmt
+	}
+
+	// Introduce a fake block scope for function declarations inside if statements
+	fn, ok := stmt.Data.(*js_ast.SFunction)
+	hasIfScope := ok && fn.Fn.HasIfScope
+	if hasIfScope {
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		if p.isStrictMode() {
+			p.markStrictModeFeature(ifElseFunctionStmt, js_lexer.RangeOfIdentifier(p.source, stmt.Loc), "")
+		}
+	}
+
+	stmts := p.visitStmts([]js_ast.Stmt{stmt}, kind)
+
+	// Balance the fake block scope introduced above
+	if hasIfScope {
+		p.popScope()
+	}
+
+	return stmtsToSingleStmt(stmt.Loc, stmts, logger.Loc{})
+}
+
+// stmtsToSingleStmt collapses a statement list back into a single statement.
+// One statement could potentially expand to several statements during
+// visiting, so the list may need to be wrapped in a block to remain a single
+// statement.
+func stmtsToSingleStmt(loc logger.Loc, stmts []js_ast.Stmt, closeBraceLoc logger.Loc) js_ast.Stmt {
+	switch {
+	case len(stmts) == 0:
+		// Nothing left: use the shared empty statement
+		return js_ast.Stmt{Loc: loc, Data: js_ast.SEmptyShared}
+	case len(stmts) == 1 && !statementCaresAboutScope(stmts[0]):
+		// A lone scope-insensitive statement can stand on its own
+		return stmts[0]
+	default:
+		// Otherwise wrap everything in a block
+		return js_ast.Stmt{Loc: loc, Data: &js_ast.SBlock{Stmts: stmts, CloseBraceLoc: closeBraceLoc}}
+	}
+}
+
+// visitForLoopInit visits the initializer clause of a "for", "for-in", or
+// "for-of" loop. The initializer is either an expression statement or a
+// variable declaration; "isInOrOf" is true for "for-in"/"for-of" loops, where
+// an expression initializer is also an assignment target.
+func (p *parser) visitForLoopInit(stmt js_ast.Stmt, isInOrOf bool) js_ast.Stmt {
+	switch s := stmt.Data.(type) {
+	case *js_ast.SExpr:
+		assignTarget := js_ast.AssignTargetNone
+		if isInOrOf {
+			assignTarget = js_ast.AssignTargetReplace
+		}
+		p.stmtExprValue = s.Value.Data
+		s.Value, _ = p.visitExprInOut(s.Value, exprIn{assignTarget: assignTarget})
+
+	case *js_ast.SLocal:
+		// Visit each binding and its initializer expression, if any
+		for i := range s.Decls {
+			d := &s.Decls[i]
+			p.visitBinding(d.Binding, bindingOpts{})
+			if d.ValueOrNil.Data != nil {
+				d.ValueOrNil = p.visitExpr(d.ValueOrNil)
+			}
+		}
+		s.Decls = p.lowerObjectRestInDecls(s.Decls)
+		s.Kind = p.selectLocalKind(s.Kind)
+
+	default:
+		// Only expression statements and local declarations are valid here
+		panic("Internal error")
+	}
+
+	return stmt
+}
+
+// recordDeclaredSymbol remembers that "ref" was declared, noting whether the
+// declaration happened at the top level of the module.
+func (p *parser) recordDeclaredSymbol(ref ast.Ref) {
+	isTopLevel := p.currentScope == p.moduleScope
+	p.declaredSymbols = append(p.declaredSymbols, js_ast.DeclaredSymbol{Ref: ref, IsTopLevel: isTopLevel})
+}
+
+// bindingOpts carries options for visitBinding.
+type bindingOpts struct {
+	// When non-nil, identifier bindings are checked against this map so that
+	// duplicate names in the same parameter list can be reported as errors
+	duplicateArgCheck map[string]logger.Range
+}
+
+// visitBinding recursively visits a binding pattern (identifier, array, or
+// object), recording declared symbols, validating declared names, optionally
+// checking for duplicate argument names, and visiting any default value
+// expressions.
+func (p *parser) visitBinding(binding js_ast.Binding, opts bindingOpts) {
+	switch b := binding.Data.(type) {
+	case *js_ast.BMissing:
+		// A hole in an array pattern: nothing to do
+
+	case *js_ast.BIdentifier:
+		p.recordDeclaredSymbol(b.Ref)
+		name := p.symbols[b.Ref.InnerIndex].OriginalName
+		p.validateDeclaredSymbolName(binding.Loc, name)
+		if opts.duplicateArgCheck != nil {
+			r := js_lexer.RangeOfIdentifier(p.source, binding.Loc)
+			if firstRange := opts.duplicateArgCheck[name]; firstRange.Len > 0 {
+				p.log.AddErrorWithNotes(&p.tracker, r,
+					fmt.Sprintf("%q cannot be bound multiple times in the same parameter list", name),
+					[]logger.MsgData{p.tracker.MsgData(firstRange, fmt.Sprintf("The name %q was originally bound here:", name))})
+			} else {
+				opts.duplicateArgCheck[name] = r
+			}
+		}
+
+	case *js_ast.BArray:
+		for i := range b.Items {
+			item := &b.Items[i]
+			p.visitBinding(item.Binding, opts)
+			if item.DefaultValueOrNil.Data != nil {
+				// Propagate the name to keep from the binding into the initializer
+				if id, ok := item.Binding.Data.(*js_ast.BIdentifier); ok {
+					p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+					p.nameToKeepIsFor = item.DefaultValueOrNil.Data
+				}
+
+				item.DefaultValueOrNil = p.visitExpr(item.DefaultValueOrNil)
+			}
+		}
+
+	case *js_ast.BObject:
+		for i, property := range b.Properties {
+			if !property.IsSpread {
+				property.Key, _ = p.visitExprInOut(property.Key, exprIn{
+					shouldMangleStringsAsProps: true,
+				})
+			}
+			p.visitBinding(property.Value, opts)
+			if property.DefaultValueOrNil.Data != nil {
+				// Propagate the name to keep from the binding into the initializer
+				if id, ok := property.Value.Data.(*js_ast.BIdentifier); ok {
+					p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+					p.nameToKeepIsFor = property.DefaultValueOrNil.Data
+				}
+
+				property.DefaultValueOrNil = p.visitExpr(property.DefaultValueOrNil)
+			}
+			b.Properties[i] = property
+		}
+
+	default:
+		panic("Internal error")
+	}
+}
+
+// statementCaresAboutScope reports whether wrapping "stmt" in a block (or
+// unwrapping it from one) would change its meaning. Block-scoped declarations
+// such as "let" and "const" are scope-sensitive; plain control-flow
+// statements are not.
+func statementCaresAboutScope(stmt js_ast.Stmt) bool {
+	if local, ok := stmt.Data.(*js_ast.SLocal); ok {
+		// "var" declarations are function-scoped, so blocks don't affect them
+		return local.Kind != js_ast.LocalVar
+	}
+
+	switch stmt.Data.(type) {
+	case *js_ast.SBlock, *js_ast.SEmpty, *js_ast.SDebugger, *js_ast.SExpr, *js_ast.SIf,
+		*js_ast.SFor, *js_ast.SForIn, *js_ast.SForOf, *js_ast.SDoWhile, *js_ast.SWhile,
+		*js_ast.SWith, *js_ast.STry, *js_ast.SSwitch, *js_ast.SReturn, *js_ast.SThrow,
+		*js_ast.SBreak, *js_ast.SContinue, *js_ast.SDirective, *js_ast.SLabel:
+		return false
+	}
+
+	// Anything else (functions, classes, other declarations) is scope-sensitive
+	return true
+}
+
+// dropFirstStatement removes the first statement from "body" (which may be a
+// block), substituting "replaceOrNil" in its place when it is non-nil.
+func dropFirstStatement(body js_ast.Stmt, replaceOrNil js_ast.Stmt) js_ast.Stmt {
+	block, isBlock := body.Data.(*js_ast.SBlock)
+	if !isBlock || len(block.Stmts) == 0 {
+		// The body itself is the first statement: replace it entirely
+		if replaceOrNil.Data != nil {
+			return replaceOrNil
+		}
+		return js_ast.Stmt{Loc: body.Loc, Data: js_ast.SEmptyShared}
+	}
+
+	switch {
+	case replaceOrNil.Data != nil:
+		// Overwrite the first statement in the block with the replacement
+		block.Stmts[0] = replaceOrNil
+	case len(block.Stmts) == 2 && !statementCaresAboutScope(block.Stmts[1]):
+		// Unwrap a block that's left with one scope-insensitive statement
+		return block.Stmts[1]
+	default:
+		block.Stmts = block.Stmts[1:]
+	}
+	return body
+}
+
+// mangleFor folds a leading "if ... break;" statement in a loop body into the
+// loop's test expression, e.g. "for (;;) if (x) break;" => "for (; !x;) ;".
+// This mutates "s" in place.
+func mangleFor(s *js_ast.SFor) {
+	// Get the first statement in the loop
+	first := s.Body
+	if block, ok := first.Data.(*js_ast.SBlock); ok && len(block.Stmts) > 0 {
+		first = block.Stmts[0]
+	}
+
+	if ifS, ok := first.Data.(*js_ast.SIf); ok {
+		// "for (;;) if (x) break;" => "for (; !x;) ;"
+		// "for (; a;) if (x) break;" => "for (; a && !x;) ;"
+		// "for (;;) if (x) break; else y();" => "for (; !x;) y();"
+		// "for (; a;) if (x) break; else y();" => "for (; a && !x;) y();"
+		if breakS, ok := ifS.Yes.Data.(*js_ast.SBreak); ok && breakS.Label == nil {
+			// Negate the test, avoiding a double negation when it's already "!x"
+			var not js_ast.Expr
+			if unary, ok := ifS.Test.Data.(*js_ast.EUnary); ok && unary.Op == js_ast.UnOpNot {
+				not = unary.Value
+			} else {
+				not = js_ast.Not(ifS.Test)
+			}
+			if s.TestOrNil.Data != nil {
+				s.TestOrNil = js_ast.Expr{Loc: s.TestOrNil.Loc, Data: &js_ast.EBinary{
+					Op:    js_ast.BinOpLogicalAnd,
+					Left:  s.TestOrNil,
+					Right: not,
+				}}
+			} else {
+				s.TestOrNil = not
+			}
+			s.Body = dropFirstStatement(s.Body, ifS.NoOrNil)
+			return
+		}
+
+		// "for (;;) if (x) y(); else break;" => "for (; x;) y();"
+		// "for (; a;) if (x) y(); else break;" => "for (; a && x;) y();"
+		if ifS.NoOrNil.Data != nil {
+			if breakS, ok := ifS.NoOrNil.Data.(*js_ast.SBreak); ok && breakS.Label == nil {
+				if s.TestOrNil.Data != nil {
+					s.TestOrNil = js_ast.Expr{Loc: s.TestOrNil.Loc, Data: &js_ast.EBinary{
+						Op:    js_ast.BinOpLogicalAnd,
+						Left:  s.TestOrNil,
+						Right: ifS.Test,
+					}}
+				} else {
+					s.TestOrNil = ifS.Test
+				}
+				s.Body = dropFirstStatement(s.Body, ifS.Yes)
+				return
+			}
+		}
+	}
+}
+
+func appendIfOrLabelBodyPreservingScope(stmts []js_ast.Stmt, body js_ast.Stmt) []js_ast.Stmt {
+	if block, ok := body.Data.(*js_ast.SBlock); ok {
+		keepBlock := false
+		for _, stmt := range block.Stmts {
+			if statementCaresAboutScope(stmt) {
+				keepBlock = true
+				break
+			}
+		}
+		if !keepBlock {
+			return append(stmts, block.Stmts...)
+		}
+	}
+
+	if statementCaresAboutScope(body) {
+		return append(stmts, js_ast.Stmt{Loc: body.Loc, Data: &js_ast.SBlock{Stmts: []js_ast.Stmt{body}}})
+	}
+
+	return append(stmts, body)
+}
+
// mangleIf attempts to simplify an "if" statement during minification: it
// constant-folds the test expression, drops branches that are dead code, and
// converts simple branches into "&&", "||", or "?:" expressions. It appends
// the resulting statement(s) to "stmts" and returns the extended slice.
func (p *parser) mangleIf(stmts []js_ast.Stmt, loc logger.Loc, s *js_ast.SIf) []js_ast.Stmt {
	// Constant folding using the test expression
	if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(s.Test.Data); ok {
		if boolean {
			// The test is truthy
			if s.NoOrNil.Data == nil || !shouldKeepStmtInDeadControlFlow(s.NoOrNil) {
				// We can drop the "no" branch
				if sideEffects == js_ast.CouldHaveSideEffects {
					// Keep the condition if it could have side effects (but is still known to be truthy)
					if test := p.astHelpers.SimplifyUnusedExpr(s.Test, p.options.unsupportedJSFeatures); test.Data != nil {
						stmts = append(stmts, js_ast.Stmt{Loc: s.Test.Loc, Data: &js_ast.SExpr{Value: test}})
					}
				}
				return appendIfOrLabelBodyPreservingScope(stmts, s.Yes)
			} else {
				// We have to keep the "no" branch
			}
		} else {
			// The test is falsy
			if !shouldKeepStmtInDeadControlFlow(s.Yes) {
				// We can drop the "yes" branch
				if sideEffects == js_ast.CouldHaveSideEffects {
					// Keep the condition if it could have side effects (but is still known to be falsy)
					if test := p.astHelpers.SimplifyUnusedExpr(s.Test, p.options.unsupportedJSFeatures); test.Data != nil {
						stmts = append(stmts, js_ast.Stmt{Loc: s.Test.Loc, Data: &js_ast.SExpr{Value: test}})
					}
				}
				if s.NoOrNil.Data == nil {
					return stmts
				}
				return appendIfOrLabelBodyPreservingScope(stmts, s.NoOrNil)
			} else {
				// We have to keep the "yes" branch
			}
		}

		// Use "1" and "0" instead of "true" and "false" to be shorter
		if sideEffects == js_ast.NoSideEffects {
			if boolean {
				s.Test.Data = &js_ast.ENumber{Value: 1}
			} else {
				s.Test.Data = &js_ast.ENumber{Value: 0}
			}
		}
	}

	// If a rewrite below produces a single expression, it is stored here and
	// emitted as an expression statement at the end
	var expr js_ast.Expr

	if yes, ok := s.Yes.Data.(*js_ast.SExpr); ok {
		// "yes" is an expression
		if s.NoOrNil.Data == nil {
			if not, ok := s.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
				// "if (!a) b();" => "a || b();"
				expr = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalOr, not.Value, yes.Value)
			} else {
				// "if (a) b();" => "a && b();"
				expr = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalAnd, s.Test, yes.Value)
			}
		} else if no, ok := s.NoOrNil.Data.(*js_ast.SExpr); ok {
			// "if (a) b(); else c();" => "a ? b() : c();"
			expr = p.astHelpers.MangleIfExpr(loc, &js_ast.EIf{
				Test: s.Test,
				Yes:  yes.Value,
				No:   no.Value,
			}, p.options.unsupportedJSFeatures)
		}
	} else if _, ok := s.Yes.Data.(*js_ast.SEmpty); ok {
		// "yes" is missing
		if s.NoOrNil.Data == nil {
			// "yes" and "no" are both missing
			if p.astHelpers.ExprCanBeRemovedIfUnused(s.Test) {
				// "if (1) {}" => ""
				return stmts
			} else {
				// "if (a) {}" => "a;"
				expr = s.Test
			}
		} else if no, ok := s.NoOrNil.Data.(*js_ast.SExpr); ok {
			if not, ok := s.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
				// "if (!a) {} else b();" => "a && b();"
				expr = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalAnd, not.Value, no.Value)
			} else {
				// "if (a) {} else b();" => "a || b();"
				expr = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalOr, s.Test, no.Value)
			}
		} else {
			// "yes" is missing and "no" is not missing (and is not an expression)
			if not, ok := s.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
				// "if (!a) {} else throw b;" => "if (a) throw b;"
				s.Test = not.Value
				s.Yes = s.NoOrNil
				s.NoOrNil = js_ast.Stmt{}
			} else {
				// "if (a) {} else throw b;" => "if (!a) throw b;"
				s.Test = js_ast.Not(s.Test)
				s.Yes = s.NoOrNil
				s.NoOrNil = js_ast.Stmt{}
			}
		}
	} else {
		// "yes" is not missing (and is not an expression)
		if s.NoOrNil.Data != nil {
			// "yes" is not missing (and is not an expression) and "no" is not missing
			if not, ok := s.Test.Data.(*js_ast.EUnary); ok && not.Op == js_ast.UnOpNot {
				// "if (!a) return b; else return c;" => "if (a) return c; else return b;"
				s.Test = not.Value
				s.Yes, s.NoOrNil = s.NoOrNil, s.Yes
			}
		} else {
			// "no" is missing
			if s2, ok := s.Yes.Data.(*js_ast.SIf); ok && s2.NoOrNil.Data == nil {
				// "if (a) if (b) return c;" => "if (a && b) return c;"
				s.Test = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalAnd, s.Test, s2.Test)
				s.Yes = s2.Yes
			}
		}
	}

	// Return an expression if we replaced the if statement with an expression above
	if expr.Data != nil {
		expr = p.astHelpers.SimplifyUnusedExpr(expr, p.options.unsupportedJSFeatures)
		return append(stmts, js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: expr}})
	}

	return append(stmts, js_ast.Stmt{Loc: loc, Data: s})
}
+
+func (p *parser) keepExprSymbolName(value js_ast.Expr, name string) js_ast.Expr {
+	value = p.callRuntime(value.Loc, "__name", []js_ast.Expr{value,
+		{Loc: value.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name)}},
+	})
+
+	// Make sure tree shaking removes this if the function is never used
+	value.Data.(*js_ast.ECall).CanBeUnwrappedIfUnused = true
+	return value
+}
+
+func (p *parser) keepClassOrFnSymbolName(loc logger.Loc, expr js_ast.Expr, name string) js_ast.Stmt {
+	return js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{
+		Value: p.callRuntime(loc, "__name", []js_ast.Expr{
+			expr,
+			{Loc: loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name)}},
+		}),
+		IsFromClassOrFnThatCanBeRemovedIfUnused: true,
+	}}
+}
+
+func (p *parser) visitAndAppendStmt(stmts []js_ast.Stmt, stmt js_ast.Stmt) []js_ast.Stmt {
+	// By default any statement ends the const local prefix
+	wasAfterAfterConstLocalPrefix := p.currentScope.IsAfterConstLocalPrefix
+	p.currentScope.IsAfterConstLocalPrefix = true
+
+	switch s := stmt.Data.(type) {
+	case *js_ast.SEmpty, *js_ast.SComment:
+		// Comments do not end the const local prefix
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+	case *js_ast.SDebugger:
+		// Debugger statements do not end the const local prefix
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+		if p.options.dropDebugger {
+			return stmts
+		}
+
+	case *js_ast.STypeScript:
+		// Type annotations do not end the const local prefix
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+		// Erase TypeScript constructs from the output completely
+		return stmts
+
+	case *js_ast.SDirective:
+		// Directives do not end the const local prefix
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+		if p.isStrictMode() && s.LegacyOctalLoc.Start > 0 {
+			p.markStrictModeFeature(legacyOctalEscape, p.source.RangeOfLegacyOctalEscape(s.LegacyOctalLoc), "")
+		}
+
+	case *js_ast.SImport:
+		p.recordDeclaredSymbol(s.NamespaceRef)
+
+		if s.DefaultName != nil {
+			p.recordDeclaredSymbol(s.DefaultName.Ref)
+		}
+
+		if s.Items != nil {
+			for _, item := range *s.Items {
+				p.recordDeclaredSymbol(item.Name.Ref)
+			}
+		}
+
+	case *js_ast.SExportClause:
+		// "export {foo}"
+		end := 0
+		for _, item := range s.Items {
+			name := p.loadNameFromRef(item.Name.Ref)
+			ref := p.findSymbol(item.AliasLoc, name).ref
+
+			if p.symbols[ref.InnerIndex].Kind == ast.SymbolUnbound {
+				// Silently strip exports of non-local symbols in TypeScript, since
+				// those likely correspond to type-only exports. But report exports of
+				// non-local symbols as errors in JavaScript.
+				if !p.options.ts.Parse {
+					r := js_lexer.RangeOfIdentifier(p.source, item.Name.Loc)
+					p.log.AddError(&p.tracker, r, fmt.Sprintf("%q is not declared in this file", name))
+				}
+				continue
+			}
+
+			item.Name.Ref = ref
+			s.Items[end] = item
+			end++
+		}
+
+		// Note: do not remove empty export statements since TypeScript uses them as module markers
+		s.Items = s.Items[:end]
+
+	case *js_ast.SExportFrom:
+		// "export {foo} from 'path'"
+		name := p.loadNameFromRef(s.NamespaceRef)
+		s.NamespaceRef = p.newSymbol(ast.SymbolOther, name)
+		p.currentScope.Generated = append(p.currentScope.Generated, s.NamespaceRef)
+		p.recordDeclaredSymbol(s.NamespaceRef)
+
+		// This is a re-export and the symbols created here are used to reference
+		// names in another file. This means the symbols are really aliases.
+		for i, item := range s.Items {
+			name := p.loadNameFromRef(item.Name.Ref)
+			ref := p.newSymbol(ast.SymbolOther, name)
+			p.currentScope.Generated = append(p.currentScope.Generated, ref)
+			p.recordDeclaredSymbol(ref)
+			s.Items[i].Name.Ref = ref
+		}
+
+	case *js_ast.SExportStar:
+		// "export * from 'path'"
+		// "export * as ns from 'path'"
+		name := p.loadNameFromRef(s.NamespaceRef)
+		s.NamespaceRef = p.newSymbol(ast.SymbolOther, name)
+		p.currentScope.Generated = append(p.currentScope.Generated, s.NamespaceRef)
+		p.recordDeclaredSymbol(s.NamespaceRef)
+
+		// "export * as ns from 'path'"
+		if s.Alias != nil {
+			// "import * as ns from 'path'"
+			// "export {ns}"
+			if p.options.unsupportedJSFeatures.Has(compat.ExportStarAs) {
+				p.recordUsage(s.NamespaceRef)
+				return append(stmts,
+					js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SImport{
+						NamespaceRef:      s.NamespaceRef,
+						StarNameLoc:       &s.Alias.Loc,
+						ImportRecordIndex: s.ImportRecordIndex,
+					}},
+					js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExportClause{
+						Items: []js_ast.ClauseItem{{
+							Alias:        s.Alias.OriginalName,
+							OriginalName: s.Alias.OriginalName,
+							AliasLoc:     s.Alias.Loc,
+							Name:         ast.LocRef{Loc: s.Alias.Loc, Ref: s.NamespaceRef},
+						}},
+						IsSingleLine: true,
+					}},
+				)
+			}
+		}
+
+	case *js_ast.SExportDefault:
+		p.recordDeclaredSymbol(s.DefaultName.Ref)
+
+		switch s2 := s.Value.Data.(type) {
+		case *js_ast.SExpr:
+			// Propagate the name to keep from the export into the value
+			p.nameToKeep = "default"
+			p.nameToKeepIsFor = s2.Value.Data
+
+			s2.Value = p.visitExpr(s2.Value)
+
+			// Discard type-only export default statements
+			if p.options.ts.Parse {
+				if id, ok := s2.Value.Data.(*js_ast.EIdentifier); ok {
+					symbol := p.symbols[id.Ref.InnerIndex]
+					if symbol.Kind == ast.SymbolUnbound && p.localTypeNames[symbol.OriginalName] {
+						return stmts
+					}
+				}
+			}
+
+			// If there are lowered "using" declarations, change this into a "var"
+			if p.currentScope.Parent == nil && p.willWrapModuleInTryCatchForUsing {
+				stmts = append(stmts,
+					js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SLocal{
+						Decls: []js_ast.Decl{{
+							Binding:    js_ast.Binding{Loc: s.DefaultName.Loc, Data: &js_ast.BIdentifier{Ref: s.DefaultName.Ref}},
+							ValueOrNil: s2.Value,
+						}},
+					}},
+					js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExportClause{Items: []js_ast.ClauseItem{{
+						Alias:    "default",
+						AliasLoc: s.DefaultName.Loc,
+						Name:     s.DefaultName,
+					}}}},
+				)
+				break
+			}
+
+			stmts = append(stmts, stmt)
+
+		case *js_ast.SFunction:
+			// If we need to preserve the name but there is no name, generate a name
+			var name string
+			if p.options.keepNames {
+				if s2.Fn.Name == nil {
+					clone := s.DefaultName
+					s2.Fn.Name = &clone
+					name = "default"
+				} else {
+					name = p.symbols[s2.Fn.Name.Ref.InnerIndex].OriginalName
+				}
+			}
+
+			p.visitFn(&s2.Fn, s2.Fn.OpenParenLoc, visitFnOpts{})
+			stmts = append(stmts, stmt)
+
+			// Optionally preserve the name
+			if p.options.keepNames {
+				p.symbols[s2.Fn.Name.Ref.InnerIndex].Flags |= ast.DidKeepName
+				fn := js_ast.Expr{Loc: s2.Fn.Name.Loc, Data: &js_ast.EIdentifier{Ref: s2.Fn.Name.Ref}}
+				stmts = append(stmts, p.keepClassOrFnSymbolName(s2.Fn.Name.Loc, fn, name))
+			}
+
+		case *js_ast.SClass:
+			result := p.visitClass(s.Value.Loc, &s2.Class, s.DefaultName.Ref, "default")
+
+			// Lower class field syntax for browsers that don't support it
+			classStmts, _ := p.lowerClass(stmt, js_ast.Expr{}, result, "")
+
+			// Remember if the class was side-effect free before lowering
+			if result.canBeRemovedIfUnused {
+				for _, classStmt := range classStmts {
+					if s2, ok := classStmt.Data.(*js_ast.SExpr); ok {
+						s2.IsFromClassOrFnThatCanBeRemovedIfUnused = true
+					}
+				}
+			}
+
+			stmts = append(stmts, classStmts...)
+
+		default:
+			panic("Internal error")
+		}
+
+		// Use a more friendly name than "default" now that "--keep-names" has
+		// been applied and has made sure to enforce the name "default"
+		if p.symbols[s.DefaultName.Ref.InnerIndex].OriginalName == "default" {
+			p.symbols[s.DefaultName.Ref.InnerIndex].OriginalName = p.source.IdentifierName + "_default"
+		}
+
+		return stmts
+
+	case *js_ast.SExportEquals:
+		// "module.exports = value"
+		stmts = append(stmts, js_ast.AssignStmt(
+			js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EDot{
+				Target:  js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: p.moduleRef}},
+				Name:    "exports",
+				NameLoc: stmt.Loc,
+			}},
+			p.visitExpr(s.Value),
+		))
+		p.recordUsage(p.moduleRef)
+		return stmts
+
+	case *js_ast.SBreak:
+		if s.Label != nil {
+			name := p.loadNameFromRef(s.Label.Ref)
+			s.Label.Ref, _, _ = p.findLabelSymbol(s.Label.Loc, name)
+		} else if !p.fnOrArrowDataVisit.isInsideLoop && !p.fnOrArrowDataVisit.isInsideSwitch {
+			r := js_lexer.RangeOfIdentifier(p.source, stmt.Loc)
+			p.log.AddError(&p.tracker, r, "Cannot use \"break\" here:")
+		}
+
+	case *js_ast.SContinue:
+		if s.Label != nil {
+			name := p.loadNameFromRef(s.Label.Ref)
+			var isLoop, ok bool
+			s.Label.Ref, isLoop, ok = p.findLabelSymbol(s.Label.Loc, name)
+			if ok && !isLoop {
+				r := js_lexer.RangeOfIdentifier(p.source, s.Label.Loc)
+				p.log.AddError(&p.tracker, r, fmt.Sprintf("Cannot continue to label \"%s\"", name))
+			}
+		} else if !p.fnOrArrowDataVisit.isInsideLoop {
+			r := js_lexer.RangeOfIdentifier(p.source, stmt.Loc)
+			p.log.AddError(&p.tracker, r, "Cannot use \"continue\" here:")
+		}
+
+	case *js_ast.SLabel:
+		// Forbid functions inside labels in strict mode
+		if p.isStrictMode() {
+			if _, ok := s.Stmt.Data.(*js_ast.SFunction); ok {
+				p.markStrictModeFeature(labelFunctionStmt, js_lexer.RangeOfIdentifier(p.source, s.Stmt.Loc), "")
+			}
+		}
+
+		p.pushScopeForVisitPass(js_ast.ScopeLabel, stmt.Loc)
+		name := p.loadNameFromRef(s.Name.Ref)
+		if js_lexer.StrictModeReservedWords[name] {
+			p.markStrictModeFeature(reservedWord, js_lexer.RangeOfIdentifier(p.source, s.Name.Loc), name)
+		}
+		ref := p.newSymbol(ast.SymbolLabel, name)
+		s.Name.Ref = ref
+
+		// Duplicate labels are an error
+		for scope := p.currentScope.Parent; scope != nil; scope = scope.Parent {
+			if scope.Label.Ref != ast.InvalidRef && name == p.symbols[scope.Label.Ref.InnerIndex].OriginalName {
+				p.log.AddErrorWithNotes(&p.tracker, js_lexer.RangeOfIdentifier(p.source, s.Name.Loc),
+					fmt.Sprintf("Duplicate label %q", name),
+					[]logger.MsgData{p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, scope.Label.Loc),
+						fmt.Sprintf("The original label %q is here:", name))})
+				break
+			}
+			if scope.Kind == js_ast.ScopeFunctionBody {
+				// Labels are only visible within the function they are defined in.
+				break
+			}
+		}
+
+		p.currentScope.Label = ast.LocRef{Loc: s.Name.Loc, Ref: ref}
+		switch s.Stmt.Data.(type) {
+		case *js_ast.SFor, *js_ast.SForIn, *js_ast.SForOf, *js_ast.SWhile, *js_ast.SDoWhile:
+			p.currentScope.LabelStmtIsLoop = true
+		}
+
+		// If we're dropping this statement, consider control flow to be dead
+		_, shouldDropLabel := p.dropLabelsMap[name]
+		old := p.isControlFlowDead
+		if shouldDropLabel {
+			p.isControlFlowDead = true
+		}
+
+		s.Stmt = p.visitSingleStmt(s.Stmt, stmtsNormal)
+		p.popScope()
+
+		// Drop this entire statement if requested
+		if shouldDropLabel {
+			p.isControlFlowDead = old
+			return stmts
+		}
+
+		if p.options.minifySyntax {
+			// Optimize "x: break x" which some people apparently write by hand
+			if child, ok := s.Stmt.Data.(*js_ast.SBreak); ok && child.Label != nil && child.Label.Ref == s.Name.Ref {
+				return stmts
+			}
+
+			// Remove the label if it's not necessary
+			if p.symbols[ref.InnerIndex].UseCountEstimate == 0 {
+				return appendIfOrLabelBodyPreservingScope(stmts, s.Stmt)
+			}
+		}
+
+		// Handle "for await" that has been lowered by moving this label inside the "try"
+		if try, ok := s.Stmt.Data.(*js_ast.STry); ok && len(try.Block.Stmts) > 0 {
+			if _, ok := try.Block.Stmts[0].Data.(*js_ast.SFor); ok {
+				try.Block.Stmts[0] = js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SLabel{
+					Stmt:             try.Block.Stmts[0],
+					Name:             s.Name,
+					IsSingleLineStmt: s.IsSingleLineStmt,
+				}}
+				return append(stmts, s.Stmt)
+			}
+		}
+
+	case *js_ast.SLocal:
+		// Silently remove unsupported top-level "await" in dead code branches
+		if s.Kind == js_ast.LocalAwaitUsing && p.fnOrArrowDataVisit.isOutsideFnOrArrow {
+			if p.isControlFlowDead && (p.options.unsupportedJSFeatures.Has(compat.TopLevelAwait) || !p.options.outputFormat.KeepESMImportExportSyntax()) {
+				s.Kind = js_ast.LocalUsing
+			} else {
+				p.liveTopLevelAwaitKeyword = logger.Range{Loc: stmt.Loc, Len: 5}
+				p.markSyntaxFeature(compat.TopLevelAwait, logger.Range{Loc: stmt.Loc, Len: 5})
+			}
+		}
+
+		// Local statements do not end the const local prefix
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+		for i := range s.Decls {
+			d := &s.Decls[i]
+			p.visitBinding(d.Binding, bindingOpts{})
+
+			// Visit the initializer
+			if d.ValueOrNil.Data != nil {
+				// Fold numeric constants in the initializer
+				oldShouldFoldTypeScriptConstantExpressions := p.shouldFoldTypeScriptConstantExpressions
+				p.shouldFoldTypeScriptConstantExpressions = p.options.minifySyntax && !p.currentScope.IsAfterConstLocalPrefix
+
+				// Propagate the name to keep from the binding into the initializer
+				if id, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+					p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+					p.nameToKeepIsFor = d.ValueOrNil.Data
+				}
+
+				d.ValueOrNil = p.visitExpr(d.ValueOrNil)
+
+				p.shouldFoldTypeScriptConstantExpressions = oldShouldFoldTypeScriptConstantExpressions
+
+				// Initializing to undefined is implicit, but be careful to not
+				// accidentally cause a syntax error or behavior change by removing
+				// the value
+				//
+				// Good:
+				//   "let a = undefined;" => "let a;"
+				//
+				// Bad (a syntax error):
+				//   "let {} = undefined;" => "let {};"
+				//
+				// Bad (a behavior change):
+				//   "a = 123; var a = undefined;" => "a = 123; var a;"
+				//
+				if p.options.minifySyntax && s.Kind == js_ast.LocalLet {
+					if _, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+						if _, ok := d.ValueOrNil.Data.(*js_ast.EUndefined); ok {
+							d.ValueOrNil = js_ast.Expr{}
+						}
+					}
+				}
+
+				// Yarn's PnP data may be stored in a variable: https://github.com/yarnpkg/berry/pull/4320
+				if p.options.decodeHydrateRuntimeStateYarnPnP {
+					if str, ok := d.ValueOrNil.Data.(*js_ast.EString); ok {
+						if id, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+							if p.stringLocalsForYarnPnP == nil {
+								p.stringLocalsForYarnPnP = make(map[ast.Ref]stringLocalForYarnPnP)
+							}
+							p.stringLocalsForYarnPnP[id.Ref] = stringLocalForYarnPnP{value: str.Value, loc: d.ValueOrNil.Loc}
+						}
+					}
+				}
+			}
+
+			// Attempt to continue the const local prefix
+			if p.options.minifySyntax && !p.currentScope.IsAfterConstLocalPrefix {
+				if id, ok := d.Binding.Data.(*js_ast.BIdentifier); ok {
+					if s.Kind == js_ast.LocalConst && d.ValueOrNil.Data != nil {
+						if value := js_ast.ExprToConstValue(d.ValueOrNil); value.Kind != js_ast.ConstValueNone {
+							if p.constValues == nil {
+								p.constValues = make(map[ast.Ref]js_ast.ConstValue)
+							}
+							p.constValues[id.Ref] = value
+							continue
+						}
+					}
+
+					if d.ValueOrNil.Data != nil && !isSafeForConstLocalPrefix(d.ValueOrNil) {
+						p.currentScope.IsAfterConstLocalPrefix = true
+					}
+				} else {
+					// A non-identifier binding ends the const local prefix
+					p.currentScope.IsAfterConstLocalPrefix = true
+				}
+			}
+		}
+
+		// Handle being exported inside a namespace
+		if s.IsExport && p.enclosingNamespaceArgRef != nil {
+			wrapIdentifier := func(loc logger.Loc, ref ast.Ref) js_ast.Expr {
+				p.recordUsage(*p.enclosingNamespaceArgRef)
+				return js_ast.Expr{Loc: loc, Data: p.dotOrMangledPropVisit(
+					js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
+					p.symbols[ref.InnerIndex].OriginalName,
+					loc,
+				)}
+			}
+			for _, decl := range s.Decls {
+				if decl.ValueOrNil.Data != nil {
+					target := js_ast.ConvertBindingToExpr(decl.Binding, wrapIdentifier)
+					if result, ok := p.lowerAssign(target, decl.ValueOrNil, objRestReturnValueIsUnused); ok {
+						target = result
+					} else {
+						target = js_ast.Assign(target, decl.ValueOrNil)
+					}
+					stmts = append(stmts, js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExpr{Value: target}})
+				}
+			}
+			return stmts
+		}
+
+		s.Decls = p.lowerObjectRestInDecls(s.Decls)
+
+		// Optimization: Avoid unnecessary "using" machinery by changing ones
+		// initialized to "null" or "undefined" into a normal variable. Note that
+		// "await using" still needs the "await", so we can't do it for those.
+		if p.options.minifySyntax && s.Kind == js_ast.LocalUsing {
+			s.Kind = js_ast.LocalConst
+			for _, decl := range s.Decls {
+				if t := js_ast.KnownPrimitiveType(decl.ValueOrNil.Data); t != js_ast.PrimitiveNull && t != js_ast.PrimitiveUndefined {
+					s.Kind = js_ast.LocalUsing
+					break
+				}
+			}
+		}
+
+		s.Kind = p.selectLocalKind(s.Kind)
+
+		// Potentially relocate "var" declarations to the top level
+		if s.Kind == js_ast.LocalVar {
+			if assign, ok := p.maybeRelocateVarsToTopLevel(s.Decls, relocateVarsNormal); ok {
+				if assign.Data != nil {
+					stmts = append(stmts, assign)
+				}
+				return stmts
+			}
+		}
+
+	case *js_ast.SExpr:
+		shouldTrimUnsightlyPrimitives := !p.options.minifySyntax && !isUnsightlyPrimitive(s.Value.Data)
+		p.stmtExprValue = s.Value.Data
+		s.Value = p.visitExpr(s.Value)
+
+		// Expressions that have been simplified down to a single primitive don't
+		// have any effect, and are automatically removed during minification.
+		// However, some people are really bothered by seeing them. Remove them
+		// so we don't bother these people.
+		if shouldTrimUnsightlyPrimitives && isUnsightlyPrimitive(s.Value.Data) {
+			return stmts
+		}
+
+		// Trim expressions without side effects
+		if p.options.minifySyntax {
+			s.Value = p.astHelpers.SimplifyUnusedExpr(s.Value, p.options.unsupportedJSFeatures)
+			if s.Value.Data == nil {
+				return stmts
+			}
+		}
+
+	case *js_ast.SThrow:
+		s.Value = p.visitExpr(s.Value)
+
+	case *js_ast.SReturn:
+		// Forbid top-level return inside modules with ECMAScript syntax
+		if p.fnOrArrowDataVisit.isOutsideFnOrArrow {
+			if p.isFileConsideredESM {
+				_, notes := p.whyESModule()
+				p.log.AddErrorWithNotes(&p.tracker, js_lexer.RangeOfIdentifier(p.source, stmt.Loc),
+					"Top-level return cannot be used inside an ECMAScript module", notes)
+			} else {
+				p.hasTopLevelReturn = true
+			}
+		}
+
+		if s.ValueOrNil.Data != nil {
+			s.ValueOrNil = p.visitExpr(s.ValueOrNil)
+
+			// Returning undefined is implicit except when inside an async generator
+			// function, where "return undefined" behaves like "return await undefined"
+			// but just "return" has no "await".
+			if p.options.minifySyntax && (!p.fnOrArrowDataVisit.isAsync || !p.fnOrArrowDataVisit.isGenerator) {
+				if _, ok := s.ValueOrNil.Data.(*js_ast.EUndefined); ok {
+					s.ValueOrNil = js_ast.Expr{}
+				}
+			}
+		}
+
+	case *js_ast.SBlock:
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+
+		// Pass the "is loop body" status on to the direct children of a block used
+		// as a loop body. This is used to enable optimizations specific to the
+		// topmost scope in a loop body block.
+		if p.loopBody == s {
+			s.Stmts = p.visitStmts(s.Stmts, stmtsLoopBody)
+		} else {
+			s.Stmts = p.visitStmts(s.Stmts, stmtsNormal)
+		}
+
+		p.popScope()
+
+		if p.options.minifySyntax {
+			if len(s.Stmts) == 1 && !statementCaresAboutScope(s.Stmts[0]) {
+				// Unwrap blocks containing a single statement
+				stmt = s.Stmts[0]
+			} else if len(s.Stmts) == 0 {
+				// Trim empty blocks
+				stmt = js_ast.Stmt{Loc: stmt.Loc, Data: js_ast.SEmptyShared}
+			}
+		}
+
+	case *js_ast.SWith:
+		p.markStrictModeFeature(withStatement, js_lexer.RangeOfIdentifier(p.source, stmt.Loc), "")
+		s.Value = p.visitExpr(s.Value)
+		p.pushScopeForVisitPass(js_ast.ScopeWith, s.BodyLoc)
+		s.Body = p.visitSingleStmt(s.Body, stmtsNormal)
+		p.popScope()
+
+	case *js_ast.SWhile:
+		s.Test = p.visitExpr(s.Test)
+		s.Body = p.visitLoopBody(s.Body)
+
+		if p.options.minifySyntax {
+			s.Test = p.astHelpers.SimplifyBooleanExpr(s.Test)
+
+			// A true value is implied
+			testOrNil := s.Test
+			if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(s.Test.Data); ok && boolean && sideEffects == js_ast.NoSideEffects {
+				testOrNil = js_ast.Expr{}
+			}
+
+			// "while (a) {}" => "for (;a;) {}"
+			forS := &js_ast.SFor{TestOrNil: testOrNil, Body: s.Body, IsSingleLineBody: s.IsSingleLineBody}
+			mangleFor(forS)
+			stmt = js_ast.Stmt{Loc: stmt.Loc, Data: forS}
+		}
+
+	case *js_ast.SDoWhile:
+		s.Body = p.visitLoopBody(s.Body)
+		s.Test = p.visitExpr(s.Test)
+
+		if p.options.minifySyntax {
+			s.Test = p.astHelpers.SimplifyBooleanExpr(s.Test)
+		}
+
+	case *js_ast.SIf:
+		s.Test = p.visitExpr(s.Test)
+
+		if p.options.minifySyntax {
+			s.Test = p.astHelpers.SimplifyBooleanExpr(s.Test)
+		}
+
+		// Fold constants
+		boolean, _, ok := js_ast.ToBooleanWithSideEffects(s.Test.Data)
+
+		// Mark the control flow as dead if the branch is never taken
+		if ok && !boolean {
+			old := p.isControlFlowDead
+			p.isControlFlowDead = true
+			s.Yes = p.visitSingleStmt(s.Yes, stmtsNormal)
+			p.isControlFlowDead = old
+		} else {
+			s.Yes = p.visitSingleStmt(s.Yes, stmtsNormal)
+		}
+
+		// The "else" clause is optional
+		if s.NoOrNil.Data != nil {
+			// Mark the control flow as dead if the branch is never taken
+			if ok && boolean {
+				old := p.isControlFlowDead
+				p.isControlFlowDead = true
+				s.NoOrNil = p.visitSingleStmt(s.NoOrNil, stmtsNormal)
+				p.isControlFlowDead = old
+			} else {
+				s.NoOrNil = p.visitSingleStmt(s.NoOrNil, stmtsNormal)
+			}
+
+			// Trim unnecessary "else" clauses
+			if p.options.minifySyntax {
+				if _, ok := s.NoOrNil.Data.(*js_ast.SEmpty); ok {
+					s.NoOrNil = js_ast.Stmt{}
+				}
+			}
+		}
+
+		if p.options.minifySyntax {
+			return p.mangleIf(stmts, stmt.Loc, s)
+		}
+
+	case *js_ast.SFor:
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		if s.InitOrNil.Data != nil {
+			p.visitForLoopInit(s.InitOrNil, false)
+		}
+
+		if s.TestOrNil.Data != nil {
+			s.TestOrNil = p.visitExpr(s.TestOrNil)
+
+			if p.options.minifySyntax {
+				s.TestOrNil = p.astHelpers.SimplifyBooleanExpr(s.TestOrNil)
+
+				// A true value is implied
+				if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(s.TestOrNil.Data); ok && boolean && sideEffects == js_ast.NoSideEffects {
+					s.TestOrNil = js_ast.Expr{}
+				}
+			}
+		}
+
+		if s.UpdateOrNil.Data != nil {
+			s.UpdateOrNil = p.visitExpr(s.UpdateOrNil)
+		}
+		s.Body = p.visitLoopBody(s.Body)
+
+		// Potentially relocate "var" declarations to the top level. Note that this
+		// must be done inside the scope of the for loop or they won't be relocated.
+		if s.InitOrNil.Data != nil {
+			if init, ok := s.InitOrNil.Data.(*js_ast.SLocal); ok && init.Kind == js_ast.LocalVar {
+				if assign, ok := p.maybeRelocateVarsToTopLevel(init.Decls, relocateVarsNormal); ok {
+					if assign.Data != nil {
+						s.InitOrNil = assign
+					} else {
+						s.InitOrNil = js_ast.Stmt{}
+					}
+				}
+			}
+		}
+
+		p.popScope()
+
+		if p.options.minifySyntax {
+			mangleFor(s)
+		}
+
+	case *js_ast.SForIn:
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		p.visitForLoopInit(s.Init, true)
+		s.Value = p.visitExpr(s.Value)
+		s.Body = p.visitLoopBody(s.Body)
+
+		// Check for a variable initializer
+		if local, ok := s.Init.Data.(*js_ast.SLocal); ok && local.Kind == js_ast.LocalVar && len(local.Decls) == 1 {
+			decl := &local.Decls[0]
+			if id, ok := decl.Binding.Data.(*js_ast.BIdentifier); ok && decl.ValueOrNil.Data != nil {
+				p.markStrictModeFeature(forInVarInit, p.source.RangeOfOperatorBefore(decl.ValueOrNil.Loc, "="), "")
+
+				// Lower for-in variable initializers in case the output is used in strict mode
+				stmts = append(stmts, js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExpr{Value: js_ast.Assign(
+					js_ast.Expr{Loc: decl.Binding.Loc, Data: &js_ast.EIdentifier{Ref: id.Ref}},
+					decl.ValueOrNil,
+				)}})
+				decl.ValueOrNil = js_ast.Expr{}
+			}
+		}
+
+		// Potentially relocate "var" declarations to the top level. Note that this
+		// must be done inside the scope of the for loop or they won't be relocated.
+		if init, ok := s.Init.Data.(*js_ast.SLocal); ok && init.Kind == js_ast.LocalVar {
+			if replacement, ok := p.maybeRelocateVarsToTopLevel(init.Decls, relocateVarsForInOrForOf); ok {
+				s.Init = replacement
+			}
+		}
+
+		p.popScope()
+
+		p.lowerObjectRestInForLoopInit(s.Init, &s.Body)
+
+	case *js_ast.SForOf:
+		// Silently remove unsupported top-level "await" in dead code branches
+		if s.Await.Len > 0 && p.fnOrArrowDataVisit.isOutsideFnOrArrow {
+			if p.isControlFlowDead && (p.options.unsupportedJSFeatures.Has(compat.TopLevelAwait) || !p.options.outputFormat.KeepESMImportExportSyntax()) {
+				s.Await = logger.Range{}
+			} else {
+				p.liveTopLevelAwaitKeyword = s.Await
+				p.markSyntaxFeature(compat.TopLevelAwait, s.Await)
+			}
+		}
+
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		p.visitForLoopInit(s.Init, true)
+		s.Value = p.visitExpr(s.Value)
+		s.Body = p.visitLoopBody(s.Body)
+
+		// Potentially relocate "var" declarations to the top level. Note that this
+		// must be done inside the scope of the for loop or they won't be relocated.
+		if init, ok := s.Init.Data.(*js_ast.SLocal); ok && init.Kind == js_ast.LocalVar {
+			if replacement, ok := p.maybeRelocateVarsToTopLevel(init.Decls, relocateVarsForInOrForOf); ok {
+				s.Init = replacement
+			}
+		}
+
+		// Handle "for (using x of y)" and "for (await using x of y)"
+		if local, ok := s.Init.Data.(*js_ast.SLocal); ok {
+			if local.Kind == js_ast.LocalUsing && p.options.unsupportedJSFeatures.Has(compat.Using) {
+				p.lowerUsingDeclarationInForOf(s.Init.Loc, local, &s.Body)
+			} else if local.Kind == js_ast.LocalAwaitUsing {
+				if p.fnOrArrowDataVisit.isOutsideFnOrArrow {
+					if p.isControlFlowDead && (p.options.unsupportedJSFeatures.Has(compat.TopLevelAwait) || !p.options.outputFormat.KeepESMImportExportSyntax()) {
+						// Silently remove unsupported top-level "await" in dead code branches
+						local.Kind = js_ast.LocalUsing
+					} else {
+						p.liveTopLevelAwaitKeyword = logger.Range{Loc: s.Init.Loc, Len: 5}
+						p.markSyntaxFeature(compat.TopLevelAwait, p.liveTopLevelAwaitKeyword)
+					}
+					if p.options.unsupportedJSFeatures.Has(compat.Using) {
+						p.lowerUsingDeclarationInForOf(s.Init.Loc, local, &s.Body)
+					}
+				} else if p.options.unsupportedJSFeatures.Has(compat.Using) || p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) ||
+					(p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator) && p.fnOrArrowDataVisit.isGenerator) {
+					p.lowerUsingDeclarationInForOf(s.Init.Loc, local, &s.Body)
+				}
+			}
+		}
+
+		p.popScope()
+
+		p.lowerObjectRestInForLoopInit(s.Init, &s.Body)
+
+		// Lower "for await" if it's unsupported if it's in a lowered async generator
+		if s.Await.Len > 0 && (p.options.unsupportedJSFeatures.Has(compat.ForAwait) ||
+			(p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator) && p.fnOrArrowDataVisit.isGenerator)) {
+			return p.lowerForAwaitLoop(stmt.Loc, s, stmts)
+		}
+
+	case *js_ast.STry:
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, stmt.Loc)
+		if p.fnOrArrowDataVisit.tryBodyCount == 0 {
+			if s.Catch != nil {
+				p.fnOrArrowDataVisit.tryCatchLoc = s.Catch.Loc
+			} else {
+				p.fnOrArrowDataVisit.tryCatchLoc = stmt.Loc
+			}
+		}
+		p.fnOrArrowDataVisit.tryBodyCount++
+		s.Block.Stmts = p.visitStmts(s.Block.Stmts, stmtsNormal)
+		p.fnOrArrowDataVisit.tryBodyCount--
+		p.popScope()
+
+		if s.Catch != nil {
+			p.pushScopeForVisitPass(js_ast.ScopeCatchBinding, s.Catch.Loc)
+			if s.Catch.BindingOrNil.Data != nil {
+				p.visitBinding(s.Catch.BindingOrNil, bindingOpts{})
+			}
+
+			p.pushScopeForVisitPass(js_ast.ScopeBlock, s.Catch.BlockLoc)
+			s.Catch.Block.Stmts = p.visitStmts(s.Catch.Block.Stmts, stmtsNormal)
+			p.popScope()
+
+			p.lowerObjectRestInCatchBinding(s.Catch)
+			p.popScope()
+		}
+
+		if s.Finally != nil {
+			p.pushScopeForVisitPass(js_ast.ScopeBlock, s.Finally.Loc)
+			s.Finally.Block.Stmts = p.visitStmts(s.Finally.Block.Stmts, stmtsNormal)
+			p.popScope()
+		}
+
+	case *js_ast.SSwitch:
+		s.Test = p.visitExpr(s.Test)
+		p.pushScopeForVisitPass(js_ast.ScopeBlock, s.BodyLoc)
+		oldIsInsideSwitch := p.fnOrArrowDataVisit.isInsideSwitch
+		p.fnOrArrowDataVisit.isInsideSwitch = true
+		for i, c := range s.Cases {
+			if c.ValueOrNil.Data != nil {
+				c.ValueOrNil = p.visitExpr(c.ValueOrNil)
+				p.warnAboutEqualityCheck("case", c.ValueOrNil, c.ValueOrNil.Loc)
+				p.warnAboutTypeofAndString(s.Test, c.ValueOrNil, onlyCheckOriginalOrder)
+			}
+			c.Body = p.visitStmts(c.Body, stmtsSwitch)
+
+			// Make sure the assignment to the body above is preserved
+			s.Cases[i] = c
+		}
+		p.fnOrArrowDataVisit.isInsideSwitch = oldIsInsideSwitch
+		p.popScope()
+
+		// Check for duplicate case values
+		p.duplicateCaseChecker.reset()
+		for _, c := range s.Cases {
+			if c.ValueOrNil.Data != nil {
+				p.duplicateCaseChecker.check(p, c.ValueOrNil)
+			}
+		}
+
+		// Unwrap switch statements in dead code
+		if p.options.minifySyntax && p.isControlFlowDead {
+			for _, c := range s.Cases {
+				stmts = append(stmts, c.Body...)
+			}
+			return stmts
+		}
+
+		// "using" declarations inside switch statements must be special-cased
+		if lowered := p.maybeLowerUsingDeclarationsInSwitch(stmt.Loc, s); lowered != nil {
+			return append(stmts, lowered...)
+		}
+
+	case *js_ast.SFunction:
+		p.visitFn(&s.Fn, s.Fn.OpenParenLoc, visitFnOpts{})
+
+		// Strip this function declaration if it was overwritten
+		if p.symbols[s.Fn.Name.Ref.InnerIndex].Flags.Has(ast.RemoveOverwrittenFunctionDeclaration) && !s.IsExport {
+			return stmts
+		}
+
+		if p.options.minifySyntax && !s.Fn.IsGenerator && !s.Fn.IsAsync && !s.Fn.HasRestArg && s.Fn.Name != nil {
+			if len(s.Fn.Body.Block.Stmts) == 0 {
+				// Mark if this function is an empty function
+				hasSideEffectFreeArguments := true
+				for _, arg := range s.Fn.Args {
+					if _, ok := arg.Binding.Data.(*js_ast.BIdentifier); !ok {
+						hasSideEffectFreeArguments = false
+						break
+					}
+				}
+				if hasSideEffectFreeArguments {
+					p.symbols[s.Fn.Name.Ref.InnerIndex].Flags |= ast.IsEmptyFunction
+				}
+			} else if len(s.Fn.Args) == 1 && len(s.Fn.Body.Block.Stmts) == 1 {
+				// Mark if this function is an identity function
+				if arg := s.Fn.Args[0]; arg.DefaultOrNil.Data == nil {
+					if id, ok := arg.Binding.Data.(*js_ast.BIdentifier); ok {
+						if ret, ok := s.Fn.Body.Block.Stmts[0].Data.(*js_ast.SReturn); ok {
+							if retID, ok := ret.ValueOrNil.Data.(*js_ast.EIdentifier); ok && id.Ref == retID.Ref {
+								p.symbols[s.Fn.Name.Ref.InnerIndex].Flags |= ast.IsIdentityFunction
+							}
+						}
+					}
+				}
+			}
+		}
+
+		// Handle exporting this function from a namespace
+		if s.IsExport && p.enclosingNamespaceArgRef != nil {
+			s.IsExport = false
+			stmts = append(stmts, stmt, js_ast.AssignStmt(
+				js_ast.Expr{Loc: stmt.Loc, Data: p.dotOrMangledPropVisit(
+					js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
+					p.symbols[s.Fn.Name.Ref.InnerIndex].OriginalName,
+					s.Fn.Name.Loc,
+				)},
+				js_ast.Expr{Loc: s.Fn.Name.Loc, Data: &js_ast.EIdentifier{Ref: s.Fn.Name.Ref}},
+			))
+		} else {
+			stmts = append(stmts, stmt)
+		}
+
+		// Optionally preserve the name
+		if p.options.keepNames {
+			symbol := &p.symbols[s.Fn.Name.Ref.InnerIndex]
+			symbol.Flags |= ast.DidKeepName
+			fn := js_ast.Expr{Loc: s.Fn.Name.Loc, Data: &js_ast.EIdentifier{Ref: s.Fn.Name.Ref}}
+			stmts = append(stmts, p.keepClassOrFnSymbolName(s.Fn.Name.Loc, fn, symbol.OriginalName))
+		}
+		return stmts
+
+	case *js_ast.SClass:
+		result := p.visitClass(stmt.Loc, &s.Class, ast.InvalidRef, "")
+
+		// Remove the export flag inside a namespace
+		var nameToExport string
+		wasExportInsideNamespace := s.IsExport && p.enclosingNamespaceArgRef != nil
+		if wasExportInsideNamespace {
+			nameToExport = p.symbols[s.Class.Name.Ref.InnerIndex].OriginalName
+			s.IsExport = false
+		}
+
+		// Lower class field syntax for browsers that don't support it
+		classStmts, _ := p.lowerClass(stmt, js_ast.Expr{}, result, "")
+
+		// Remember if the class was side-effect free before lowering
+		if result.canBeRemovedIfUnused {
+			for _, classStmt := range classStmts {
+				if s2, ok := classStmt.Data.(*js_ast.SExpr); ok {
+					s2.IsFromClassOrFnThatCanBeRemovedIfUnused = true
+				}
+			}
+		}
+
+		stmts = append(stmts, classStmts...)
+
+		// Handle exporting this class from a namespace
+		if wasExportInsideNamespace {
+			stmts = append(stmts, js_ast.AssignStmt(
+				js_ast.Expr{Loc: stmt.Loc, Data: p.dotOrMangledPropVisit(
+					js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
+					nameToExport,
+					s.Class.Name.Loc,
+				)},
+				js_ast.Expr{Loc: s.Class.Name.Loc, Data: &js_ast.EIdentifier{Ref: s.Class.Name.Ref}},
+			))
+		}
+
+		return stmts
+
+	case *js_ast.SEnum:
+		// Do not end the const local prefix after TypeScript enums. We process
+		// them first within their scope so that they are inlined into all code in
+		// that scope. We don't want that to cause the const local prefix to end.
+		p.currentScope.IsAfterConstLocalPrefix = wasAfterAfterConstLocalPrefix
+
+		// Track cross-module enum constants during bundling
+		var tsTopLevelEnumValues map[string]js_ast.TSEnumValue
+		if p.currentScope == p.moduleScope && p.options.mode == config.ModeBundle {
+			tsTopLevelEnumValues = make(map[string]js_ast.TSEnumValue)
+		}
+
+		p.recordDeclaredSymbol(s.Name.Ref)
+		p.pushScopeForVisitPass(js_ast.ScopeEntry, stmt.Loc)
+		p.recordDeclaredSymbol(s.Arg)
+
+		// Scan ahead for any variables inside this namespace. This must be done
+		// ahead of time before visiting any statements inside the namespace
+		// because we may end up visiting the uses before the declarations.
+		// We need to convert the uses into property accesses on the namespace.
+		for _, value := range s.Values {
+			if value.Ref != ast.InvalidRef {
+				p.isExportedInsideNamespace[value.Ref] = s.Arg
+			}
+		}
+
+		// Values without initializers are initialized to one more than the
+		// previous value if the previous value is numeric. Otherwise values
+		// without initializers are initialized to undefined.
+		nextNumericValue := float64(0)
+		hasNumericValue := true
+		valueExprs := []js_ast.Expr{}
+		allValuesArePure := true
+
+		// Update the exported members of this enum as we constant fold each one
+		exportedMembers := p.currentScope.TSNamespace.ExportedMembers
+
+		// We normally don't fold numeric constants because they might increase code
+		// size, but it's important to fold numeric constants inside enums since
+		// that's what the TypeScript compiler does.
+		oldShouldFoldTypeScriptConstantExpressions := p.shouldFoldTypeScriptConstantExpressions
+		p.shouldFoldTypeScriptConstantExpressions = true
+
+		// Create an assignment for each enum value
+		for _, value := range s.Values {
+			name := helpers.UTF16ToString(value.Name)
+			var assignTarget js_ast.Expr
+			hasStringValue := false
+
+			if value.ValueOrNil.Data != nil {
+				value.ValueOrNil = p.visitExpr(value.ValueOrNil)
+				hasNumericValue = false
+
+				// "See through" any wrapped comments
+				underlyingValue := value.ValueOrNil
+				if inlined, ok := value.ValueOrNil.Data.(*js_ast.EInlinedEnum); ok {
+					underlyingValue = inlined.Value
+				}
+
+				switch e := underlyingValue.Data.(type) {
+				case *js_ast.ENumber:
+					if tsTopLevelEnumValues != nil {
+						tsTopLevelEnumValues[name] = js_ast.TSEnumValue{Number: e.Value}
+					}
+					member := exportedMembers[name]
+					member.Data = &js_ast.TSNamespaceMemberEnumNumber{Value: e.Value}
+					exportedMembers[name] = member
+					p.refToTSNamespaceMemberData[value.Ref] = member.Data
+					hasNumericValue = true
+					nextNumericValue = e.Value + 1
+
+				case *js_ast.EString:
+					if tsTopLevelEnumValues != nil {
+						tsTopLevelEnumValues[name] = js_ast.TSEnumValue{String: e.Value}
+					}
+					member := exportedMembers[name]
+					member.Data = &js_ast.TSNamespaceMemberEnumString{Value: e.Value}
+					exportedMembers[name] = member
+					p.refToTSNamespaceMemberData[value.Ref] = member.Data
+					hasStringValue = true
+
+				default:
+					if js_ast.KnownPrimitiveType(underlyingValue.Data) == js_ast.PrimitiveString {
+						hasStringValue = true
+					}
+					if !p.astHelpers.ExprCanBeRemovedIfUnused(underlyingValue) {
+						allValuesArePure = false
+					}
+				}
+			} else if hasNumericValue {
+				if tsTopLevelEnumValues != nil {
+					tsTopLevelEnumValues[name] = js_ast.TSEnumValue{Number: nextNumericValue}
+				}
+				member := exportedMembers[name]
+				member.Data = &js_ast.TSNamespaceMemberEnumNumber{Value: nextNumericValue}
+				exportedMembers[name] = member
+				p.refToTSNamespaceMemberData[value.Ref] = member.Data
+				value.ValueOrNil = js_ast.Expr{Loc: value.Loc, Data: &js_ast.ENumber{Value: nextNumericValue}}
+				nextNumericValue++
+			} else {
+				value.ValueOrNil = js_ast.Expr{Loc: value.Loc, Data: js_ast.EUndefinedShared}
+			}
+
+			if p.options.minifySyntax && js_ast.IsIdentifier(name) {
+				// "Enum.Name = value"
+				assignTarget = js_ast.Assign(
+					js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
+						Target:  js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIdentifier{Ref: s.Arg}},
+						Name:    name,
+						NameLoc: value.Loc,
+					}},
+					value.ValueOrNil,
+				)
+			} else {
+				// "Enum['Name'] = value"
+				assignTarget = js_ast.Assign(
+					js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
+						Target: js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIdentifier{Ref: s.Arg}},
+						Index:  js_ast.Expr{Loc: value.Loc, Data: &js_ast.EString{Value: value.Name}},
+					}},
+					value.ValueOrNil,
+				)
+			}
+			p.recordUsage(s.Arg)
+
+			// String-valued enums do not form a two-way map
+			if hasStringValue {
+				valueExprs = append(valueExprs, assignTarget)
+			} else {
+				// "Enum[assignTarget] = 'Name'"
+				valueExprs = append(valueExprs, js_ast.Assign(
+					js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
+						Target: js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIdentifier{Ref: s.Arg}},
+						Index:  assignTarget,
+					}},
+					js_ast.Expr{Loc: value.Loc, Data: &js_ast.EString{Value: value.Name}},
+				))
+				p.recordUsage(s.Arg)
+			}
+		}
+
+		p.popScope()
+		p.shouldFoldTypeScriptConstantExpressions = oldShouldFoldTypeScriptConstantExpressions
+
+		// Track all exported top-level enums for cross-module inlining
+		if tsTopLevelEnumValues != nil {
+			if p.tsEnums == nil {
+				p.tsEnums = make(map[ast.Ref]map[string]js_ast.TSEnumValue)
+			}
+			p.tsEnums[s.Name.Ref] = tsTopLevelEnumValues
+		}
+
+		// Wrap this enum definition in a closure
+		stmts = p.generateClosureForTypeScriptEnum(
+			stmts, stmt.Loc, s.IsExport, s.Name.Loc, s.Name.Ref, s.Arg, valueExprs, allValuesArePure)
+		return stmts
+
+	case *js_ast.SNamespace:
+		p.recordDeclaredSymbol(s.Name.Ref)
+
+		// Scan ahead for any variables inside this namespace. This must be done
+		// ahead of time before visiting any statements inside the namespace
+		// because we may end up visiting the uses before the declarations.
+		// We need to convert the uses into property accesses on the namespace.
+		for _, childStmt := range s.Stmts {
+			if local, ok := childStmt.Data.(*js_ast.SLocal); ok {
+				if local.IsExport {
+					js_ast.ForEachIdentifierBindingInDecls(local.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
+						p.isExportedInsideNamespace[b.Ref] = s.Arg
+					})
+				}
+			}
+		}
+
+		oldEnclosingNamespaceArgRef := p.enclosingNamespaceArgRef
+		p.enclosingNamespaceArgRef = &s.Arg
+		p.pushScopeForVisitPass(js_ast.ScopeEntry, stmt.Loc)
+		p.recordDeclaredSymbol(s.Arg)
+		stmtsInsideNamespace := p.visitStmtsAndPrependTempRefs(s.Stmts, prependTempRefsOpts{kind: stmtsFnBody})
+		p.popScope()
+		p.enclosingNamespaceArgRef = oldEnclosingNamespaceArgRef
+
+		// Generate a closure for this namespace
+		stmts = p.generateClosureForTypeScriptNamespaceOrEnum(
+			stmts, stmt.Loc, s.IsExport, s.Name.Loc, s.Name.Ref, s.Arg, stmtsInsideNamespace)
+		return stmts
+
+	default:
+		panic("Internal error")
+	}
+
+	stmts = append(stmts, stmt)
+	return stmts
+}
+
+// isUnsightlyPrimitive reports whether "data" is a primitive literal
+// (boolean, null, undefined, number, bigint, or string).
+func isUnsightlyPrimitive(data js_ast.E) bool {
+	switch data.(type) {
+	case *js_ast.EBoolean, *js_ast.ENull, *js_ast.EUndefined,
+		*js_ast.ENumber, *js_ast.EBigInt, *js_ast.EString:
+		return true
+	default:
+		return false
+	}
+}
+
+// If we encounter a variable initializer that could possibly trigger access to
+// a constant declared later on, then we need to end the const local prefix.
+// We want to avoid situations like this:
+//
+//	const x = y; // This is supposed to throw due to TDZ
+//	const y = 1;
+//
+// or this:
+//
+//	const x = 1;
+//	const y = foo(); // This is supposed to throw due to TDZ
+//	const z = 2;
+//	const foo = () => z;
+//
+// But a situation like this is ok:
+//
+//	const x = 1;
+//	const y = [() => x + z];
+//	const z = 2;
+func isSafeForConstLocalPrefix(expr js_ast.Expr) bool {
+	switch e := expr.Data.(type) {
+	case *js_ast.EMissing, *js_ast.EString, *js_ast.ERegExp,
+		*js_ast.EBigInt, *js_ast.EFunction, *js_ast.EArrow:
+		// These expressions cannot run arbitrary code at evaluation time
+		return true
+
+	case *js_ast.EArray:
+		// An array literal is safe only if every element is safe
+		for _, item := range e.Items {
+			if !isSafeForConstLocalPrefix(item) {
+				return false
+			}
+		}
+		return true
+
+	case *js_ast.EObject:
+		// For now just allow "{}" and forbid everything else
+		return len(e.Properties) == 0
+
+	default:
+		return false
+	}
+}
+
+// relocateVarsMode distinguishes where a "var" declaration came from when it
+// is relocated to the top level by "maybeRelocateVarsToTopLevel".
+type relocateVarsMode uint8
+
+const (
+	// The "var" is a normal statement
+	relocateVarsNormal relocateVarsMode = iota
+
+	// The "var" is the initializer of a "for-in" or "for-of" loop, so a
+	// binding without an initializer must still be kept as the loop target
+	relocateVarsForInOrForOf
+)
+
+// If we are currently in a hoisted child of the module scope, relocate these
+// declarations to the top level and return an equivalent assignment statement.
+// Make sure to check that the declaration kind is "var" before calling this.
+// And make sure to check that the returned statement is not the zero value.
+//
+// This is done to make it easier to traverse top-level declarations in the linker
+// during bundling. Now it is sufficient to just scan the top-level statements
+// instead of having to traverse recursively into the statement tree.
+func (p *parser) maybeRelocateVarsToTopLevel(decls []js_ast.Decl, mode relocateVarsMode) (js_ast.Stmt, bool) {
+	// Only do this when bundling, and not when the scope is already top-level
+	if p.options.mode != config.ModeBundle || p.currentScope == p.moduleScope {
+		return js_ast.Stmt{}, false
+	}
+
+	// Only do this if we're not inside a function
+	scope := p.currentScope
+	for !scope.Kind.StopsHoisting() {
+		scope = scope.Parent
+	}
+	if scope != p.moduleScope {
+		return js_ast.Stmt{}, false
+	}
+
+	// Convert the declarations to assignments
+	wrapIdentifier := func(loc logger.Loc, ref ast.Ref) js_ast.Expr {
+		// Remember each relocated symbol so the actual "var" declaration can
+		// be emitted at the top level later, and count the generated reference
+		p.relocatedTopLevelVars = append(p.relocatedTopLevelVars, ast.LocRef{Loc: loc, Ref: ref})
+		p.recordUsage(ref)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	}
+	// Join all assignments into a single comma expression
+	var value js_ast.Expr
+	for _, decl := range decls {
+		binding := js_ast.ConvertBindingToExpr(decl.Binding, wrapIdentifier)
+		if decl.ValueOrNil.Data != nil {
+			value = js_ast.JoinWithComma(value, js_ast.Assign(binding, decl.ValueOrNil))
+		} else if mode == relocateVarsForInOrForOf {
+			// A for-in/for-of loop must keep its binding expression as the
+			// loop target even though there is no initializer to assign
+			value = js_ast.JoinWithComma(value, binding)
+		}
+	}
+	if value.Data == nil {
+		// If none of the variables had any initializers, just remove the declarations
+		return js_ast.Stmt{}, true
+	}
+	return js_ast.Stmt{Loc: value.Loc, Data: &js_ast.SExpr{Value: value}}, true
+}
+
+// markExprAsParenthesized records that "value" was wrapped in parentheses:
+// comments attached to the "(" are moved onto the inner expression, and
+// array/object literals are flagged as parenthesized.
+func (p *parser) markExprAsParenthesized(value js_ast.Expr, openParenLoc logger.Loc, isAsync bool) {
+	// Don't lose comments due to parentheses. For example, we don't want to
+	// lose the comment here:
+	//
+	//   ( /* comment */ (foo) );
+	//
+	if !isAsync {
+		if comments, ok := p.exprComments[openParenLoc]; ok {
+			delete(p.exprComments, openParenLoc)
+			p.exprComments[value.Loc] = append(comments, p.exprComments[value.Loc]...)
+		}
+	}
+
+	switch data := value.Data.(type) {
+	case *js_ast.EArray:
+		data.IsParenthesized = true
+	case *js_ast.EObject:
+		data.IsParenthesized = true
+	}
+}
+
+// maybeTransposeIfExprChain applies "visit" to every leaf of a chain of
+// conditional ("?:") expressions, leaving the conditionals themselves intact.
+func (p *parser) maybeTransposeIfExprChain(expr js_ast.Expr, visit func(js_ast.Expr) js_ast.Expr) js_ast.Expr {
+	e, ok := expr.Data.(*js_ast.EIf)
+	if !ok {
+		// This is a leaf of the chain, so transform it
+		return visit(expr)
+	}
+
+	// Recurse into both branches of the conditional
+	e.Yes = p.maybeTransposeIfExprChain(e.Yes, visit)
+	e.No = p.maybeTransposeIfExprChain(e.No, visit)
+	return expr
+}
+
+// iifeCanBeRemovedIfUnused reports whether an immediately-invoked function
+// expression with these arguments and this body is free of side effects.
+func (p *parser) iifeCanBeRemovedIfUnused(args []js_ast.Arg, body js_ast.FnBody) bool {
+	for _, arg := range args {
+		// A default value may run arbitrary code when evaluated
+		if def := arg.DefaultOrNil; def.Data != nil && !p.astHelpers.ExprCanBeRemovedIfUnused(def) {
+			return false
+		}
+
+		// Destructuring patterns can trigger property accesses, which count
+		// as side effects
+		if _, isIdentifier := arg.Binding.Data.(*js_ast.BIdentifier); !isIdentifier {
+			return false
+		}
+	}
+
+	// Treat "return" statements as side-effect free: if the IIFE itself can
+	// be removed then its return value is unused, so returning the value has
+	// no side effects.
+	return p.astHelpers.StmtsCanBeRemovedIfUnused(body.Block.Stmts, js_ast.ReturnCanBeRemovedIfUnused)
+}
+
+// captureValueMode tells "captureValueWithPossibleSideEffects" whether the
+// value being captured might be mutated between the generated references.
+type captureValueMode uint8
+
+const (
+	// The value is not mutated, so identifiers may be referenced directly
+	valueDefinitelyNotMutated captureValueMode = iota
+
+	// The value may be mutated, so it must be captured in a temporary
+	valueCouldBeMutated
+)
+
+// This is a helper function to use when you need to capture a value that may
+// have side effects so you can use it multiple times. It guarantees that the
+// side effects take place exactly once.
+//
+// Example usage:
+//
+//	// "value" => "value + value"
+//	// "value()" => "(_a = value(), _a + _a)"
+//	valueFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, value)
+//	return wrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+//	  Op: js_ast.BinOpAdd,
+//	  Left: valueFunc(),
+//	  Right: valueFunc(),
+//	}})
+//
+// This returns a function for generating references instead of a raw reference
+// because AST nodes are supposed to be unique in memory, not aliases of other
+// AST nodes. That way you can mutate one during lowering without having to
+// worry about messing up other nodes.
+func (p *parser) captureValueWithPossibleSideEffects(
+	loc logger.Loc, // The location to use for the generated references
+	count int, // The expected number of references to generate
+	value js_ast.Expr, // The value that might have side effects
+	mode captureValueMode, // Say if "value" might be mutated and must be captured
+) (
+	func() js_ast.Expr, // Generates reference expressions "_a"
+	func(js_ast.Expr) js_ast.Expr, // Call this on the final expression
+) {
+	wrapFunc := func(expr js_ast.Expr) js_ast.Expr {
+		// Make sure side effects still happen if no expression was generated
+		if expr.Data == nil {
+			return value
+		}
+		return expr
+	}
+
+	// Referencing certain expressions more than once has no side effects, so we
+	// can just create them inline without capturing them in a temporary variable
+	var valueFunc func() js_ast.Expr
+	switch e := value.Data.(type) {
+	case *js_ast.ENull:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: js_ast.ENullShared} }
+	case *js_ast.EUndefined:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared} }
+	case *js_ast.EThis:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: js_ast.EThisShared} }
+	case *js_ast.EBoolean:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: e.Value}} }
+	case *js_ast.ENumber:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: e.Value}} }
+	case *js_ast.EBigInt:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.EBigInt{Value: e.Value}} }
+	case *js_ast.EString:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: e.Value}} }
+	case *js_ast.EPrivateIdentifier:
+		valueFunc = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.EPrivateIdentifier{Ref: e.Ref}} }
+	// Identifiers are only safe to repeat when the value can't be mutated in
+	// between the generated references; otherwise fall through to the capture
+	// path below
+	case *js_ast.EIdentifier:
+		if mode == valueDefinitelyNotMutated {
+			valueFunc = func() js_ast.Expr {
+				// Make sure we record this usage in the usage count so that duplicating
+				// a single-use reference means it's no longer considered a single-use
+				// reference. Otherwise the single-use reference inlining code may
+				// incorrectly inline the initializer into the first reference, leaving
+				// the second reference without a definition.
+				p.recordUsage(e.Ref)
+				return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: e.Ref}}
+			}
+		}
+	}
+	if valueFunc != nil {
+		return valueFunc, wrapFunc
+	}
+
+	// We don't need to worry about side effects if the value won't be used
+	// multiple times. This special case lets us avoid generating a temporary
+	// reference.
+	if count < 2 {
+		return func() js_ast.Expr {
+			return value
+		}, wrapFunc
+	}
+
+	// Otherwise, fall back to generating a temporary reference. The temporary
+	// is created lazily by the first call to the generated reference function.
+	tempRef := ast.InvalidRef
+
+	// If we're in a function argument scope, then we won't be able to generate
+	// symbols in this scope to store stuff, since there's nowhere to put the
+	// variable declaration. We don't want to put the variable declaration
+	// outside the function since some code in the argument list may cause the
+	// function to be reentrant, and we can't put the variable declaration in
+	// the function body since that's not accessible by the argument list.
+	//
+	// Instead, we use an immediately-invoked arrow function to create a new
+	// symbol inline by introducing a new scope. Make sure to only use it for
+	// symbol declaration and still initialize the variable inline to preserve
+	// side effect order.
+	if p.currentScope.Kind == js_ast.ScopeFunctionArgs {
+		return func() js_ast.Expr {
+				if tempRef == ast.InvalidRef {
+					tempRef = p.generateTempRef(tempRefNoDeclare, "")
+
+					// Assign inline so the order of side effects remains the same
+					p.recordUsage(tempRef)
+					return js_ast.Assign(js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}, value)
+				}
+				p.recordUsage(tempRef)
+				return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}
+			}, func(expr js_ast.Expr) js_ast.Expr {
+				// Make sure side effects still happen if no expression was generated
+				if expr.Data == nil {
+					return value
+				}
+
+				// Generate a new variable using an arrow function to avoid messing with "this"
+				return js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: loc, Data: &js_ast.EArrow{
+						Args:       []js_ast.Arg{{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: tempRef}}}},
+						PreferExpr: true,
+						Body:       js_ast.FnBody{Loc: loc, Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SReturn{ValueOrNil: expr}}}}},
+					}},
+				}}
+			}
+	}
+
+	// The first generated reference assigns the value inline ("_a = value") so
+	// that side effects happen in the original position; later references are
+	// plain reads of the temporary ("_a")
+	return func() js_ast.Expr {
+		if tempRef == ast.InvalidRef {
+			tempRef = p.generateTempRef(tempRefNeedsDeclare, "")
+			p.recordUsage(tempRef)
+			return js_ast.Assign(js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}, value)
+		}
+		p.recordUsage(tempRef)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}
+	}, wrapFunc
+}
+
+// visitDecorators visits each decorator expression using "decoratorScope" as
+// the current scope instead of whatever scope the parser is currently in.
+func (p *parser) visitDecorators(decorators []js_ast.Decorator, decoratorScope *js_ast.Scope) []js_ast.Decorator {
+	if decorators == nil {
+		return nil
+	}
+
+	// Decorators cause us to temporarily revert to the scope that encloses
+	// the class declaration, since that's where the generated code for
+	// decorators will be inserted. I believe this currently only matters for
+	// parameter decorators, where the scope should not be within the argument
+	// list.
+	savedScope := p.currentScope
+	p.currentScope = decoratorScope
+
+	for i := range decorators {
+		decorators[i].Value = p.visitExpr(decorators[i].Value)
+	}
+
+	// Restore directly instead of calling "popScope" because this decorator
+	// scope is not hierarchical
+	p.currentScope = savedScope
+
+	return decorators
+}
+
+// visitClassResult carries information produced while visiting a class that
+// the caller needs afterwards when lowering the class.
+type visitClassResult struct {
+	bodyScope         *js_ast.Scope // The scope pushed for the class body
+	innerClassNameRef ast.Ref       // The immutable inner binding for the class name
+	superCtorRef      ast.Ref       // The "__super" symbol, or "ast.InvalidRef" if no shim is needed
+
+	// If true, the class was determined to be safe to remove if the class is
+	// never used (i.e. the class definition is side-effect free). This is
+	// determined after visiting but before lowering since lowering may generate
+	// class mutations that cannot be automatically analyzed as side-effect free.
+	canBeRemovedIfUnused bool
+}
+
+func (p *parser) visitClass(nameScopeLoc logger.Loc, class *js_ast.Class, defaultNameRef ast.Ref, nameToKeep string) (result visitClassResult) {
+	class.Decorators = p.visitDecorators(class.Decorators, p.currentScope)
+
+	if class.Name != nil {
+		p.recordDeclaredSymbol(class.Name.Ref)
+		if p.options.keepNames {
+			nameToKeep = p.symbols[class.Name.Ref.InnerIndex].OriginalName
+		}
+	}
+
+	// Replace "this" with a reference to the class inside static field
+	// initializers if static fields are being lowered, since that relocates the
+	// field initializers outside of the class body and "this" will no longer
+	// reference the same thing.
+	classLoweringInfo := p.computeClassLoweringInfo(class)
+	recomputeClassLoweringInfo := false
+
+	// Sometimes we need to lower private members even though they are supported.
+	// This flags them for lowering so that we lower references to them as we
+	// traverse the class body.
+	//
+	// We don't need to worry about possible references to the class shadowing
+	// symbol inside the class body changing our decision to lower private members
+	// later on because that shouldn't be possible.
+	if classLoweringInfo.lowerAllStaticFields {
+		for _, prop := range class.Properties {
+			// We need to lower all private members if fields of that type are lowered,
+			// not just private fields (methods and accessors too):
+			//
+			//   class Foo {
+			//     get #foo() {}
+			//     static bar = new Foo().#foo
+			//   }
+			//
+			// We can't transform that to this:
+			//
+			//   class Foo {
+			//     get #foo() {}
+			//   }
+			//   Foo.bar = new Foo().#foo;
+			//
+			// The private getter must be lowered too.
+			if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+				p.symbols[private.Ref.InnerIndex].Flags |= ast.PrivateSymbolMustBeLowered
+				recomputeClassLoweringInfo = true
+			}
+		}
+	}
+
+	// Conservatively lower all private names that have been used in a private
+	// brand check anywhere in the file. See the comment on this map for details.
+	if p.lowerAllOfThesePrivateNames != nil {
+		for _, prop := range class.Properties {
+			if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+				if symbol := &p.symbols[private.Ref.InnerIndex]; p.lowerAllOfThesePrivateNames[symbol.OriginalName] {
+					symbol.Flags |= ast.PrivateSymbolMustBeLowered
+					recomputeClassLoweringInfo = true
+				}
+			}
+		}
+	}
+
+	// If we changed private symbol lowering decisions, then recompute class
+	// lowering info because that may have changed other decisions too
+	if recomputeClassLoweringInfo {
+		classLoweringInfo = p.computeClassLoweringInfo(class)
+	}
+
+	p.pushScopeForVisitPass(js_ast.ScopeClassName, nameScopeLoc)
+	oldEnclosingClassKeyword := p.enclosingClassKeyword
+	p.enclosingClassKeyword = class.ClassKeyword
+	p.currentScope.RecursiveSetStrictMode(js_ast.ImplicitStrictModeClass)
+	if class.Name != nil {
+		p.validateDeclaredSymbolName(class.Name.Loc, p.symbols[class.Name.Ref.InnerIndex].OriginalName)
+	}
+
+	// Create the "__super" symbol if necessary. This will cause us to replace
+	// all "super()" call expressions with a call to this symbol, which will
+	// then be inserted into the "constructor" method.
+	result.superCtorRef = ast.InvalidRef
+	if classLoweringInfo.shimSuperCtorCalls {
+		result.superCtorRef = p.newSymbol(ast.SymbolOther, "__super")
+		p.currentScope.Generated = append(p.currentScope.Generated, result.superCtorRef)
+		p.recordDeclaredSymbol(result.superCtorRef)
+	}
+	oldSuperCtorRef := p.superCtorRef
+	p.superCtorRef = result.superCtorRef
+
+	// Insert an immutable inner name that spans the whole class to match
+	// JavaScript's semantics specifically the "CreateImmutableBinding" here:
+	// https://262.ecma-international.org/6.0/#sec-runtime-semantics-classdefinitionevaluation
+	// The class body (and extends clause) "captures" the original value of the
+	// class name. This matters for class statements because the symbol can be
+	// re-assigned to something else later. The captured values must be the
+	// original value of the name, not the re-assigned value. Use "const" for
+	// this symbol to match JavaScript run-time semantics. You are not allowed
+	// to assign to this symbol (it throws a TypeError).
+	if class.Name != nil {
+		name := p.symbols[class.Name.Ref.InnerIndex].OriginalName
+		result.innerClassNameRef = p.newSymbol(ast.SymbolConst, "_"+name)
+		p.currentScope.Members[name] = js_ast.ScopeMember{Loc: class.Name.Loc, Ref: result.innerClassNameRef}
+	} else {
+		name := "_this"
+		if defaultNameRef != ast.InvalidRef {
+			name = "_" + p.source.IdentifierName + "_default"
+		}
+		result.innerClassNameRef = p.newSymbol(ast.SymbolConst, name)
+	}
+	p.recordDeclaredSymbol(result.innerClassNameRef)
+
+	if class.ExtendsOrNil.Data != nil {
+		class.ExtendsOrNil = p.visitExpr(class.ExtendsOrNil)
+	}
+
+	// A scope is needed for private identifiers
+	p.pushScopeForVisitPass(js_ast.ScopeClassBody, class.BodyLoc)
+	result.bodyScope = p.currentScope
+
+	for i := range class.Properties {
+		property := &class.Properties[i]
+
+		if property.Kind == js_ast.PropertyClassStaticBlock {
+			oldFnOrArrowData := p.fnOrArrowDataVisit
+			oldFnOnlyDataVisit := p.fnOnlyDataVisit
+
+			p.fnOrArrowDataVisit = fnOrArrowDataVisit{}
+			p.fnOnlyDataVisit = fnOnlyDataVisit{
+				isThisNested:           true,
+				isNewTargetAllowed:     true,
+				isInStaticClassContext: true,
+				innerClassNameRef:      &result.innerClassNameRef,
+			}
+
+			if classLoweringInfo.lowerAllStaticFields {
+				// Need to lower "this" and "super" since they won't be valid outside the class body
+				p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef = true
+				p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess = true
+			}
+
+			p.pushScopeForVisitPass(js_ast.ScopeClassStaticInit, property.ClassStaticBlock.Loc)
+
+			// Make it an error to use "arguments" in a static class block
+			p.currentScope.ForbidArguments = true
+
+			property.ClassStaticBlock.Block.Stmts = p.visitStmts(property.ClassStaticBlock.Block.Stmts, stmtsFnBody)
+			p.popScope()
+
+			p.fnOrArrowDataVisit = oldFnOrArrowData
+			p.fnOnlyDataVisit = oldFnOnlyDataVisit
+			continue
+		}
+
+		property.Decorators = p.visitDecorators(property.Decorators, result.bodyScope)
+
+		// Visit the property key
+		if private, ok := property.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+			// Special-case private identifiers here
+			p.recordDeclaredSymbol(private.Ref)
+		} else {
+			// It's forbidden to reference the class name in a computed key
+			if property.Flags.Has(js_ast.PropertyIsComputed) && class.Name != nil {
+				p.symbols[result.innerClassNameRef.InnerIndex].Kind = ast.SymbolClassInComputedPropertyKey
+			}
+
+			key, _ := p.visitExprInOut(property.Key, exprIn{
+				shouldMangleStringsAsProps: true,
+			})
+			property.Key = key
+
+			// Re-allow using the class name after visiting a computed key
+			if property.Flags.Has(js_ast.PropertyIsComputed) && class.Name != nil {
+				p.symbols[result.innerClassNameRef.InnerIndex].Kind = ast.SymbolConst
+			}
+
+			if p.options.minifySyntax {
+				if inlined, ok := key.Data.(*js_ast.EInlinedEnum); ok {
+					switch inlined.Value.Data.(type) {
+					case *js_ast.EString, *js_ast.ENumber:
+						key.Data = inlined.Value.Data
+						property.Key.Data = key.Data
+					}
+				}
+				switch k := key.Data.(type) {
+				case *js_ast.ENumber, *js_ast.ENameOfSymbol:
+					// "class { [123] }" => "class { 123 }"
+					property.Flags &= ^js_ast.PropertyIsComputed
+				case *js_ast.EString:
+					if numberValue, ok := js_ast.StringToEquivalentNumberValue(k.Value); ok && numberValue >= 0 {
+						// "class { '123' }" => "class { 123 }"
+						property.Key.Data = &js_ast.ENumber{Value: numberValue}
+						property.Flags &= ^js_ast.PropertyIsComputed
+					} else if property.Flags.Has(js_ast.PropertyIsComputed) {
+						// "class {['x'] = y}" => "class {'x' = y}"
+						isInvalidConstructor := false
+						if helpers.UTF16EqualsString(k.Value, "constructor") {
+							if !property.Kind.IsMethodDefinition() {
+								// "constructor" is an invalid name for both instance and static fields
+								isInvalidConstructor = true
+							} else if !property.Flags.Has(js_ast.PropertyIsStatic) {
+								// Calling an instance method "constructor" is problematic so avoid that too
+								isInvalidConstructor = true
+							}
+						}
+
+						// A static property must not be called "prototype"
+						isInvalidPrototype := property.Flags.Has(js_ast.PropertyIsStatic) && helpers.UTF16EqualsString(k.Value, "prototype")
+
+						if !isInvalidConstructor && !isInvalidPrototype {
+							property.Flags &= ^js_ast.PropertyIsComputed
+						}
+					}
+				}
+			}
+		}
+
+		// Make it an error to use "arguments" in a class body
+		p.currentScope.ForbidArguments = true
+
+		// The value of "this" and "super" is shadowed inside property values
+		oldFnOnlyDataVisit := p.fnOnlyDataVisit
+		oldShouldLowerSuperPropertyAccess := p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess
+		p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess = false
+		p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef = false
+		p.fnOnlyDataVisit.isThisNested = true
+		p.fnOnlyDataVisit.isNewTargetAllowed = true
+		p.fnOnlyDataVisit.isInStaticClassContext = property.Flags.Has(js_ast.PropertyIsStatic)
+		p.fnOnlyDataVisit.innerClassNameRef = &result.innerClassNameRef
+
+		// We need to explicitly assign the name to the property initializer if it
+		// will be transformed such that it is no longer an inline initializer.
+		nameToKeep := ""
+		isLoweredPrivateMethod := false
+		if private, ok := property.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+			if !property.Kind.IsMethodDefinition() || p.privateSymbolNeedsToBeLowered(private) {
+				nameToKeep = p.symbols[private.Ref.InnerIndex].OriginalName
+			}
+
+			// Lowered private methods (both instance and static) are initialized
+			// outside of the class body, so we must rewrite "super" property
+			// accesses inside them. Lowered private instance fields are initialized
+			// inside the constructor where "super" is valid, so those don't need to
+			// be rewritten.
+			if property.Kind.IsMethodDefinition() && p.privateSymbolNeedsToBeLowered(private) {
+				isLoweredPrivateMethod = true
+			}
+		} else if !property.Kind.IsMethodDefinition() && !property.Flags.Has(js_ast.PropertyIsComputed) {
+			if str, ok := property.Key.Data.(*js_ast.EString); ok {
+				nameToKeep = helpers.UTF16ToString(str.Value)
+			}
+		}
+
+		// Handle methods
+		if property.ValueOrNil.Data != nil {
+			p.propMethodDecoratorScope = result.bodyScope
+
+			// Propagate the name to keep from the method into the initializer
+			if nameToKeep != "" {
+				p.nameToKeep = nameToKeep
+				p.nameToKeepIsFor = property.ValueOrNil.Data
+			}
+
+			// Propagate whether we're in a derived class constructor
+			if class.ExtendsOrNil.Data != nil && !property.Flags.Has(js_ast.PropertyIsComputed) {
+				if str, ok := property.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(str.Value, "constructor") {
+					p.propDerivedCtorValue = property.ValueOrNil.Data
+				}
+			}
+
+			property.ValueOrNil, _ = p.visitExprInOut(property.ValueOrNil, exprIn{
+				isMethod:               true,
+				isLoweredPrivateMethod: isLoweredPrivateMethod,
+			})
+		}
+
+		// Handle initialized fields
+		if property.InitializerOrNil.Data != nil {
+			if property.Flags.Has(js_ast.PropertyIsStatic) && classLoweringInfo.lowerAllStaticFields {
+				// Need to lower "this" and "super" since they won't be valid outside the class body
+				p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef = true
+				p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess = true
+			}
+
+			// Propagate the name to keep from the field into the initializer
+			if nameToKeep != "" {
+				p.nameToKeep = nameToKeep
+				p.nameToKeepIsFor = property.InitializerOrNil.Data
+			}
+
+			property.InitializerOrNil = p.visitExpr(property.InitializerOrNil)
+		}
+
+		// Restore "this" so it will take the inherited value in property keys
+		p.fnOnlyDataVisit = oldFnOnlyDataVisit
+		p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess = oldShouldLowerSuperPropertyAccess
+
+		// Restore the ability to use "arguments" in decorators and computed properties
+		p.currentScope.ForbidArguments = false
+	}
+
+	// Check for and warn about duplicate keys in class bodies
+	if !p.suppressWarningsAboutWeirdCode {
+		p.warnAboutDuplicateProperties(class.Properties, duplicatePropertiesInClass)
+	}
+
+	// Analyze side effects before adding the name keeping call
+	result.canBeRemovedIfUnused = p.astHelpers.ClassCanBeRemovedIfUnused(*class)
+
+	// Implement name keeping using a static block at the start of the class body
+	if p.options.keepNames && nameToKeep != "" {
+		propertyPreventsKeepNames := false
+		for _, prop := range class.Properties {
+			// A static property called "name" shadows the automatically-generated name
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				if str, ok := prop.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(str.Value, "name") {
+					propertyPreventsKeepNames = true
+					break
+				}
+			}
+		}
+		if !propertyPreventsKeepNames {
+			var this js_ast.Expr
+			if classLoweringInfo.lowerAllStaticFields {
+				p.recordUsage(result.innerClassNameRef)
+				this = js_ast.Expr{Loc: class.BodyLoc, Data: &js_ast.EIdentifier{Ref: result.innerClassNameRef}}
+			} else {
+				this = js_ast.Expr{Loc: class.BodyLoc, Data: js_ast.EThisShared}
+			}
+			properties := make([]js_ast.Property, 0, 1+len(class.Properties))
+			properties = append(properties, js_ast.Property{
+				Kind: js_ast.PropertyClassStaticBlock,
+				ClassStaticBlock: &js_ast.ClassStaticBlock{Loc: class.BodyLoc, Block: js_ast.SBlock{Stmts: []js_ast.Stmt{
+					p.keepClassOrFnSymbolName(class.BodyLoc, this, nameToKeep),
+				}}},
+			})
+			class.Properties = append(properties, class.Properties...)
+		}
+	}
+
+	p.enclosingClassKeyword = oldEnclosingClassKeyword
+	p.superCtorRef = oldSuperCtorRef
+	p.popScope()
+
+	if p.symbols[result.innerClassNameRef.InnerIndex].UseCountEstimate == 0 {
+		// Don't generate a shadowing name if one isn't needed
+		result.innerClassNameRef = ast.InvalidRef
+	} else if class.Name == nil {
+		// If there was originally no class name but something inside needed one
+		// (e.g. there was a static property initializer that referenced "this"),
+		// populate the class name. If this is an "export default class" statement,
+		// use the existing default name so that things will work as expected if
+		// this is turned into a regular class statement later on.
+		classNameRef := defaultNameRef
+		if classNameRef == ast.InvalidRef {
+			classNameRef = p.newSymbol(ast.SymbolOther, "_this")
+			p.currentScope.Generated = append(p.currentScope.Generated, classNameRef)
+			p.recordDeclaredSymbol(classNameRef)
+		}
+		class.Name = &ast.LocRef{Loc: nameScopeLoc, Ref: classNameRef}
+	}
+
+	p.popScope()
+
+	// Sanity check that the class lowering info hasn't changed before and after
+	// visiting. The class transform relies on this because lowering assumes that
+	// must be able to expect that visiting has done certain things.
+	if classLoweringInfo != p.computeClassLoweringInfo(class) {
+		panic("Internal error")
+	}
+
+	return
+}
+
+// isSimpleParameterList implements the ECMAScript static semantics rule
+// "IsSimpleParameterList": the list is simple only when there is no rest
+// parameter, no destructuring pattern, and no default value.
+func isSimpleParameterList(args []js_ast.Arg, hasRestArg bool) bool {
+	if hasRestArg {
+		return false
+	}
+	simple := true
+	for i := range args {
+		arg := &args[i]
+		_, isIdentifier := arg.Binding.Data.(*js_ast.BIdentifier)
+		if !isIdentifier || arg.DefaultOrNil.Data != nil {
+			simple = false
+			break
+		}
+	}
+	return simple
+}
+
+func fnBodyContainsUseStrict(body []js_ast.Stmt) (logger.Loc, bool) {
+	for _, stmt := range body {
+		switch s := stmt.Data.(type) {
+		case *js_ast.SComment:
+			continue
+		case *js_ast.SDirective:
+			if helpers.UTF16EqualsString(s.Value, "use strict") {
+				return stmt.Loc, true
+			}
+		default:
+			return logger.Loc{}, false
+		}
+	}
+	return logger.Loc{}, false
+}
+
+// visitArgsOpts configures how visitArgs validates and visits a function's
+// formal parameter list.
+type visitArgsOpts struct {
+	body           []js_ast.Stmt
+	decoratorScope *js_ast.Scope
+	hasRestArg     bool
+
+	// This is true if the function is an arrow function or a method
+	// (those use "UniqueFormalParameters", so duplicate argument names
+	// are always an early error regardless of strict mode)
+	isUniqueFormalParameters bool
+}
+
+// visitArgs visits the decorators, bindings, and default values of a formal
+// parameter list. It also enforces two spec early errors: a "use strict"
+// directive is illegal in a function with a non-simple parameter list, and
+// duplicate argument names are illegal for arrow functions, methods, strict
+// mode code, and non-simple parameter lists.
+func (p *parser) visitArgs(args []js_ast.Arg, opts visitArgsOpts) {
+	var duplicateArgCheck map[string]logger.Range
+	useStrictLoc, hasUseStrict := fnBodyContainsUseStrict(opts.body)
+	hasSimpleArgs := isSimpleParameterList(args, opts.hasRestArg)
+
+	// Section 15.2.1 Static Semantics: Early Errors: "It is a Syntax Error if
+	// FunctionBodyContainsUseStrict of FunctionBody is true and
+	// IsSimpleParameterList of FormalParameters is false."
+	if hasUseStrict && !hasSimpleArgs {
+		p.log.AddError(&p.tracker, p.source.RangeOfString(useStrictLoc),
+			"Cannot use a \"use strict\" directive in a function with a non-simple parameter list")
+	}
+
+	// Section 15.1.1 Static Semantics: Early Errors: "Multiple occurrences of
+	// the same BindingIdentifier in a FormalParameterList is only allowed for
+	// functions which have simple parameter lists and which are not defined in
+	// strict mode code."
+	if opts.isUniqueFormalParameters || hasUseStrict || !hasSimpleArgs || p.isStrictMode() {
+		duplicateArgCheck = make(map[string]logger.Range)
+	}
+
+	for i := range args {
+		arg := &args[i]
+		arg.Decorators = p.visitDecorators(arg.Decorators, opts.decoratorScope)
+		p.visitBinding(arg.Binding, bindingOpts{
+			duplicateArgCheck: duplicateArgCheck,
+		})
+		if arg.DefaultOrNil.Data != nil {
+			arg.DefaultOrNil = p.visitExpr(arg.DefaultOrNil)
+		}
+	}
+}
+
+// isDotOrIndexDefineMatch returns true if the expression matches the given
+// dot-separated parts of a "define" (e.g. ["process", "env", "NODE_ENV"]).
+// Intermediate links must be dot accesses or string-index accesses, and the
+// final identifier must resolve to an unbound or injected symbol. Top-level
+// "this" and "import.meta" are also supported as match roots.
+func (p *parser) isDotOrIndexDefineMatch(expr js_ast.Expr, parts []string) bool {
+	switch e := expr.Data.(type) {
+	case *js_ast.EDot:
+		if len(parts) > 1 {
+			// Intermediates must be dot expressions
+			last := len(parts) - 1
+			return parts[last] == e.Name && p.isDotOrIndexDefineMatch(e.Target, parts[:last])
+		}
+
+	case *js_ast.EIndex:
+		if len(parts) > 1 {
+			if str, ok := e.Index.Data.(*js_ast.EString); ok {
+				// Intermediates must be dot expressions
+				last := len(parts) - 1
+				return parts[last] == helpers.UTF16ToString(str.Value) && p.isDotOrIndexDefineMatch(e.Target, parts[:last])
+			}
+		}
+
+	case *js_ast.EThis:
+		// Allow matching on top-level "this"
+		if !p.fnOnlyDataVisit.isThisNested {
+			return len(parts) == 1 && parts[0] == "this"
+		}
+
+	case *js_ast.EImportMeta:
+		// Allow matching on "import.meta"
+		return len(parts) == 2 && parts[0] == "import" && parts[1] == "meta"
+
+	case *js_ast.EIdentifier:
+		// The last expression must be an identifier
+		if len(parts) == 1 {
+			// The name must match
+			name := p.loadNameFromRef(e.Ref)
+			if name != parts[0] {
+				return false
+			}
+
+			result := p.findSymbol(expr.Loc, name)
+
+			// The "findSymbol" function also marks this symbol as used. But that's
+			// never what we want here because we're just peeking to see what kind of
+			// symbol it is to see if it's a match. If it's not a match, it will be
+			// re-resolved again later and marked as used there. So we don't want to
+			// mark it as used twice.
+			p.ignoreUsage(result.ref)
+
+			// We must not be in a "with" statement scope
+			if result.isInsideWithScope {
+				return false
+			}
+
+			// The last symbol must be unbound or injected
+			return p.symbols[result.ref.InnerIndex].Kind.IsUnboundOrInjected()
+		}
+	}
+
+	return false
+}
+
+// instantiateDefineExpr converts a configured define expression into an AST
+// expression at the given location. Constants and injected symbols are used
+// directly. Otherwise a chain of property accesses is built from the parts,
+// with special handling for the roots "NaN", "Infinity", "null", "undefined",
+// "this", and "import.meta". User-specified dot defines and injected dot
+// names are substituted first; define-to-define substitution is deliberately
+// non-recursive (matchAgainstDefines is cleared before recursing).
+func (p *parser) instantiateDefineExpr(loc logger.Loc, expr config.DefineExpr, opts identifierOpts) js_ast.Expr {
+	if expr.Constant != nil {
+		return js_ast.Expr{Loc: loc, Data: expr.Constant}
+	}
+
+	if expr.InjectedDefineIndex.IsValid() {
+		ref := p.injectedDefineSymbols[expr.InjectedDefineIndex.GetIndex()]
+		p.recordUsage(ref)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	}
+
+	parts := expr.Parts
+	if len(parts) == 0 {
+		return js_ast.Expr{}
+	}
+
+	// Check both user-specified defines and known globals
+	if opts.matchAgainstDefines {
+		// Make sure define resolution is not recursive
+		opts.matchAgainstDefines = false
+
+		// Substitute user-specified defines
+		if defines, ok := p.options.defines.DotDefines[parts[len(parts)-1]]; ok {
+			for _, define := range defines {
+				if define.Data.DefineExpr != nil && helpers.StringArraysEqual(define.Parts, parts) {
+					return p.instantiateDefineExpr(loc, *define.Data.DefineExpr, opts)
+				}
+			}
+		}
+	}
+
+	// Check injected dot names
+	if names, ok := p.injectedDotNames[parts[len(parts)-1]]; ok {
+		for _, name := range names {
+			if helpers.StringArraysEqual(name.parts, parts) {
+				return p.instantiateInjectDotName(loc, name, opts.assignTarget)
+			}
+		}
+	}
+
+	// Generate an identifier for the first part
+	var value js_ast.Expr
+	firstPart := parts[0]
+	parts = parts[1:]
+	switch firstPart {
+	case "NaN":
+		value = js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: math.NaN()}}
+
+	case "Infinity":
+		value = js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: math.Inf(1)}}
+
+	case "null":
+		value = js_ast.Expr{Loc: loc, Data: js_ast.ENullShared}
+
+	case "undefined":
+		value = js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared}
+
+	case "this":
+		if thisValue, ok := p.valueForThis(loc, false /* shouldLog */, js_ast.AssignTargetNone, false, false); ok {
+			value = thisValue
+		} else {
+			value = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+		}
+
+	default:
+		if firstPart == "import" && len(parts) > 0 && parts[0] == "meta" {
+			if importMeta, ok := p.valueForImportMeta(loc); ok {
+				value = importMeta
+			} else {
+				value = js_ast.Expr{Loc: loc, Data: &js_ast.EImportMeta{}}
+			}
+			parts = parts[1:]
+			break
+		}
+
+		result := p.findSymbol(loc, firstPart)
+		value = p.handleIdentifier(loc, &js_ast.EIdentifier{
+			Ref:                   result.ref,
+			MustKeepDueToWithStmt: result.isInsideWithScope,
+
+			// Enable tree shaking
+			CanBeRemovedIfUnused: true,
+		}, opts)
+	}
+
+	// Build up a chain of property access expressions for subsequent parts
+	for _, part := range parts {
+		if expr, ok := p.maybeRewritePropertyAccess(loc, js_ast.AssignTargetNone, false, value, part, loc, false, false, false); ok {
+			value = expr
+		} else if p.isMangledProp(part) {
+			value = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+				Target: value,
+				Index:  js_ast.Expr{Loc: loc, Data: &js_ast.ENameOfSymbol{Ref: p.symbolForMangledProp(part)}},
+			}}
+		} else {
+			value = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+				Target:  value,
+				Name:    part,
+				NameLoc: loc,
+
+				// Enable tree shaking
+				CanBeRemovedIfUnused: true,
+			}}
+		}
+	}
+
+	return value
+}
+
+// instantiateInjectDotName replaces a matched injected dot name with a
+// reference to the corresponding injected symbol. Assigning to such a name
+// is an error because it is an import from an injected file; the error note
+// points at the export site in that file.
+func (p *parser) instantiateInjectDotName(loc logger.Loc, name injectedDotName, assignTarget js_ast.AssignTarget) js_ast.Expr {
+	// Note: We don't need to "ignoreRef" on the underlying identifier
+	// because we have only parsed it but not visited it yet
+	ref := p.injectedDefineSymbols[name.injectedDefineIndex]
+	p.recordUsage(ref)
+
+	if assignTarget != js_ast.AssignTargetNone {
+		if where, ok := p.injectedSymbolSources[ref]; ok {
+			r := js_lexer.RangeOfIdentifier(p.source, loc)
+			tracker := logger.MakeLineColumnTracker(&where.source)
+			joined := strings.Join(name.parts, ".")
+			p.log.AddErrorWithNotes(&p.tracker, r,
+				fmt.Sprintf("Cannot assign to %q because it's an import from an injected file", joined),
+				[]logger.MsgData{tracker.MsgData(js_lexer.RangeOfIdentifier(where.source, where.loc),
+					fmt.Sprintf("The symbol %q was exported from %q here:", joined, where.source.PrettyPath))})
+		}
+	}
+
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+}
+
+// checkForUnrepresentableIdentifier reports an error when an identifier
+// contains a code point outside the Basic Multilingual Plane while the
+// output charset is restricted to ASCII and the target doesn't support
+// Unicode escape sequences, since such names then cannot be written in
+// escaped form. Each unique name is only reported once per file.
+func (p *parser) checkForUnrepresentableIdentifier(loc logger.Loc, name string) {
+	if p.options.asciiOnly && p.options.unsupportedJSFeatures.Has(compat.UnicodeEscapes) &&
+		helpers.ContainsNonBMPCodePoint(name) {
+		if p.unrepresentableIdentifiers == nil {
+			p.unrepresentableIdentifiers = make(map[string]bool)
+		}
+		if !p.unrepresentableIdentifiers[name] {
+			p.unrepresentableIdentifiers[name] = true
+			where := config.PrettyPrintTargetEnvironment(p.options.originalTargetEnv, p.options.unsupportedJSFeatureOverridesMask)
+			r := js_lexer.RangeOfIdentifier(p.source, loc)
+			p.log.AddError(&p.tracker, r, fmt.Sprintf("%q cannot be escaped in %s but you "+
+				"can set the charset to \"utf8\" to allow unescaped Unicode characters", name, where))
+		}
+	}
+}
+
+// typeofStringOrder controls whether warnAboutTypeofAndString should also
+// consider the operands in swapped order (i.e. "'string' === typeof x" in
+// addition to "typeof x === 'string'").
+type typeofStringOrder uint8
+
+const (
+	onlyCheckOriginalOrder typeofStringOrder = iota
+	checkBothOrders
+)
+
+// warnAboutTypeofAndString warns when "typeof x" is compared against a string
+// value that the "typeof" operator can never evaluate to (e.g. a typo, or
+// "null"). Note that "unknown" is deliberately in the allow-list below —
+// presumably for legacy host objects that can report it; confirm against
+// upstream intent before changing.
+func (p *parser) warnAboutTypeofAndString(a js_ast.Expr, b js_ast.Expr, order typeofStringOrder) {
+	if order == checkBothOrders {
+		// Normalize so the "typeof" operand (if any) ends up in "a"
+		if _, ok := a.Data.(*js_ast.EString); ok {
+			a, b = b, a
+		}
+	}
+
+	if typeof, ok := a.Data.(*js_ast.EUnary); ok && typeof.Op == js_ast.UnOpTypeof {
+		if str, ok := b.Data.(*js_ast.EString); ok {
+			value := helpers.UTF16ToString(str.Value)
+			switch value {
+			case "undefined", "object", "boolean", "number", "bigint", "string", "symbol", "function", "unknown":
+			default:
+				// Warn about typeof comparisons with values that will never be
+				// returned. Here's an example of code with this problem:
+				// https://github.com/olifolkerd/tabulator/issues/2962
+				r := p.source.RangeOfString(b.Loc)
+				text := fmt.Sprintf("The \"typeof\" operator will never evaluate to %q", value)
+				kind := logger.Warning
+				if p.suppressWarningsAboutWeirdCode {
+					kind = logger.Debug
+				}
+				var notes []logger.MsgData
+				if value == "null" {
+					notes = append(notes, logger.MsgData{
+						Text: "The expression \"typeof x\" actually evaluates to \"object\" in JavaScript, not \"null\". " +
+							"You need to use \"x === null\" to test for null.",
+					})
+				}
+				p.log.AddIDWithNotes(logger.MsgID_JS_ImpossibleTypeof, kind, &p.tracker, r, text, notes)
+			}
+		}
+	}
+}
+
+// warnAboutEqualityCheck warns about equality comparisons that are likely
+// mistakes: comparing against "-0" (which also matches 0), comparing against
+// "NaN" (never equal to anything), and strict equality against a freshly
+// created object/array/regexp/class/function literal (identity comparison,
+// so always false). The "op" argument is the operator text ("==", "===",
+// "!=", "!==") or "case" for a switch case clause. It returns true if a
+// warning was emitted.
+func (p *parser) warnAboutEqualityCheck(op string, value js_ast.Expr, afterOpLoc logger.Loc) bool {
+	switch e := value.Data.(type) {
+	case *js_ast.ENumber:
+		// "0 === -0" is true in JavaScript. Here's an example of code with this
+		// problem: https://github.com/mrdoob/three.js/pull/11183
+		if e.Value == 0 && math.Signbit(e.Value) {
+			r := logger.Range{Loc: value.Loc, Len: 0}
+			// Widen the range to include the leading "-" sign if it's present
+			if int(r.Loc.Start) < len(p.source.Contents) && p.source.Contents[r.Loc.Start] == '-' {
+				zeroRange := p.source.RangeOfNumber(logger.Loc{Start: r.Loc.Start + 1})
+				r.Len = zeroRange.Len + 1
+			}
+			text := fmt.Sprintf("Comparison with -0 using the %q operator will also match 0", op)
+			if op == "case" {
+				text = "Comparison with -0 using a case clause will also match 0"
+			}
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			p.log.AddIDWithNotes(logger.MsgID_JS_EqualsNegativeZero, kind, &p.tracker, r, text,
+				[]logger.MsgData{{Text: "Floating-point equality is defined such that 0 and -0 are equal, so \"x === -0\" returns true for both 0 and -0. " +
+					"You need to use \"Object.is(x, -0)\" instead to test for -0."}})
+			return true
+		}
+
+		// "NaN === NaN" is false in JavaScript
+		if math.IsNaN(e.Value) {
+			// "op[0] == '!'" makes the message say "always true" for "!=" and "!=="
+			text := fmt.Sprintf("Comparison with NaN using the %q operator here is always %v", op, op[0] == '!')
+			if op == "case" {
+				text = "This case clause will never be evaluated because equality with NaN is always false"
+			}
+			r := p.source.RangeOfOperatorBefore(afterOpLoc, op)
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			p.log.AddIDWithNotes(logger.MsgID_JS_EqualsNaN, kind, &p.tracker, r, text,
+				[]logger.MsgData{{Text: "Floating-point equality is defined such that NaN is never equal to anything, so \"x === NaN\" always returns false. " +
+					"You need to use \"Number.isNaN(x)\" instead to test for NaN."}})
+			return true
+		}
+
+	case *js_ast.EArray, *js_ast.EArrow, *js_ast.EClass,
+		*js_ast.EFunction, *js_ast.EObject, *js_ast.ERegExp:
+		// This warning only applies to strict equality because loose equality can
+		// cause string conversions. For example, "x == []" is true if x is the
+		// empty string. Here's an example of code with this problem:
+		// https://github.com/aws/aws-sdk-js/issues/3325
+		if len(op) > 2 {
+			text := fmt.Sprintf("Comparison using the %q operator here is always %v", op, op[0] == '!')
+			if op == "case" {
+				text = "This case clause will never be evaluated because the comparison is always false"
+			}
+			r := p.source.RangeOfOperatorBefore(afterOpLoc, op)
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			p.log.AddIDWithNotes(logger.MsgID_JS_EqualsNewObject, kind, &p.tracker, r, text,
+				[]logger.MsgData{{Text: "Equality with a new object is always false in JavaScript because the equality operator tests object identity. " +
+					"You need to write code to compare the contents of the object instead. " +
+					"For example, use \"Array.isArray(x) && x.length === 0\" instead of \"x === []\" to test for an empty array."}})
+			return true
+		}
+	}
+
+	return false
+}
+
+// EDot nodes represent a property access. This function may return an
+// expression to replace the property access with. It assumes that the
+// target of the EDot expression has already been visited.
+func (p *parser) maybeRewritePropertyAccess(
+	loc logger.Loc,
+	assignTarget js_ast.AssignTarget,
+	isDeleteTarget bool,
+	target js_ast.Expr,
+	name string,
+	nameLoc logger.Loc,
+	isCallTarget bool,
+	isTemplateTag bool,
+	preferQuotedKey bool,
+) (js_ast.Expr, bool) {
+	if id, ok := target.Data.(*js_ast.EIdentifier); ok {
+		// Rewrite property accesses on explicit namespace imports as an identifier.
+		// This lets us replace them easily in the printer to rebind them to
+		// something else without paying the cost of a whole-tree traversal during
+		// module linking just to rewrite these EDot expressions.
+		if p.options.mode == config.ModeBundle {
+			if importItems, ok := p.importItemsForNamespace[id.Ref]; ok {
+				// Cache translation so each property access resolves to the same import
+				item, ok := importItems.entries[name]
+				if !ok {
+					// Replace non-default imports with "undefined" for JSON import assertions
+					if record := &p.importRecords[importItems.importRecordIndex]; (record.Flags&ast.AssertTypeJSON) != 0 && name != "default" {
+						kind := logger.Warning
+						if p.suppressWarningsAboutWeirdCode {
+							kind = logger.Debug
+						}
+						p.log.AddIDWithNotes(logger.MsgID_JS_AssertTypeJSON, kind, &p.tracker, js_lexer.RangeOfIdentifier(p.source, nameLoc),
+							fmt.Sprintf("Non-default import %q is undefined with a JSON import assertion", name),
+							p.notesForAssertTypeJSON(record, name))
+						p.ignoreUsage(id.Ref)
+						return js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared}, true
+					}
+
+					// Generate a new import item symbol in the module scope
+					item = ast.LocRef{Loc: nameLoc, Ref: p.newSymbol(ast.SymbolImport, name)}
+					p.moduleScope.Generated = append(p.moduleScope.Generated, item.Ref)
+
+					// Link the namespace import and the import item together
+					importItems.entries[name] = item
+					p.isImportItem[item.Ref] = true
+
+					symbol := &p.symbols[item.Ref.InnerIndex]
+					if p.options.mode == config.ModePassThrough {
+						// Make sure the printer prints this as a property access
+						symbol.NamespaceAlias = &ast.NamespaceAlias{
+							NamespaceRef: id.Ref,
+							Alias:        name,
+						}
+					} else {
+						// Mark this as generated in case it's missing. We don't want to
+						// generate errors for missing import items that are automatically
+						// generated.
+						symbol.ImportItemStatus = ast.ImportItemGenerated
+					}
+				}
+
+				// Undo the usage count for the namespace itself. This is used later
+				// to detect whether the namespace symbol has ever been "captured"
+				// or whether it has just been used to read properties off of.
+				//
+				// The benefit of doing this is that if both this module and the
+				// imported module end up in the same module group and the namespace
+				// symbol has never been captured, then we don't need to generate
+				// any code for the namespace at all.
+				p.ignoreUsage(id.Ref)
+
+				// Track how many times we've referenced this symbol
+				p.recordUsage(item.Ref)
+				return p.handleIdentifier(nameLoc, &js_ast.EIdentifier{Ref: item.Ref}, identifierOpts{
+					assignTarget:    assignTarget,
+					isCallTarget:    isCallTarget,
+					isDeleteTarget:  isDeleteTarget,
+					preferQuotedKey: preferQuotedKey,
+
+					// If this expression is used as the target of a call expression, make
+					// sure the value of "this" is preserved.
+					wasOriginallyIdentifier: false,
+				}), true
+			}
+
+			// Rewrite "module.require()" to "require()" for Webpack compatibility.
+			// See https://github.com/webpack/webpack/pull/7750 for more info.
+			if isCallTarget && id.Ref == p.moduleRef && name == "require" {
+				p.ignoreUsage(p.moduleRef)
+
+				// This uses "require" instead of a reference to our "__require"
+				// function so that the code coming up that detects calls to
+				// "require" will recognize it.
+				p.recordUsage(p.requireRef)
+				return js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: p.requireRef}}, true
+			}
+		}
+	}
+
+	// Attempt to simplify statically-determined object literal property accesses
+	if !isCallTarget && !isTemplateTag && p.options.minifySyntax && assignTarget == js_ast.AssignTargetNone {
+		if object, ok := target.Data.(*js_ast.EObject); ok {
+			var replace js_ast.Expr
+			hasProtoNull := false
+			isUnsafe := false
+
+			// Check that doing this is safe
+			for _, prop := range object.Properties {
+				// "{ ...a }.a" must be preserved
+				// "new ({ a() {} }.a)" must throw
+				// "{ get a() {} }.a" must be preserved
+				// "{ set a(b) {} }.a = 1" must be preserved
+				// "{ a: 1, [String.fromCharCode(97)]: 2 }.a" must be 2
+				if prop.Kind == js_ast.PropertySpread || prop.Flags.Has(js_ast.PropertyIsComputed) || prop.Kind.IsMethodDefinition() {
+					isUnsafe = true
+					break
+				}
+
+				// Do not attempt to compare against numeric keys
+				key, ok := prop.Key.Data.(*js_ast.EString)
+				if !ok {
+					isUnsafe = true
+					break
+				}
+
+				// The "__proto__" key has special behavior
+				if helpers.UTF16EqualsString(key.Value, "__proto__") {
+					if _, ok := prop.ValueOrNil.Data.(*js_ast.ENull); ok {
+						// Replacing "{__proto__: null}.a" with undefined should be safe
+						hasProtoNull = true
+					}
+				}
+
+				// This entire object literal must have no side effects
+				if !p.astHelpers.ExprCanBeRemovedIfUnused(prop.ValueOrNil) {
+					isUnsafe = true
+					break
+				}
+
+				// Note that we need to take the last value if there are duplicate keys
+				if helpers.UTF16EqualsString(key.Value, name) {
+					replace = prop.ValueOrNil
+				}
+			}
+
+			if !isUnsafe {
+				// If the key was found, return the value for that key. Note
+				// that "{__proto__: null}.__proto__" is undefined, not null.
+				if replace.Data != nil && name != "__proto__" {
+					return replace, true
+				}
+
+				// We can only return "undefined" when a key is missing if the prototype is null
+				if hasProtoNull {
+					return js_ast.Expr{Loc: target.Loc, Data: js_ast.EUndefinedShared}, true
+				}
+			}
+		}
+	}
+
+	// Handle references to namespaces or namespace members
+	if target.Data == p.tsNamespaceTarget && assignTarget == js_ast.AssignTargetNone && !isDeleteTarget {
+		if ns, ok := p.tsNamespaceMemberData.(*js_ast.TSNamespaceMemberNamespace); ok {
+			if member, ok := ns.ExportedMembers[name]; ok {
+				switch m := member.Data.(type) {
+				case *js_ast.TSNamespaceMemberEnumNumber:
+					p.ignoreUsageOfIdentifierInDotChain(target)
+					return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: m.Value}}, name), true
+
+				case *js_ast.TSNamespaceMemberEnumString:
+					p.ignoreUsageOfIdentifierInDotChain(target)
+					return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: m.Value}}, name), true
+
+				case *js_ast.TSNamespaceMemberNamespace:
+					// If this isn't a constant, return a clone of this property access
+					// but with the namespace member data associated with it so that
+					// more property accesses off of this property access are recognized.
+					if preferQuotedKey || !js_ast.IsIdentifier(name) {
+						p.tsNamespaceTarget = &js_ast.EIndex{
+							Target: target,
+							Index:  js_ast.Expr{Loc: nameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name)}},
+						}
+					} else {
+						p.tsNamespaceTarget = p.dotOrMangledPropVisit(target, name, nameLoc)
+					}
+					p.tsNamespaceMemberData = member.Data
+					return js_ast.Expr{Loc: loc, Data: p.tsNamespaceTarget}, true
+				}
+			}
+		}
+	}
+
+	// Symbol uses due to a property access off of an imported symbol are tracked
+	// specially. This lets us do tree shaking for cross-file TypeScript enums.
+	if p.options.mode == config.ModeBundle && !p.isControlFlowDead {
+		if id, ok := target.Data.(*js_ast.EImportIdentifier); ok {
+			// Remove the normal symbol use
+			use := p.symbolUses[id.Ref]
+			use.CountEstimate--
+			if use.CountEstimate == 0 {
+				delete(p.symbolUses, id.Ref)
+			} else {
+				p.symbolUses[id.Ref] = use
+			}
+
+			// Add a special symbol use instead
+			if p.importSymbolPropertyUses == nil {
+				p.importSymbolPropertyUses = make(map[ast.Ref]map[string]js_ast.SymbolUse)
+			}
+			properties := p.importSymbolPropertyUses[id.Ref]
+			if properties == nil {
+				properties = make(map[string]js_ast.SymbolUse)
+				p.importSymbolPropertyUses[id.Ref] = properties
+			}
+			use = properties[name]
+			use.CountEstimate++
+			properties[name] = use
+		}
+	}
+
+	// Minify "foo".length
+	if p.options.minifySyntax && assignTarget == js_ast.AssignTargetNone {
+		switch t := target.Data.(type) {
+		case *js_ast.EString:
+			if name == "length" {
+				return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: float64(len(t.Value))}}, true
+			}
+		case *js_ast.EInlinedEnum:
+			if s, ok := t.Value.Data.(*js_ast.EString); ok && name == "length" {
+				return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: float64(len(s.Value))}}, true
+			}
+		}
+	}
+
+	return js_ast.Expr{}, false
+}
+
// exprIn holds contextual information that is passed top-down into
// "visitExprInOut" while visiting an expression subtree.
type exprIn struct {
	// NOTE(review): these two flags look like they mark the expression being
	// visited as a (possibly lowered private) class method — confirm against
	// the call sites that construct "exprIn" before relying on this.
	isMethod               bool
	isLoweredPrivateMethod bool

	// This tells us if there are optional chain expressions (EDot, EIndex, or
	// ECall) that are chained on to this expression. Because of the way the AST
	// works, chaining expressions on to this expression means they are our
	// parent expressions.
	//
	// Some examples:
	//
	//   a?.b.c  // EDot
	//   a?.b[c] // EIndex
	//   a?.b()  // ECall
	//
	// Note that this is false if our parent is a node with a OptionalChain
	// value of OptionalChainStart. That means it's the start of a new chain, so
	// it's not considered part of this one.
	//
	// Some examples:
	//
	//   a?.b?.c   // EDot
	//   a?.b?.[c] // EIndex
	//   a?.b?.()  // ECall
	//
	// Also note that this is false if our parent is a node with a OptionalChain
	// value of OptionalChainNone. That means it's outside parentheses, which
	// means it's no longer part of the chain.
	//
	// Some examples:
	//
	//   (a?.b).c  // EDot
	//   (a?.b)[c] // EIndex
	//   (a?.b)()  // ECall
	//
	hasChainParent bool

	// If our parent is an ECall node with an OptionalChain value of
	// OptionalChainStart, then we will need to store the value for the "this" of
	// that call somewhere if the current expression is an optional chain that
	// ends in a property access. That's because the value for "this" will be
	// used twice: once for the inner optional chain and once for the outer
	// optional chain.
	//
	// Example:
	//
	//   // Original
	//   a?.b?.();
	//
	//   // Lowered
	//   var _a;
	//   (_a = a == null ? void 0 : a.b) == null ? void 0 : _a.call(a);
	//
	// In the example above we need to store "a" as the value for "this" so we
	// can substitute it back in when we call "_a" if "_a" is indeed present.
	// See also "thisArgFunc" and "thisArgWrapFunc" in "exprOut".
	storeThisArgForParentOptionalChain bool

	// If true, string literals that match the current property mangling pattern
	// should be turned into ENameOfSymbol expressions, which will cause us to
	// rename them in the linker.
	shouldMangleStringsAsProps bool

	// Certain substitutions of identifiers are disallowed for assignment targets.
	// For example, we shouldn't transform "undefined = 1" into "void 0 = 1". This
	// isn't something real-world code would do but it matters for conformance
	// tests.
	assignTarget js_ast.AssignTarget
}
+
// exprOut carries information produced while visiting an expression back up
// to the caller of "visitExprInOut" (mostly for optional-chain lowering).
type exprOut struct {
	// If our parent is an ECall node with an OptionalChain value of
	// OptionalChainContinue, then we may need to return the value for "this"
	// from this node or one of this node's children so that the parent that is
	// the end of the optional chain can use it.
	//
	// Example:
	//
	//   // Original
	//   a?.b?.().c();
	//
	//   // Lowered
	//   var _a;
	//   (_a = a == null ? void 0 : a.b) == null ? void 0 : _a.call(a).c();
	//
	// The value "_a" for "this" must be passed all the way up to the call to
	// ".c()" which is where the optional chain is lowered. From there it must
	// be substituted as the value for "this" in the call to ".b?.()". See also
	// "storeThisArgForParentOptionalChain" in "exprIn".
	thisArgFunc     func() js_ast.Expr
	thisArgWrapFunc func(js_ast.Expr) js_ast.Expr

	// True if the child node is an optional chain node (EDot, EIndex, or ECall
	// with an IsOptionalChain value of true)
	childContainsOptionalChain bool

	// If true and this is used as a call target, the whole call expression
	// must be replaced with undefined.
	methodCallMustBeReplacedWithUndefined bool
}
+
+func (p *parser) visitExpr(expr js_ast.Expr) js_ast.Expr {
+	expr, _ = p.visitExprInOut(expr, exprIn{})
+	return expr
+}
+
+func (p *parser) valueForThis(
+	loc logger.Loc,
+	shouldLog bool,
+	assignTarget js_ast.AssignTarget,
+	isCallTarget bool,
+	isDeleteTarget bool,
+) (js_ast.Expr, bool) {
+	// Substitute "this" if we're inside a static class context
+	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
+		p.recordUsage(*p.fnOnlyDataVisit.innerClassNameRef)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: *p.fnOnlyDataVisit.innerClassNameRef}}, true
+	}
+
+	// Is this a top-level use of "this"?
+	if !p.fnOnlyDataVisit.isThisNested {
+		// Substitute user-specified defines
+		if data, ok := p.options.defines.IdentifierDefines["this"]; ok {
+			if data.DefineExpr != nil {
+				return p.instantiateDefineExpr(loc, *data.DefineExpr, identifierOpts{
+					assignTarget:   assignTarget,
+					isCallTarget:   isCallTarget,
+					isDeleteTarget: isDeleteTarget,
+				}), true
+			}
+		}
+
+		// Otherwise, replace top-level "this" with either "undefined" or "exports"
+		if p.isFileConsideredToHaveESMExports {
+			// Warn about "this" becoming undefined, but only once per file
+			if shouldLog && !p.messageAboutThisIsUndefined && !p.fnOnlyDataVisit.silenceMessageAboutThisBeingUndefined {
+				p.messageAboutThisIsUndefined = true
+				kind := logger.Debug
+				data := p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, loc),
+					"Top-level \"this\" will be replaced with undefined since this file is an ECMAScript module")
+				data.Location.Suggestion = "undefined"
+				_, notes := p.whyESModule()
+				p.log.AddMsgID(logger.MsgID_JS_ThisIsUndefinedInESM, logger.Msg{Kind: kind, Data: data, Notes: notes})
+			}
+
+			// In an ES6 module, "this" is supposed to be undefined. Instead of
+			// doing this at runtime using "fn.call(undefined)", we do it at
+			// compile time using expression substitution here.
+			return js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared}, true
+		} else if p.options.mode != config.ModePassThrough {
+			// In a CommonJS module, "this" is supposed to be the same as "exports".
+			// Instead of doing this at runtime using "fn.call(module.exports)", we
+			// do it at compile time using expression substitution here.
+			p.recordUsage(p.exportsRef)
+			return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: p.exportsRef}}, true
+		}
+	}
+
+	return js_ast.Expr{}, false
+}
+
+func (p *parser) valueForImportMeta(loc logger.Loc) (js_ast.Expr, bool) {
+	if p.options.unsupportedJSFeatures.Has(compat.ImportMeta) ||
+		(p.options.mode != config.ModePassThrough && !p.options.outputFormat.KeepESMImportExportSyntax()) {
+		// Generate the variable if it doesn't exist yet
+		if p.importMetaRef == ast.InvalidRef {
+			p.importMetaRef = p.newSymbol(ast.SymbolOther, "import_meta")
+			p.moduleScope.Generated = append(p.moduleScope.Generated, p.importMetaRef)
+		}
+
+		// Replace "import.meta" with a reference to the symbol
+		p.recordUsage(p.importMetaRef)
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: p.importMetaRef}}, true
+	}
+
+	return js_ast.Expr{}, false
+}
+
+func locAfterOp(e *js_ast.EBinary) logger.Loc {
+	if e.Left.Loc.Start < e.Right.Loc.Start {
+		return e.Right.Loc
+	} else {
+		// Handle the case when we have transposed the operands
+		return e.Left.Loc
+	}
+}
+
// isEvalOrArguments reports whether the identifier name is one of the two
// names that receive special treatment ("eval" and "arguments"). This
// function exists to tie all of these checks together in one place.
func isEvalOrArguments(name string) bool {
	switch name {
	case "eval", "arguments":
		return true
	}
	return false
}
+
+func (p *parser) reportPrivateNameUsage(name string) {
+	if p.parseExperimentalDecoratorNesting > 0 {
+		if p.lowerAllOfThesePrivateNames == nil {
+			p.lowerAllOfThesePrivateNames = make(map[string]bool)
+		}
+		p.lowerAllOfThesePrivateNames[name] = true
+	}
+}
+
+func (p *parser) isValidAssignmentTarget(expr js_ast.Expr) bool {
+	switch e := expr.Data.(type) {
+	case *js_ast.EIdentifier:
+		if p.isStrictMode() {
+			if name := p.loadNameFromRef(e.Ref); isEvalOrArguments(name) {
+				return false
+			}
+		}
+		return true
+	case *js_ast.EDot:
+		return e.OptionalChain == js_ast.OptionalChainNone
+	case *js_ast.EIndex:
+		return e.OptionalChain == js_ast.OptionalChainNone
+
+	// Don't worry about recursive checking for objects and arrays. This will
+	// already be handled naturally by passing down the assign target flag.
+	case *js_ast.EObject:
+		return !e.IsParenthesized
+	case *js_ast.EArray:
+		return !e.IsParenthesized
+	}
+	return false
+}
+
// containsClosingScriptTag reports whether the text contains a sequence that
// would be interpreted as a closing "</script" tag (case-insensitive) if the
// text were embedded inside an inline HTML <script> element.
func containsClosingScriptTag(text string) bool {
	for rest := text; ; {
		i := strings.Index(rest, "</")
		if i == -1 {
			return false
		}
		// Check whether "script" (in any case) immediately follows the "</"
		rest = rest[i+2:]
		if len(rest) >= 6 && strings.EqualFold(rest[:6], "script") {
			return true
		}
	}
}
+
// isUnsupportedRegularExpression scans a regular expression literal (the raw
// source text including the surrounding slashes and trailing flags) for
// syntax features that the configured target environment does not support.
// It returns the pattern and flag portions split apart, plus true when an
// unsupported feature was found (the caller then rewrites the literal as a
// "new RegExp(...)" call). On an unbalanced ")" it logs an error and returns
// early with isUnsupported == false.
func (p *parser) isUnsupportedRegularExpression(loc logger.Loc, value string) (pattern string, flags string, isUnsupported bool) {
	var what string
	var r logger.Range

	// Split "/pattern/flags" at the last "/" so slashes inside classes or
	// escapes in the pattern don't confuse the split
	end := strings.LastIndexByte(value, '/')
	pattern = value[1:end]
	flags = value[end+1:]
	isUnicode := strings.IndexByte(flags, 'u') >= 0
	parenDepth := 0
	i := 0

	// Do a simple scan for unsupported features assuming the regular expression
	// is valid. This doesn't do a full validation of the regular expression
	// because regular expression grammar is complicated. If it contains a syntax
	// error that we don't catch, then we will just generate output code with a
	// syntax error. Garbage in, garbage out.
pattern:
	for i < len(pattern) {
		c := pattern[i]
		i++

		switch c {
		case '[':
			// Skip over the whole character class so its contents (including
			// "(" and ")") aren't treated as grouping syntax
		class:
			for i < len(pattern) {
				c := pattern[i]
				i++

				switch c {
				case ']':
					break class

				case '\\':
					i++ // Skip the escaped character
				}
			}

		case '(':
			tail := pattern[i:]

			if strings.HasPrefix(tail, "?<=") || strings.HasPrefix(tail, "?<!") {
				if p.options.unsupportedJSFeatures.Has(compat.RegexpLookbehindAssertions) {
					what = "Lookbehind assertions in regular expressions are not available"
					r = logger.Range{Loc: logger.Loc{Start: loc.Start + int32(i) + 1}, Len: 3}
					isUnsupported = true
					break pattern
				}
			} else if strings.HasPrefix(tail, "?<") {
				if p.options.unsupportedJSFeatures.Has(compat.RegexpNamedCaptureGroups) {
					if end := strings.IndexByte(tail, '>'); end >= 0 {
						what = "Named capture groups in regular expressions are not available"
						r = logger.Range{Loc: logger.Loc{Start: loc.Start + int32(i) + 1}, Len: int32(end) + 1}
						isUnsupported = true
						break pattern
					}
				}
			}

			parenDepth++

		case ')':
			if parenDepth == 0 {
				r := logger.Range{Loc: logger.Loc{Start: loc.Start + int32(i)}, Len: 1}
				p.log.AddError(&p.tracker, r, "Unexpected \")\" in regular expression")
				return
			}

			parenDepth--

		case '\\':
			tail := pattern[i:]

			// "\p{...}" / "\P{...}" are only property escapes in Unicode mode
			if isUnicode && (strings.HasPrefix(tail, "p{") || strings.HasPrefix(tail, "P{")) {
				if p.options.unsupportedJSFeatures.Has(compat.RegexpUnicodePropertyEscapes) {
					if end := strings.IndexByte(tail, '}'); end >= 0 {
						what = "Unicode property escapes in regular expressions are not available"
						r = logger.Range{Loc: logger.Loc{Start: loc.Start + int32(i)}, Len: int32(end) + 2}
						isUnsupported = true
						break pattern
					}
				}
			}

			i++ // Skip the escaped character
		}
	}

	// Check each flag against the feature table (only if the pattern was fine)
	if !isUnsupported {
		// Note: this "i" shadows the byte index used above; here it's the
		// index of the flag character within the flags string
		for i, c := range flags {
			switch c {
			case 'g', 'i', 'm':
				continue // These are part of ES5 and are always supported

			case 's':
				if !p.options.unsupportedJSFeatures.Has(compat.RegexpDotAllFlag) {
					continue // This is part of ES2018
				}

			case 'y', 'u':
				if !p.options.unsupportedJSFeatures.Has(compat.RegexpStickyAndUnicodeFlags) {
					continue // These are part of ES2018
				}

			case 'd':
				if !p.options.unsupportedJSFeatures.Has(compat.RegexpMatchIndices) {
					continue // This is part of ES2022
				}

			case 'v':
				if !p.options.unsupportedJSFeatures.Has(compat.RegexpSetNotation) {
					continue // This is from a proposal: https://github.com/tc39/proposal-regexp-v-flag
				}

			default:
				// Unknown flags are never supported
			}

			// "end" here is the outer slash index, so "end+1+i" points at the
			// offending flag character within the original literal
			r = logger.Range{Loc: logger.Loc{Start: loc.Start + int32(end+1) + int32(i)}, Len: 1}
			what = fmt.Sprintf("The regular expression flag \"%c\" is not available", c)
			isUnsupported = true
			break
		}
	}

	// Log a debug message explaining the upcoming "new RegExp()" rewrite
	if isUnsupported {
		where := config.PrettyPrintTargetEnvironment(p.options.originalTargetEnv, p.options.unsupportedJSFeatureOverridesMask)
		p.log.AddIDWithNotes(logger.MsgID_JS_UnsupportedRegExp, logger.Debug, &p.tracker, r, fmt.Sprintf("%s in %s", what, where), []logger.MsgData{{
			Text: "This regular expression literal has been converted to a \"new RegExp()\" constructor " +
				"to avoid generating code with a syntax error. However, you will need to include a " +
				"polyfill for \"RegExp\" for your code to have the correct behavior at run-time."}})
	}

	return
}
+
+// This function takes "exprIn" as input from the caller and produces "exprOut"
+// for the caller to pass along extra data. This is mostly for optional chaining.
+func (p *parser) visitExprInOut(expr js_ast.Expr, in exprIn) (js_ast.Expr, exprOut) {
+	if in.assignTarget != js_ast.AssignTargetNone && !p.isValidAssignmentTarget(expr) {
+		p.log.AddError(&p.tracker, logger.Range{Loc: expr.Loc}, "Invalid assignment target")
+	}
+
+	// Note: Anything added before or after this switch statement will be bypassed
+	// when visiting nested "EBinary" nodes due to stack overflow mitigations for
+	// deeply-nested ASTs. If anything like that is added, care must be taken that
+	// it doesn't affect these mitigations by ensuring that the mitigations are not
+	// applied in those cases (e.g. by adding an additional conditional check).
+	switch e := expr.Data.(type) {
+	case *js_ast.ENull, *js_ast.ESuper, *js_ast.EBoolean, *js_ast.EBigInt, *js_ast.EUndefined, *js_ast.EJSXText:
+
+	case *js_ast.ENameOfSymbol:
+		e.Ref = p.symbolForMangledProp(p.loadNameFromRef(e.Ref))
+
+	case *js_ast.ERegExp:
+		// "/pattern/flags" => "new RegExp('pattern', 'flags')"
+		if pattern, flags, ok := p.isUnsupportedRegularExpression(expr.Loc, e.Value); ok {
+			args := []js_ast.Expr{{
+				Loc:  logger.Loc{Start: expr.Loc.Start + 1},
+				Data: &js_ast.EString{Value: helpers.StringToUTF16(pattern)},
+			}}
+			if flags != "" {
+				args = append(args, js_ast.Expr{
+					Loc:  logger.Loc{Start: expr.Loc.Start + int32(len(pattern)) + 2},
+					Data: &js_ast.EString{Value: helpers.StringToUTF16(flags)},
+				})
+			}
+			regExpRef := p.makeRegExpRef()
+			p.recordUsage(regExpRef)
+			return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENew{
+				Target:        js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: regExpRef}},
+				Args:          args,
+				CloseParenLoc: logger.Loc{Start: expr.Loc.Start + int32(len(e.Value))},
+			}}, exprOut{}
+		}
+
+	case *js_ast.ENewTarget:
+		if !p.fnOnlyDataVisit.isNewTargetAllowed {
+			p.log.AddError(&p.tracker, e.Range, "Cannot use \"new.target\" here:")
+		}
+
+	case *js_ast.EString:
+		if e.LegacyOctalLoc.Start > 0 {
+			if e.PreferTemplate {
+				p.log.AddError(&p.tracker, p.source.RangeOfLegacyOctalEscape(e.LegacyOctalLoc),
+					"Legacy octal escape sequences cannot be used in template literals")
+			} else if p.isStrictMode() {
+				p.markStrictModeFeature(legacyOctalEscape, p.source.RangeOfLegacyOctalEscape(e.LegacyOctalLoc), "")
+			}
+		}
+
+		if in.shouldMangleStringsAsProps && p.options.mangleQuoted && !e.PreferTemplate {
+			if name := helpers.UTF16ToString(e.Value); p.isMangledProp(name) {
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENameOfSymbol{
+					Ref: p.symbolForMangledProp(name),
+				}}, exprOut{}
+			}
+		}
+
+	case *js_ast.ENumber:
+		if p.legacyOctalLiterals != nil && p.isStrictMode() {
+			if r, ok := p.legacyOctalLiterals[expr.Data]; ok {
+				p.markStrictModeFeature(legacyOctalLiteral, r, "")
+			}
+		}
+
+	case *js_ast.EThis:
+		isDeleteTarget := e == p.deleteTarget
+		isCallTarget := e == p.callTarget
+
+		if value, ok := p.valueForThis(expr.Loc, true /* shouldLog */, in.assignTarget, isDeleteTarget, isCallTarget); ok {
+			return value, exprOut{}
+		}
+
+		// Capture "this" inside arrow functions that will be lowered into normal
+		// function expressions for older language environments
+		if p.fnOrArrowDataVisit.isArrow && p.options.unsupportedJSFeatures.Has(compat.Arrow) && p.fnOnlyDataVisit.isThisNested {
+			return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: p.captureThis()}}, exprOut{}
+		}
+
+	case *js_ast.EImportMeta:
+		isDeleteTarget := e == p.deleteTarget
+		isCallTarget := e == p.callTarget
+
+		// Check both user-specified defines and known globals
+		if defines, ok := p.options.defines.DotDefines["meta"]; ok {
+			for _, define := range defines {
+				if p.isDotOrIndexDefineMatch(expr, define.Parts) {
+					// Substitute user-specified defines
+					if define.Data.DefineExpr != nil {
+						return p.instantiateDefineExpr(expr.Loc, *define.Data.DefineExpr, identifierOpts{
+							assignTarget:   in.assignTarget,
+							isCallTarget:   isCallTarget,
+							isDeleteTarget: isDeleteTarget,
+						}), exprOut{}
+					}
+				}
+			}
+		}
+
+		// Check injected dot names
+		if names, ok := p.injectedDotNames["meta"]; ok {
+			for _, name := range names {
+				if p.isDotOrIndexDefineMatch(expr, name.parts) {
+					// Note: We don't need to "ignoreRef" on the underlying identifier
+					// because we have only parsed it but not visited it yet
+					return p.instantiateInjectDotName(expr.Loc, name, in.assignTarget), exprOut{}
+				}
+			}
+		}
+
+		// Warn about "import.meta" if it's not replaced by a define
+		if p.options.unsupportedJSFeatures.Has(compat.ImportMeta) {
+			r := logger.Range{Loc: expr.Loc, Len: e.RangeLen}
+			p.markSyntaxFeature(compat.ImportMeta, r)
+		} else if p.options.mode != config.ModePassThrough && !p.options.outputFormat.KeepESMImportExportSyntax() {
+			r := logger.Range{Loc: expr.Loc, Len: e.RangeLen}
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode || p.fnOrArrowDataVisit.tryBodyCount > 0 {
+				kind = logger.Debug
+			}
+			p.log.AddIDWithNotes(logger.MsgID_JS_EmptyImportMeta, kind, &p.tracker, r, fmt.Sprintf(
+				"\"import.meta\" is not available with the %q output format and will be empty", p.options.outputFormat.String()),
+				[]logger.MsgData{{Text: "You need to set the output format to \"esm\" for \"import.meta\" to work correctly."}})
+		}
+
+		// Convert "import.meta" to a variable if it's not supported in the output format
+		if importMeta, ok := p.valueForImportMeta(expr.Loc); ok {
+			return importMeta, exprOut{}
+		}
+
+	case *js_ast.ESpread:
+		e.Value = p.visitExpr(e.Value)
+
+	case *js_ast.EIdentifier:
+		isCallTarget := e == p.callTarget
+		isDeleteTarget := e == p.deleteTarget
+		name := p.loadNameFromRef(e.Ref)
+		if p.isStrictMode() && js_lexer.StrictModeReservedWords[name] {
+			p.markStrictModeFeature(reservedWord, js_lexer.RangeOfIdentifier(p.source, expr.Loc), name)
+		}
+		result := p.findSymbol(expr.Loc, name)
+		e.MustKeepDueToWithStmt = result.isInsideWithScope
+		e.Ref = result.ref
+
+		// Handle referencing a class name within that class's computed property
+		// key. This is not allowed, and must fail at run-time:
+		//
+		//   class Foo {
+		//     static foo = 'bar'
+		//     static [Foo.foo] = 'foo'
+		//   }
+		//
+		if p.symbols[result.ref.InnerIndex].Kind == ast.SymbolClassInComputedPropertyKey {
+			p.log.AddID(logger.MsgID_JS_ClassNameWillThrow, logger.Warning, &p.tracker, js_lexer.RangeOfIdentifier(p.source, expr.Loc),
+				fmt.Sprintf("Accessing class %q before initialization will throw", name))
+			return p.callRuntime(expr.Loc, "__earlyAccess", []js_ast.Expr{{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(name)}}}), exprOut{}
+		}
+
+		// Handle assigning to a constant
+		if in.assignTarget != js_ast.AssignTargetNone {
+			switch p.symbols[result.ref.InnerIndex].Kind {
+			case ast.SymbolConst:
+				r := js_lexer.RangeOfIdentifier(p.source, expr.Loc)
+				notes := []logger.MsgData{p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, result.declareLoc),
+					fmt.Sprintf("The symbol %q was declared a constant here:", name))}
+
+				// Make this an error when bundling because we may need to convert this
+				// "const" into a "var" during bundling. Also make this an error when
+				// the constant is inlined because we will otherwise generate code with
+				// a syntax error.
+				if _, isInlinedConstant := p.constValues[result.ref]; isInlinedConstant || p.options.mode == config.ModeBundle ||
+					(p.currentScope.Parent == nil && p.willWrapModuleInTryCatchForUsing) {
+					p.log.AddErrorWithNotes(&p.tracker, r,
+						fmt.Sprintf("Cannot assign to %q because it is a constant", name), notes)
+				} else {
+					p.log.AddIDWithNotes(logger.MsgID_JS_AssignToConstant, logger.Warning, &p.tracker, r,
+						fmt.Sprintf("This assignment will throw because %q is a constant", name), notes)
+				}
+
+			case ast.SymbolInjected:
+				if where, ok := p.injectedSymbolSources[result.ref]; ok {
+					r := js_lexer.RangeOfIdentifier(p.source, expr.Loc)
+					tracker := logger.MakeLineColumnTracker(&where.source)
+					p.log.AddErrorWithNotes(&p.tracker, r,
+						fmt.Sprintf("Cannot assign to %q because it's an import from an injected file", name),
+						[]logger.MsgData{tracker.MsgData(js_lexer.RangeOfIdentifier(where.source, where.loc),
+							fmt.Sprintf("The symbol %q was exported from %q here:", name, where.source.PrettyPath))})
+				}
+			}
+		}
+
+		// Substitute user-specified defines for unbound or injected symbols
+		methodCallMustBeReplacedWithUndefined := false
+		if p.symbols[e.Ref.InnerIndex].Kind.IsUnboundOrInjected() && !result.isInsideWithScope && e != p.deleteTarget {
+			if data, ok := p.options.defines.IdentifierDefines[name]; ok {
+				if data.DefineExpr != nil {
+					new := p.instantiateDefineExpr(expr.Loc, *data.DefineExpr, identifierOpts{
+						assignTarget:   in.assignTarget,
+						isCallTarget:   isCallTarget,
+						isDeleteTarget: isDeleteTarget,
+					})
+					if in.assignTarget == js_ast.AssignTargetNone || defineValueCanBeUsedInAssignTarget(new.Data) {
+						p.ignoreUsage(e.Ref)
+						return new, exprOut{}
+					} else {
+						p.logAssignToDefine(js_lexer.RangeOfIdentifier(p.source, expr.Loc), name, js_ast.Expr{})
+					}
+				}
+
+				// Copy the side effect flags over in case this expression is unused
+				if data.Flags.Has(config.CanBeRemovedIfUnused) {
+					e.CanBeRemovedIfUnused = true
+				}
+				if data.Flags.Has(config.CallCanBeUnwrappedIfUnused) && !p.options.ignoreDCEAnnotations {
+					e.CallCanBeUnwrappedIfUnused = true
+				}
+				if data.Flags.Has(config.MethodCallsMustBeReplacedWithUndefined) {
+					methodCallMustBeReplacedWithUndefined = true
+				}
+			}
+		}
+
+		return p.handleIdentifier(expr.Loc, e, identifierOpts{
+				assignTarget:            in.assignTarget,
+				isCallTarget:            isCallTarget,
+				isDeleteTarget:          isDeleteTarget,
+				wasOriginallyIdentifier: true,
+			}), exprOut{
+				methodCallMustBeReplacedWithUndefined: methodCallMustBeReplacedWithUndefined,
+			}
+
+	case *js_ast.EJSXElement:
+		propsLoc := expr.Loc
+
+		// Resolving the location index to a specific line and column in
+		// development mode is not too expensive because we seek from the
+		// previous JSX element. It amounts to at most a single additional
+		// scan over the source code. Note that this has to happen before
+		// we visit anything about this JSX element to make sure that we
+		// only ever need to scan forward, not backward.
+		var jsxSourceLine int
+		var jsxSourceColumn int
+		if p.options.jsx.Development && p.options.jsx.AutomaticRuntime {
+			for p.jsxSourceLoc < int(propsLoc.Start) {
+				r, size := utf8.DecodeRuneInString(p.source.Contents[p.jsxSourceLoc:])
+				p.jsxSourceLoc += size
+				if r == '\n' || r == '\r' || r == '\u2028' || r == '\u2029' {
+					if r == '\r' && p.jsxSourceLoc < len(p.source.Contents) && p.source.Contents[p.jsxSourceLoc] == '\n' {
+						p.jsxSourceLoc++ // Handle Windows-style CRLF newlines
+					}
+					p.jsxSourceLine++
+					p.jsxSourceColumn = 0
+				} else {
+					// Babel and TypeScript count columns in UTF-16 code units
+					if r < 0xFFFF {
+						p.jsxSourceColumn++
+					} else {
+						p.jsxSourceColumn += 2
+					}
+				}
+			}
+			jsxSourceLine = p.jsxSourceLine
+			jsxSourceColumn = p.jsxSourceColumn
+		}
+
+		if e.TagOrNil.Data != nil {
+			propsLoc = e.TagOrNil.Loc
+			e.TagOrNil = p.visitExpr(e.TagOrNil)
+			p.warnAboutImportNamespaceCall(e.TagOrNil, exprKindJSXTag)
+		}
+
+		// Visit properties
+		hasSpread := false
+		for i, property := range e.Properties {
+			if property.Kind == js_ast.PropertySpread {
+				hasSpread = true
+			} else {
+				if mangled, ok := property.Key.Data.(*js_ast.ENameOfSymbol); ok {
+					mangled.Ref = p.symbolForMangledProp(p.loadNameFromRef(mangled.Ref))
+				} else {
+					property.Key = p.visitExpr(property.Key)
+				}
+			}
+			if property.ValueOrNil.Data != nil {
+				property.ValueOrNil = p.visitExpr(property.ValueOrNil)
+			}
+			if property.InitializerOrNil.Data != nil {
+				property.InitializerOrNil = p.visitExpr(property.InitializerOrNil)
+			}
+			e.Properties[i] = property
+		}
+
+		// "{a, ...{b, c}, d}" => "{a, b, c, d}"
+		if p.options.minifySyntax && hasSpread {
+			e.Properties = js_ast.MangleObjectSpread(e.Properties)
+		}
+
+		// Visit children
+		if len(e.NullableChildren) > 0 {
+			for i, childOrNil := range e.NullableChildren {
+				if childOrNil.Data != nil {
+					e.NullableChildren[i] = p.visitExpr(childOrNil)
+				}
+			}
+		}
+
+		if p.options.jsx.Preserve {
+			// If the tag is an identifier, mark it as needing to be upper-case
+			switch tag := e.TagOrNil.Data.(type) {
+			case *js_ast.EIdentifier:
+				p.symbols[tag.Ref.InnerIndex].Flags |= ast.MustStartWithCapitalLetterForJSX
+
+			case *js_ast.EImportIdentifier:
+				p.symbols[tag.Ref.InnerIndex].Flags |= ast.MustStartWithCapitalLetterForJSX
+			}
+		} else {
+			// Remove any nil children in the array (in place) before iterating over it
+			children := e.NullableChildren
+			{
+				end := 0
+				for _, childOrNil := range children {
+					if childOrNil.Data != nil {
+						children[end] = childOrNil
+						end++
+					}
+				}
+				children = children[:end]
+			}
+
+			// A missing tag is a fragment
+			if e.TagOrNil.Data == nil {
+				if p.options.jsx.AutomaticRuntime {
+					e.TagOrNil = p.importJSXSymbol(expr.Loc, JSXImportFragment)
+				} else {
+					e.TagOrNil = p.instantiateDefineExpr(expr.Loc, p.options.jsx.Fragment, identifierOpts{
+						wasOriginallyIdentifier: true,
+						matchAgainstDefines:     true, // Allow defines to rewrite the JSX fragment factory
+					})
+				}
+			}
+
+			shouldUseCreateElement := !p.options.jsx.AutomaticRuntime
+			if !shouldUseCreateElement {
+				// Even for runtime="automatic", <div {...props} key={key} /> is special cased to createElement
+				// See https://github.com/babel/babel/blob/e482c763466ba3f44cb9e3467583b78b7f030b4a/packages/babel-plugin-transform-react-jsx/src/create-plugin.ts#L352
+				seenPropsSpread := false
+				for _, property := range e.Properties {
+					if seenPropsSpread && property.Kind == js_ast.PropertyField {
+						if str, ok := property.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(str.Value, "key") {
+							shouldUseCreateElement = true
+							break
+						}
+					} else if property.Kind == js_ast.PropertySpread {
+						seenPropsSpread = true
+					}
+				}
+			}
+
+			if shouldUseCreateElement {
+				// Arguments to createElement()
+				args := []js_ast.Expr{e.TagOrNil}
+				if len(e.Properties) > 0 {
+					args = append(args, p.lowerObjectSpread(propsLoc, &js_ast.EObject{
+						Properties:   e.Properties,
+						IsSingleLine: e.IsTagSingleLine,
+					}))
+				} else {
+					args = append(args, js_ast.Expr{Loc: propsLoc, Data: js_ast.ENullShared})
+				}
+				if len(children) > 0 {
+					args = append(args, children...)
+				}
+
+				// Call createElement()
+				var target js_ast.Expr
+				kind := js_ast.NormalCall
+				if p.options.jsx.AutomaticRuntime {
+					target = p.importJSXSymbol(expr.Loc, JSXImportCreateElement)
+				} else {
+					target = p.instantiateDefineExpr(expr.Loc, p.options.jsx.Factory, identifierOpts{
+						wasOriginallyIdentifier: true,
+						matchAgainstDefines:     true, // Allow defines to rewrite the JSX factory
+					})
+					if js_ast.IsPropertyAccess(target) {
+						kind = js_ast.TargetWasOriginallyPropertyAccess
+					}
+					p.warnAboutImportNamespaceCall(target, exprKindCall)
+				}
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+					Target:        target,
+					Args:          args,
+					CloseParenLoc: e.CloseLoc,
+					IsMultiLine:   !e.IsTagSingleLine,
+					Kind:          kind,
+
+					// Enable tree shaking
+					CanBeUnwrappedIfUnused: !p.options.ignoreDCEAnnotations && !p.options.jsx.SideEffects,
+				}}, exprOut{}
+			} else {
+				// Arguments to jsx()
+				args := []js_ast.Expr{e.TagOrNil}
+
+				// Props argument
+				properties := make([]js_ast.Property, 0, len(e.Properties)+1)
+
+				// For jsx(), "key" is passed in as a separate argument, so filter it out
+				// from the props here. Also, check for __source and __self, which might have
+				// been added by some upstream plugin. Their presence here would represent a
+				// configuration error.
+				hasKey := false
+				keyProperty := js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}
+				for _, property := range e.Properties {
+					if str, ok := property.Key.Data.(*js_ast.EString); ok {
+						propName := helpers.UTF16ToString(str.Value)
+						switch propName {
+						case "key":
+							if boolean, ok := property.ValueOrNil.Data.(*js_ast.EBoolean); ok && boolean.Value && property.Flags.Has(js_ast.PropertyWasShorthand) {
+								r := js_lexer.RangeOfIdentifier(p.source, property.Loc)
+								msg := logger.Msg{
+									Kind:  logger.Error,
+									Data:  p.tracker.MsgData(r, "Please provide an explicit value for \"key\":"),
+									Notes: []logger.MsgData{{Text: "Using \"key\" as a shorthand for \"key={true}\" is not allowed when using React's \"automatic\" JSX transform."}},
+								}
+								msg.Data.Location.Suggestion = "key={true}"
+								p.log.AddMsg(msg)
+							} else {
+								keyProperty = property.ValueOrNil
+								hasKey = true
+							}
+							continue
+
+						case "__source", "__self":
+							r := js_lexer.RangeOfIdentifier(p.source, property.Loc)
+							p.log.AddErrorWithNotes(&p.tracker, r,
+								fmt.Sprintf("Duplicate \"%s\" prop found:", propName),
+								[]logger.MsgData{{Text: "Both \"__source\" and \"__self\" are set automatically by esbuild when using React's \"automatic\" JSX transform. " +
+									"This duplicate prop may have come from a plugin."}})
+							continue
+						}
+					}
+					properties = append(properties, property)
+				}
+
+				isStaticChildren := len(children) > 1
+
+				// Children are passed in as an explicit prop
+				if len(children) > 0 {
+					childrenValue := children[0]
+
+					if len(children) > 1 {
+						childrenValue.Data = &js_ast.EArray{Items: children}
+					} else if _, ok := childrenValue.Data.(*js_ast.ESpread); ok {
+						// TypeScript considers spread children to be static, but Babel considers
+						// it to be an error ("Spread children are not supported in React.").
+						// We'll follow TypeScript's behavior here because spread children may be
+						// valid with non-React source runtimes.
+						childrenValue.Data = &js_ast.EArray{Items: []js_ast.Expr{childrenValue}}
+						isStaticChildren = true
+					}
+
+					properties = append(properties, js_ast.Property{
+						Key: js_ast.Expr{
+							Data: &js_ast.EString{Value: helpers.StringToUTF16("children")},
+							Loc:  childrenValue.Loc,
+						},
+						ValueOrNil: childrenValue,
+						Kind:       js_ast.PropertyField,
+						Loc:        childrenValue.Loc,
+					})
+				}
+
+				args = append(args, p.lowerObjectSpread(propsLoc, &js_ast.EObject{
+					Properties:   properties,
+					IsSingleLine: e.IsTagSingleLine,
+				}))
+
+				// "key"
+				if hasKey || p.options.jsx.Development {
+					args = append(args, keyProperty)
+				}
+
+				if p.options.jsx.Development {
+					// "isStaticChildren"
+					args = append(args, js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EBoolean{Value: isStaticChildren}})
+
+					// "__source"
+					args = append(args, js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EObject{
+						Properties: []js_ast.Property{
+							{
+								Kind:       js_ast.PropertyField,
+								Key:        js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16("fileName")}},
+								ValueOrNil: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(p.source.PrettyPath)}},
+							},
+							{
+								Kind:       js_ast.PropertyField,
+								Key:        js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16("lineNumber")}},
+								ValueOrNil: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: float64(jsxSourceLine + 1)}}, // 1-based lines
+							},
+							{
+								Kind:       js_ast.PropertyField,
+								Key:        js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16("columnNumber")}},
+								ValueOrNil: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: float64(jsxSourceColumn + 1)}}, // 1-based columns
+							},
+						},
+					}})
+
+					// "__self"
+					__self := js_ast.Expr{Loc: expr.Loc, Data: js_ast.EThisShared}
+					{
+						if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
+							// Substitute "this" if we're inside a static class context
+							p.recordUsage(*p.fnOnlyDataVisit.innerClassNameRef)
+							__self.Data = &js_ast.EIdentifier{Ref: *p.fnOnlyDataVisit.innerClassNameRef}
+						} else if !p.fnOnlyDataVisit.isThisNested && p.options.mode != config.ModePassThrough {
+							// Replace top-level "this" with "undefined" if there's an output format
+							__self.Data = js_ast.EUndefinedShared
+						} else if p.fnOrArrowDataVisit.isDerivedClassCtor {
+							// We can't use "this" here in case it comes before "super()"
+							__self.Data = js_ast.EUndefinedShared
+						}
+					}
+					if _, ok := __self.Data.(*js_ast.EUndefined); !ok {
+						// Omit "__self" entirely if it's undefined
+						args = append(args, __self)
+					}
+				}
+
+				jsx := JSXImportJSX
+				if isStaticChildren {
+					jsx = JSXImportJSXS
+				}
+
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+					Target:        p.importJSXSymbol(expr.Loc, jsx),
+					Args:          args,
+					CloseParenLoc: e.CloseLoc,
+					IsMultiLine:   !e.IsTagSingleLine,
+
+					// Enable tree shaking
+					CanBeUnwrappedIfUnused: !p.options.ignoreDCEAnnotations && !p.options.jsx.SideEffects,
+				}}, exprOut{}
+			}
+		}
+
+	case *js_ast.ETemplate:
+		if e.LegacyOctalLoc.Start > 0 {
+			p.log.AddError(&p.tracker, p.source.RangeOfLegacyOctalEscape(e.LegacyOctalLoc),
+				"Legacy octal escape sequences cannot be used in template literals")
+		}
+
+		var tagThisFunc func() js_ast.Expr
+		var tagWrapFunc func(js_ast.Expr) js_ast.Expr
+
+		if e.TagOrNil.Data != nil {
+			// Capture the value for "this" if the tag is a lowered optional chain.
+			// We'll need to manually apply this value later to preserve semantics.
+			tagIsLoweredOptionalChain := false
+			if p.options.unsupportedJSFeatures.Has(compat.OptionalChain) {
+				switch target := e.TagOrNil.Data.(type) {
+				case *js_ast.EDot:
+					tagIsLoweredOptionalChain = target.OptionalChain != js_ast.OptionalChainNone
+				case *js_ast.EIndex:
+					tagIsLoweredOptionalChain = target.OptionalChain != js_ast.OptionalChainNone
+				}
+			}
+
+			p.templateTag = e.TagOrNil.Data
+			tag, tagOut := p.visitExprInOut(e.TagOrNil, exprIn{storeThisArgForParentOptionalChain: tagIsLoweredOptionalChain})
+			e.TagOrNil = tag
+			tagThisFunc = tagOut.thisArgFunc
+			tagWrapFunc = tagOut.thisArgWrapFunc
+
+			// Copy the call side effect flag over if this is a known target
+			if id, ok := tag.Data.(*js_ast.EIdentifier); ok && p.symbols[id.Ref.InnerIndex].Flags.Has(ast.CallCanBeUnwrappedIfUnused) {
+				e.CanBeUnwrappedIfUnused = true
+			}
+
+			// The value of "this" must be manually preserved for private member
+			// accesses inside template tag expressions such as "this.#foo``".
+			// The private member "this.#foo" must see the value of "this".
+			if target, loc, private := p.extractPrivateIndex(e.TagOrNil); private != nil {
+				// "foo.#bar`123`" => "__privateGet(_a = foo, #bar).bind(_a)`123`"
+				targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(target.Loc, 2, target, valueCouldBeMutated)
+				e.TagOrNil = targetWrapFunc(js_ast.Expr{Loc: target.Loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: target.Loc, Data: &js_ast.EDot{
+						Target:  p.lowerPrivateGet(targetFunc(), loc, private),
+						Name:    "bind",
+						NameLoc: target.Loc,
+					}},
+					Args: []js_ast.Expr{targetFunc()},
+					Kind: js_ast.TargetWasOriginallyPropertyAccess,
+				}})
+			}
+		}
+
+		for i, part := range e.Parts {
+			e.Parts[i].Value = p.visitExpr(part.Value)
+		}
+
+		// When mangling, inline string values into the template literal. Note that
+		// it may no longer be a template literal after this point (it may turn into
+		// a plain string literal instead).
+		if p.shouldFoldTypeScriptConstantExpressions || p.options.minifySyntax {
+			expr = js_ast.InlinePrimitivesIntoTemplate(expr.Loc, e)
+		}
+
+		shouldLowerTemplateLiteral := p.options.unsupportedJSFeatures.Has(compat.TemplateLiteral)
+
+		// If the tag was originally an optional chaining property access, then
+		// we'll need to lower this template literal as well to preserve the value
+		// for "this".
+		if tagThisFunc != nil {
+			shouldLowerTemplateLiteral = true
+		}
+
+		// Lower tagged template literals that include "</script"
+		// since we won't be able to escape it without lowering it
+		if !shouldLowerTemplateLiteral && !p.options.unsupportedJSFeatures.Has(compat.InlineScript) && e.TagOrNil.Data != nil {
+			if containsClosingScriptTag(e.HeadRaw) {
+				shouldLowerTemplateLiteral = true
+			} else {
+				for _, part := range e.Parts {
+					if containsClosingScriptTag(part.TailRaw) {
+						shouldLowerTemplateLiteral = true
+						break
+					}
+				}
+			}
+		}
+
+		// Convert template literals to older syntax if this is still a template literal
+		if shouldLowerTemplateLiteral {
+			if e, ok := expr.Data.(*js_ast.ETemplate); ok {
+				return p.lowerTemplateLiteral(expr.Loc, e, tagThisFunc, tagWrapFunc), exprOut{}
+			}
+		}
+
+	case *js_ast.EBinary:
+		// The handling of binary expressions is convoluted because we're using
+		// iteration on the heap instead of recursion on the call stack to avoid
+		// stack overflow for deeply-nested ASTs. See the comment before the
+		// definition of "binaryExprVisitor" for details.
+		v := binaryExprVisitor{
+			e:   e,
+			loc: expr.Loc,
+			in:  in,
+		}
+
+		// Everything uses a single stack to reduce allocation overhead. This stack
+		// should almost always be very small, and almost all visits should reuse
+		// existing memory without allocating anything.
+		stackBottom := len(p.binaryExprStack)
+
+		// Iterate down into the AST along the left node of the binary operation.
+		// Continue iterating until we encounter something that's not a binary node.
+		for {
+			// Check whether this node is a special case. If it is, a result will be
+			// provided which ends our iteration. Otherwise, the visitor object will
+			// be prepared for visiting.
+			if result := v.checkAndPrepare(p); result.Data != nil {
+				expr = result
+				break
+			}
+
+			// Grab the arguments to our nested "visitExprInOut" call for the left
+			// node. We only care about deeply-nested left nodes because most binary
+			// operators in JavaScript are left-associative and the problematic edge
+			// cases we're trying to avoid crashing on have lots of left-associative
+			// binary operators chained together without parentheses (e.g. "1+2+...").
+			left := v.e.Left
+			leftIn := v.leftIn
+			leftBinary, ok := left.Data.(*js_ast.EBinary)
+
+			// Stop iterating if iteration doesn't apply to the left node. This checks
+			// the assignment target because "visitExprInOut" has additional behavior
+			// in that case that we don't want to miss (before the top-level "switch"
+			// statement).
+			if !ok || leftIn.assignTarget != js_ast.AssignTargetNone {
+				v.e.Left, _ = p.visitExprInOut(left, leftIn)
+				expr = v.visitRightAndFinish(p)
+				break
+			}
+
+			// Note that we only append to the stack (and therefore allocate memory
+			// on the heap) when there are nested binary expressions. A single binary
+			// expression doesn't add anything to the stack.
+			p.binaryExprStack = append(p.binaryExprStack, v)
+			v = binaryExprVisitor{
+				e:   leftBinary,
+				loc: left.Loc,
+				in:  leftIn,
+			}
+		}
+
+		// Process all binary operations from the deepest-visited node back toward
+		// our original top-level binary operation.
+		for {
+			n := len(p.binaryExprStack) - 1
+			if n < stackBottom {
+				break
+			}
+			v := p.binaryExprStack[n]
+			p.binaryExprStack = p.binaryExprStack[:n]
+			v.e.Left = expr
+			expr = v.visitRightAndFinish(p)
+		}
+
+		return expr, exprOut{}
+
+	case *js_ast.EDot:
+		isDeleteTarget := e == p.deleteTarget
+		isCallTarget := e == p.callTarget
+		isTemplateTag := e == p.templateTag
+
+		// Check both user-specified defines and known globals
+		if defines, ok := p.options.defines.DotDefines[e.Name]; ok {
+			for _, define := range defines {
+				if p.isDotOrIndexDefineMatch(expr, define.Parts) {
+					// Substitute user-specified defines
+					if define.Data.DefineExpr != nil {
+						new := p.instantiateDefineExpr(expr.Loc, *define.Data.DefineExpr, identifierOpts{
+							assignTarget:   in.assignTarget,
+							isCallTarget:   isCallTarget,
+							isDeleteTarget: isDeleteTarget,
+						})
+						if in.assignTarget == js_ast.AssignTargetNone || defineValueCanBeUsedInAssignTarget(new.Data) {
+							// Note: We don't need to "ignoreRef" on the underlying identifier
+							// because we have only parsed it but not visited it yet
+							return new, exprOut{}
+						} else {
+							r := logger.Range{Loc: expr.Loc, Len: js_lexer.RangeOfIdentifier(p.source, e.NameLoc).End() - expr.Loc.Start}
+							p.logAssignToDefine(r, "", expr)
+						}
+					}
+
+					// Copy the side effect flags over in case this expression is unused
+					if define.Data.Flags.Has(config.CanBeRemovedIfUnused) {
+						e.CanBeRemovedIfUnused = true
+					}
+					if define.Data.Flags.Has(config.CallCanBeUnwrappedIfUnused) && !p.options.ignoreDCEAnnotations {
+						e.CallCanBeUnwrappedIfUnused = true
+					}
+					if define.Data.Flags.Has(config.IsSymbolInstance) {
+						e.IsSymbolInstance = true
+					}
+					break
+				}
+			}
+		}
+
+		// Check injected dot names
+		if names, ok := p.injectedDotNames[e.Name]; ok {
+			for _, name := range names {
+				if p.isDotOrIndexDefineMatch(expr, name.parts) {
+					// Note: We don't need to "ignoreRef" on the underlying identifier
+					// because we have only parsed it but not visited it yet
+					return p.instantiateInjectDotName(expr.Loc, name, in.assignTarget), exprOut{}
+				}
+			}
+		}
+
+		// Track ".then().catch()" chains
+		if isCallTarget && p.thenCatchChain.nextTarget == e {
+			if e.Name == "catch" {
+				p.thenCatchChain = thenCatchChain{
+					nextTarget: e.Target.Data,
+					hasCatch:   true,
+					catchLoc:   e.NameLoc,
+				}
+			} else if e.Name == "then" {
+				p.thenCatchChain = thenCatchChain{
+					nextTarget: e.Target.Data,
+					hasCatch:   p.thenCatchChain.hasCatch || p.thenCatchChain.hasMultipleArgs,
+					catchLoc:   p.thenCatchChain.catchLoc,
+				}
+			}
+		}
+
+		p.dotOrIndexTarget = e.Target.Data
+		target, out := p.visitExprInOut(e.Target, exprIn{
+			hasChainParent: e.OptionalChain == js_ast.OptionalChainContinue,
+		})
+		e.Target = target
+
+		// Lower "super.prop" if necessary
+		if e.OptionalChain == js_ast.OptionalChainNone && in.assignTarget == js_ast.AssignTargetNone &&
+			!isCallTarget && p.shouldLowerSuperPropertyAccess(e.Target) {
+			// "super.foo" => "__superGet('foo')"
+			key := js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
+			value := p.lowerSuperPropertyGet(expr.Loc, key)
+			if isTemplateTag {
+				value.Data = &js_ast.ECall{
+					Target: js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
+						Target:  value,
+						Name:    "bind",
+						NameLoc: value.Loc,
+					}},
+					Args: []js_ast.Expr{{Loc: value.Loc, Data: js_ast.EThisShared}},
+					Kind: js_ast.TargetWasOriginallyPropertyAccess,
+				}
+			}
+			return value, exprOut{}
+		}
+
+		// Lower optional chaining if we're the top of the chain
+		containsOptionalChain := e.OptionalChain == js_ast.OptionalChainStart ||
+			(e.OptionalChain == js_ast.OptionalChainContinue && out.childContainsOptionalChain)
+		if containsOptionalChain && !in.hasChainParent {
+			return p.lowerOptionalChain(expr, in, out)
+		}
+
+		// Potentially rewrite this property access
+		out = exprOut{
+			childContainsOptionalChain:            containsOptionalChain,
+			methodCallMustBeReplacedWithUndefined: out.methodCallMustBeReplacedWithUndefined,
+			thisArgFunc:                           out.thisArgFunc,
+			thisArgWrapFunc:                       out.thisArgWrapFunc,
+		}
+		if !in.hasChainParent {
+			out.thisArgFunc = nil
+			out.thisArgWrapFunc = nil
+		}
+		if e.OptionalChain == js_ast.OptionalChainNone {
+			if value, ok := p.maybeRewritePropertyAccess(expr.Loc, in.assignTarget,
+				isDeleteTarget, e.Target, e.Name, e.NameLoc, isCallTarget, isTemplateTag, false); ok {
+				return value, out
+			}
+		}
+		return js_ast.Expr{Loc: expr.Loc, Data: e}, out
+
+	case *js_ast.EIndex:
+		isCallTarget := e == p.callTarget
+		isTemplateTag := e == p.templateTag
+		isDeleteTarget := e == p.deleteTarget
+
+		// Check both user-specified defines and known globals
+		if str, ok := e.Index.Data.(*js_ast.EString); ok {
+			if defines, ok := p.options.defines.DotDefines[helpers.UTF16ToString(str.Value)]; ok {
+				for _, define := range defines {
+					if p.isDotOrIndexDefineMatch(expr, define.Parts) {
+						// Substitute user-specified defines
+						if define.Data.DefineExpr != nil {
+							new := p.instantiateDefineExpr(expr.Loc, *define.Data.DefineExpr, identifierOpts{
+								assignTarget:   in.assignTarget,
+								isCallTarget:   isCallTarget,
+								isDeleteTarget: isDeleteTarget,
+							})
+							if in.assignTarget == js_ast.AssignTargetNone || defineValueCanBeUsedInAssignTarget(new.Data) {
+								// Note: We don't need to "ignoreRef" on the underlying identifier
+								// because we have only parsed it but not visited it yet
+								return new, exprOut{}
+							} else {
+								r := logger.Range{Loc: expr.Loc}
+								afterIndex := logger.Loc{Start: p.source.RangeOfString(e.Index.Loc).End()}
+								if closeBracket := p.source.RangeOfOperatorAfter(afterIndex, "]"); closeBracket.Len > 0 {
+									r.Len = closeBracket.End() - r.Loc.Start
+								}
+								p.logAssignToDefine(r, "", expr)
+							}
+						}
+
+						// Copy the side effect flags over in case this expression is unused
+						if define.Data.Flags.Has(config.CanBeRemovedIfUnused) {
+							e.CanBeRemovedIfUnused = true
+						}
+						if define.Data.Flags.Has(config.CallCanBeUnwrappedIfUnused) && !p.options.ignoreDCEAnnotations {
+							e.CallCanBeUnwrappedIfUnused = true
+						}
+						if define.Data.Flags.Has(config.IsSymbolInstance) {
+							e.IsSymbolInstance = true
+						}
+						break
+					}
+				}
+			}
+		}
+
+		// "a['b']" => "a.b"
+		if p.options.minifySyntax {
+			if str, ok := e.Index.Data.(*js_ast.EString); ok && js_ast.IsIdentifierUTF16(str.Value) {
+				dot := p.dotOrMangledPropParse(e.Target, js_lexer.MaybeSubstring{String: helpers.UTF16ToString(str.Value)}, e.Index.Loc, e.OptionalChain, wasOriginallyIndex)
+				if isCallTarget {
+					p.callTarget = dot
+				}
+				if isTemplateTag {
+					p.templateTag = dot
+				}
+				if isDeleteTarget {
+					p.deleteTarget = dot
+				}
+				return p.visitExprInOut(js_ast.Expr{Loc: expr.Loc, Data: dot}, in)
+			}
+		}
+
+		p.dotOrIndexTarget = e.Target.Data
+		target, out := p.visitExprInOut(e.Target, exprIn{
+			hasChainParent: e.OptionalChain == js_ast.OptionalChainContinue,
+		})
+		e.Target = target
+
+		// Special-case private identifiers
+		if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok {
+			name := p.loadNameFromRef(private.Ref)
+			result := p.findSymbol(e.Index.Loc, name)
+			private.Ref = result.ref
+
+			// Unlike regular identifiers, there are no unbound private identifiers
+			kind := p.symbols[result.ref.InnerIndex].Kind
+			if !kind.IsPrivate() {
+				r := logger.Range{Loc: e.Index.Loc, Len: int32(len(name))}
+				p.log.AddError(&p.tracker, r, fmt.Sprintf("Private name %q must be declared in an enclosing class", name))
+			} else {
+				var r logger.Range
+				var text string
+				if in.assignTarget != js_ast.AssignTargetNone && (kind == ast.SymbolPrivateMethod || kind == ast.SymbolPrivateStaticMethod) {
+					r = logger.Range{Loc: e.Index.Loc, Len: int32(len(name))}
+					text = fmt.Sprintf("Writing to read-only method %q will throw", name)
+				} else if in.assignTarget != js_ast.AssignTargetNone && (kind == ast.SymbolPrivateGet || kind == ast.SymbolPrivateStaticGet) {
+					r = logger.Range{Loc: e.Index.Loc, Len: int32(len(name))}
+					text = fmt.Sprintf("Writing to getter-only property %q will throw", name)
+				} else if in.assignTarget != js_ast.AssignTargetReplace && (kind == ast.SymbolPrivateSet || kind == ast.SymbolPrivateStaticSet) {
+					r = logger.Range{Loc: e.Index.Loc, Len: int32(len(name))}
+					text = fmt.Sprintf("Reading from setter-only property %q will throw", name)
+				}
+				if text != "" {
+					kind := logger.Warning
+					if p.suppressWarningsAboutWeirdCode {
+						kind = logger.Debug
+					}
+					p.log.AddID(logger.MsgID_JS_PrivateNameWillThrow, kind, &p.tracker, r, text)
+				}
+			}
+
+			// Lower private member access only if we're sure the target isn't needed
+			// for the value of "this" for a call expression. All other cases will be
+			// taken care of by the enclosing call expression.
+			if p.privateSymbolNeedsToBeLowered(private) && e.OptionalChain == js_ast.OptionalChainNone &&
+				in.assignTarget == js_ast.AssignTargetNone && !isCallTarget && !isTemplateTag {
+				// "foo.#bar" => "__privateGet(foo, #bar)"
+				return p.lowerPrivateGet(e.Target, e.Index.Loc, private), exprOut{}
+			}
+		} else {
+			e.Index, _ = p.visitExprInOut(e.Index, exprIn{
+				shouldMangleStringsAsProps: true,
+			})
+		}
+
+		// Lower "super[prop]" if necessary
+		if e.OptionalChain == js_ast.OptionalChainNone && in.assignTarget == js_ast.AssignTargetNone &&
+			!isCallTarget && p.shouldLowerSuperPropertyAccess(e.Target) {
+			// "super[foo]" => "__superGet(foo)"
+			value := p.lowerSuperPropertyGet(expr.Loc, e.Index)
+			if isTemplateTag {
+				value.Data = &js_ast.ECall{
+					Target: js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
+						Target:  value,
+						Name:    "bind",
+						NameLoc: value.Loc,
+					}},
+					Args: []js_ast.Expr{{Loc: value.Loc, Data: js_ast.EThisShared}},
+					Kind: js_ast.TargetWasOriginallyPropertyAccess,
+				}
+			}
+			return value, exprOut{}
+		}
+
+		// Lower optional chaining if we're the top of the chain
+		containsOptionalChain := e.OptionalChain == js_ast.OptionalChainStart ||
+			(e.OptionalChain == js_ast.OptionalChainContinue && out.childContainsOptionalChain)
+		if containsOptionalChain && !in.hasChainParent {
+			return p.lowerOptionalChain(expr, in, out)
+		}
+
+		// Potentially rewrite this property access
+		out = exprOut{
+			childContainsOptionalChain:            containsOptionalChain,
+			methodCallMustBeReplacedWithUndefined: out.methodCallMustBeReplacedWithUndefined,
+			thisArgFunc:                           out.thisArgFunc,
+			thisArgWrapFunc:                       out.thisArgWrapFunc,
+		}
+		if !in.hasChainParent {
+			out.thisArgFunc = nil
+			out.thisArgWrapFunc = nil
+		}
+		if str, ok := e.Index.Data.(*js_ast.EString); ok && e.OptionalChain == js_ast.OptionalChainNone {
+			preferQuotedKey := !p.options.minifySyntax
+			if value, ok := p.maybeRewritePropertyAccess(expr.Loc, in.assignTarget, isDeleteTarget,
+				e.Target, helpers.UTF16ToString(str.Value), e.Index.Loc, isCallTarget, isTemplateTag, preferQuotedKey); ok {
+				return value, out
+			}
+		}
+
+		// Create an error for assigning to an import namespace when bundling. Even
+		// though this is a run-time error, we make it a compile-time error when
+		// bundling because scope hoisting means these will no longer be run-time
+		// errors.
+		if p.options.mode == config.ModeBundle && (in.assignTarget != js_ast.AssignTargetNone || isDeleteTarget) {
+			if id, ok := e.Target.Data.(*js_ast.EIdentifier); ok && p.symbols[id.Ref.InnerIndex].Kind == ast.SymbolImport {
+				r := js_lexer.RangeOfIdentifier(p.source, e.Target.Loc)
+				p.log.AddErrorWithNotes(&p.tracker, r,
+					fmt.Sprintf("Cannot assign to property on import %q", p.symbols[id.Ref.InnerIndex].OriginalName),
+					[]logger.MsgData{{Text: "Imports are immutable in JavaScript. " +
+						"To modify the value of this import, you must export a setter function in the " +
+						"imported file and then import and call that function here instead."}})
+
+			}
+		}
+
+		if p.options.minifySyntax {
+			switch index := e.Index.Data.(type) {
+			case *js_ast.EString:
+				// "a['x' + 'y']" => "a.xy" (this is done late to allow for constant folding)
+				if js_ast.IsIdentifierUTF16(index.Value) {
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EDot{
+						Target:                     e.Target,
+						Name:                       helpers.UTF16ToString(index.Value),
+						NameLoc:                    e.Index.Loc,
+						OptionalChain:              e.OptionalChain,
+						CanBeRemovedIfUnused:       e.CanBeRemovedIfUnused,
+						CallCanBeUnwrappedIfUnused: e.CallCanBeUnwrappedIfUnused,
+					}}, out
+				}
+
+				// "a['123']" => "a[123]" (this is done late to allow "'123'" to be mangled)
+				if numberValue, ok := js_ast.StringToEquivalentNumberValue(index.Value); ok {
+					e.Index.Data = &js_ast.ENumber{Value: numberValue}
+				}
+
+			case *js_ast.ENumber:
+				// "'abc'[1]" => "'b'"
+				if target, ok := e.Target.Data.(*js_ast.EString); ok {
+					if intValue := math.Floor(index.Value); index.Value == intValue && intValue >= 0 && intValue < float64(len(target.Value)) {
+						return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: []uint16{target.Value[int(intValue)]}}}, out
+					}
+				}
+			}
+		}
+
+		return js_ast.Expr{Loc: expr.Loc, Data: e}, out
+
+	case *js_ast.EUnary:
+		switch e.Op {
+		case js_ast.UnOpTypeof:
+			e.Value, _ = p.visitExprInOut(e.Value, exprIn{assignTarget: e.Op.UnaryAssignTarget()})
+
+			// Compile-time "typeof" evaluation
+			if typeof, ok := js_ast.TypeofWithoutSideEffects(e.Value.Data); ok {
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(typeof)}}, exprOut{}
+			}
+
+		case js_ast.UnOpDelete:
+			// Warn about code that tries to do "delete super.foo"
+			var superPropLoc logger.Loc
+			switch e2 := e.Value.Data.(type) {
+			case *js_ast.EDot:
+				if _, ok := e2.Target.Data.(*js_ast.ESuper); ok {
+					superPropLoc = e2.Target.Loc
+				}
+			case *js_ast.EIndex:
+				if _, ok := e2.Target.Data.(*js_ast.ESuper); ok {
+					superPropLoc = e2.Target.Loc
+				}
+			case *js_ast.EIdentifier:
+				p.markStrictModeFeature(deleteBareName, js_lexer.RangeOfIdentifier(p.source, e.Value.Loc), "")
+			}
+			if superPropLoc.Start != 0 {
+				r := js_lexer.RangeOfIdentifier(p.source, superPropLoc)
+				text := "Attempting to delete a property of \"super\" will throw a ReferenceError"
+				kind := logger.Warning
+				if p.suppressWarningsAboutWeirdCode {
+					kind = logger.Debug
+				}
+				p.log.AddID(logger.MsgID_JS_DeleteSuperProperty, kind, &p.tracker, r, text)
+			}
+
+			p.deleteTarget = e.Value.Data
+			value, out := p.visitExprInOut(e.Value, exprIn{hasChainParent: true})
+			e.Value = value
+
+			// Lower optional chaining if present since we're guaranteed to be the
+			// end of the chain
+			if out.childContainsOptionalChain {
+				return p.lowerOptionalChain(expr, in, out)
+			}
+
+		default:
+			e.Value, _ = p.visitExprInOut(e.Value, exprIn{assignTarget: e.Op.UnaryAssignTarget()})
+
+			// Post-process the unary expression
+			switch e.Op {
+			case js_ast.UnOpNot:
+				if p.options.minifySyntax {
+					e.Value = p.astHelpers.SimplifyBooleanExpr(e.Value)
+				}
+
+				if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(e.Value.Data); ok && sideEffects == js_ast.NoSideEffects {
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EBoolean{Value: !boolean}}, exprOut{}
+				}
+
+				if p.options.minifySyntax {
+					if result, ok := js_ast.MaybeSimplifyNot(e.Value); ok {
+						return result, exprOut{}
+					}
+				}
+
+			case js_ast.UnOpVoid:
+				if p.astHelpers.ExprCanBeRemovedIfUnused(e.Value) {
+					return js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}, exprOut{}
+				}
+
+			case js_ast.UnOpPos:
+				if number, ok := js_ast.ToNumberWithoutSideEffects(e.Value.Data); ok {
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: number}}, exprOut{}
+				}
+
+			case js_ast.UnOpNeg:
+				if number, ok := js_ast.ToNumberWithoutSideEffects(e.Value.Data); ok {
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: -number}}, exprOut{}
+				}
+
+			case js_ast.UnOpCpl:
+				if p.shouldFoldTypeScriptConstantExpressions || p.options.minifySyntax {
+					// Minification folds complement operations since they are unlikely to result in larger output
+					if number, ok := js_ast.ToNumberWithoutSideEffects(e.Value.Data); ok {
+						return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: float64(^js_ast.ToInt32(number))}}, exprOut{}
+					}
+				}
+
+				////////////////////////////////////////////////////////////////////////////////
+				// All assignment operators below here
+
+			case js_ast.UnOpPreDec, js_ast.UnOpPreInc, js_ast.UnOpPostDec, js_ast.UnOpPostInc:
+				if target, loc, private := p.extractPrivateIndex(e.Value); private != nil {
+					return p.lowerPrivateSetUnOp(target, loc, private, e.Op), exprOut{}
+				}
+				if property := p.extractSuperProperty(e.Value); property.Data != nil {
+					e.Value = p.callSuperPropertyWrapper(expr.Loc, property)
+				}
+			}
+		}
+
+		// "-(a, b)" => "a, -b"
+		if p.options.minifySyntax && e.Op != js_ast.UnOpDelete && e.Op != js_ast.UnOpTypeof {
+			if comma, ok := e.Value.Data.(*js_ast.EBinary); ok && comma.Op == js_ast.BinOpComma {
+				return js_ast.JoinWithComma(comma.Left, js_ast.Expr{
+					Loc: comma.Right.Loc,
+					Data: &js_ast.EUnary{
+						Op:    e.Op,
+						Value: comma.Right,
+					},
+				}), exprOut{}
+			}
+		}
+
+	case *js_ast.EIf:
+		e.Test = p.visitExpr(e.Test)
+
+		if p.options.minifySyntax {
+			e.Test = p.astHelpers.SimplifyBooleanExpr(e.Test)
+		}
+
+		// Propagate these flags into the branches
+		childIn := exprIn{
+			shouldMangleStringsAsProps: in.shouldMangleStringsAsProps,
+		}
+
+		// Fold constants
+		if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(e.Test.Data); !ok {
+			e.Yes, _ = p.visitExprInOut(e.Yes, childIn)
+			e.No, _ = p.visitExprInOut(e.No, childIn)
+		} else {
+			// Mark the control flow as dead if the branch is never taken
+			if boolean {
+				// "true ? live : dead"
+				e.Yes, _ = p.visitExprInOut(e.Yes, childIn)
+				old := p.isControlFlowDead
+				p.isControlFlowDead = true
+				e.No, _ = p.visitExprInOut(e.No, childIn)
+				p.isControlFlowDead = old
+
+				if p.options.minifySyntax {
+					// "(a, true) ? b : c" => "a, b"
+					if sideEffects == js_ast.CouldHaveSideEffects {
+						return js_ast.JoinWithComma(p.astHelpers.SimplifyUnusedExpr(e.Test, p.options.unsupportedJSFeatures), e.Yes), exprOut{}
+					}
+
+					return e.Yes, exprOut{}
+				}
+			} else {
+				// "false ? dead : live"
+				old := p.isControlFlowDead
+				p.isControlFlowDead = true
+				e.Yes, _ = p.visitExprInOut(e.Yes, childIn)
+				p.isControlFlowDead = old
+				e.No, _ = p.visitExprInOut(e.No, childIn)
+
+				if p.options.minifySyntax {
+					// "(a, false) ? b : c" => "a, c"
+					if sideEffects == js_ast.CouldHaveSideEffects {
+						return js_ast.JoinWithComma(p.astHelpers.SimplifyUnusedExpr(e.Test, p.options.unsupportedJSFeatures), e.No), exprOut{}
+					}
+
+					return e.No, exprOut{}
+				}
+			}
+		}
+
+		if p.options.minifySyntax {
+			return p.astHelpers.MangleIfExpr(expr.Loc, e, p.options.unsupportedJSFeatures), exprOut{}
+		}
+
+	case *js_ast.EAwait:
+		// Silently remove unsupported top-level "await" in dead code branches
+		if p.fnOrArrowDataVisit.isOutsideFnOrArrow {
+			if p.isControlFlowDead && (p.options.unsupportedJSFeatures.Has(compat.TopLevelAwait) || !p.options.outputFormat.KeepESMImportExportSyntax()) {
+				return p.visitExprInOut(e.Value, in)
+			} else {
+				p.liveTopLevelAwaitKeyword = logger.Range{Loc: expr.Loc, Len: 5}
+				p.markSyntaxFeature(compat.TopLevelAwait, logger.Range{Loc: expr.Loc, Len: 5})
+			}
+		}
+
+		p.awaitTarget = e.Value.Data
+		e.Value = p.visitExpr(e.Value)
+
+		// "await" expressions turn into "yield" expressions when lowering
+		return p.maybeLowerAwait(expr.Loc, e), exprOut{}
+
+	case *js_ast.EYield:
+		if e.ValueOrNil.Data != nil {
+			e.ValueOrNil = p.visitExpr(e.ValueOrNil)
+		}
+
+		// "yield* x" turns into "yield* __yieldStar(x)" when lowering async generator functions
+		if e.IsStar && p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator) && p.fnOrArrowDataVisit.isGenerator {
+			e.ValueOrNil = p.callRuntime(expr.Loc, "__yieldStar", []js_ast.Expr{e.ValueOrNil})
+		}
+
+	case *js_ast.EArray:
+		if in.assignTarget != js_ast.AssignTargetNone {
+			if e.CommaAfterSpread.Start != 0 {
+				p.log.AddError(&p.tracker, logger.Range{Loc: e.CommaAfterSpread, Len: 1}, "Unexpected \",\" after rest pattern")
+			}
+			p.markSyntaxFeature(compat.Destructuring, logger.Range{Loc: expr.Loc, Len: 1})
+		}
+		hasSpread := false
+		for i, item := range e.Items {
+			switch e2 := item.Data.(type) {
+			case *js_ast.EMissing:
+			case *js_ast.ESpread:
+				e2.Value, _ = p.visitExprInOut(e2.Value, exprIn{assignTarget: in.assignTarget})
+				hasSpread = true
+			case *js_ast.EBinary:
+				if in.assignTarget != js_ast.AssignTargetNone && e2.Op == js_ast.BinOpAssign {
+					e2.Left, _ = p.visitExprInOut(e2.Left, exprIn{assignTarget: js_ast.AssignTargetReplace})
+
+					// Propagate the name to keep from the binding into the initializer
+					if id, ok := e2.Left.Data.(*js_ast.EIdentifier); ok {
+						p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+						p.nameToKeepIsFor = e2.Right.Data
+					}
+
+					e2.Right = p.visitExpr(e2.Right)
+				} else {
+					item, _ = p.visitExprInOut(item, exprIn{assignTarget: in.assignTarget})
+				}
+			default:
+				item, _ = p.visitExprInOut(item, exprIn{assignTarget: in.assignTarget})
+			}
+			e.Items[i] = item
+		}
+
+		// "[1, ...[2, 3], 4]" => "[1, 2, 3, 4]"
+		if p.options.minifySyntax && hasSpread && in.assignTarget == js_ast.AssignTargetNone {
+			e.Items = js_ast.InlineSpreadsOfArrayLiterals(e.Items)
+		}
+
+	case *js_ast.EObject:
+		if in.assignTarget != js_ast.AssignTargetNone {
+			if e.CommaAfterSpread.Start != 0 {
+				p.log.AddError(&p.tracker, logger.Range{Loc: e.CommaAfterSpread, Len: 1}, "Unexpected \",\" after rest pattern")
+			}
+			p.markSyntaxFeature(compat.Destructuring, logger.Range{Loc: expr.Loc, Len: 1})
+		}
+
+		hasSpread := false
+		protoRange := logger.Range{}
+		innerClassNameRef := ast.InvalidRef
+
+		for i := range e.Properties {
+			property := &e.Properties[i]
+
+			if property.Kind != js_ast.PropertySpread {
+				key := property.Key
+				if mangled, ok := key.Data.(*js_ast.ENameOfSymbol); ok {
+					mangled.Ref = p.symbolForMangledProp(p.loadNameFromRef(mangled.Ref))
+				} else {
+					key, _ = p.visitExprInOut(property.Key, exprIn{
+						shouldMangleStringsAsProps: true,
+					})
+					property.Key = key
+				}
+
+				// Forbid duplicate "__proto__" properties according to the specification
+				if !property.Flags.Has(js_ast.PropertyIsComputed) && !property.Flags.Has(js_ast.PropertyWasShorthand) &&
+					property.Kind == js_ast.PropertyField && in.assignTarget == js_ast.AssignTargetNone {
+					if str, ok := key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(str.Value, "__proto__") {
+						r := js_lexer.RangeOfIdentifier(p.source, key.Loc)
+						if protoRange.Len > 0 {
+							p.log.AddErrorWithNotes(&p.tracker, r,
+								"Cannot specify the \"__proto__\" property more than once per object",
+								[]logger.MsgData{p.tracker.MsgData(protoRange, "The earlier \"__proto__\" property is here:")})
+						} else {
+							protoRange = r
+						}
+					}
+				}
+
+				// "{['x']: y}" => "{x: y}"
+				if p.options.minifySyntax && property.Flags.Has(js_ast.PropertyIsComputed) {
+					if inlined, ok := key.Data.(*js_ast.EInlinedEnum); ok {
+						switch inlined.Value.Data.(type) {
+						case *js_ast.EString, *js_ast.ENumber:
+							key.Data = inlined.Value.Data
+							property.Key.Data = key.Data
+						}
+					}
+					switch k := key.Data.(type) {
+					case *js_ast.ENumber, *js_ast.ENameOfSymbol:
+						property.Flags &= ^js_ast.PropertyIsComputed
+					case *js_ast.EString:
+						if !helpers.UTF16EqualsString(k.Value, "__proto__") {
+							property.Flags &= ^js_ast.PropertyIsComputed
+						}
+					}
+				}
+			} else {
+				hasSpread = true
+			}
+
+			// Extract the initializer for expressions like "({ a: b = c } = d)"
+			if in.assignTarget != js_ast.AssignTargetNone && property.InitializerOrNil.Data == nil && property.ValueOrNil.Data != nil {
+				if binary, ok := property.ValueOrNil.Data.(*js_ast.EBinary); ok && binary.Op == js_ast.BinOpAssign {
+					property.InitializerOrNil = binary.Right
+					property.ValueOrNil = binary.Left
+				}
+			}
+
+			if property.ValueOrNil.Data != nil {
+				oldIsInStaticClassContext := p.fnOnlyDataVisit.isInStaticClassContext
+				oldInnerClassNameRef := p.fnOnlyDataVisit.innerClassNameRef
+
+				// If this is an async method and async methods are unsupported,
+				// generate a temporary variable in case this async method contains a
+				// "super" property reference. If that happens, the "super" expression
+				// must be lowered which will need a reference to this object literal.
+				if property.Kind == js_ast.PropertyMethod && p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) {
+					if fn, ok := property.ValueOrNil.Data.(*js_ast.EFunction); ok && fn.Fn.IsAsync {
+						if innerClassNameRef == ast.InvalidRef {
+							innerClassNameRef = p.generateTempRef(tempRefNeedsDeclareMayBeCapturedInsideLoop, "")
+						}
+						p.fnOnlyDataVisit.isInStaticClassContext = true
+						p.fnOnlyDataVisit.innerClassNameRef = &innerClassNameRef
+					}
+				}
+
+				// Propagate the name to keep from the property into the value
+				if str, ok := property.Key.Data.(*js_ast.EString); ok {
+					p.nameToKeep = helpers.UTF16ToString(str.Value)
+					p.nameToKeepIsFor = property.ValueOrNil.Data
+				}
+
+				property.ValueOrNil, _ = p.visitExprInOut(property.ValueOrNil, exprIn{
+					isMethod:     property.Kind.IsMethodDefinition(),
+					assignTarget: in.assignTarget,
+				})
+
+				p.fnOnlyDataVisit.innerClassNameRef = oldInnerClassNameRef
+				p.fnOnlyDataVisit.isInStaticClassContext = oldIsInStaticClassContext
+			}
+
+			if property.InitializerOrNil.Data != nil {
+				// Propagate the name to keep from the binding into the initializer
+				if id, ok := property.ValueOrNil.Data.(*js_ast.EIdentifier); ok {
+					p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+					p.nameToKeepIsFor = property.InitializerOrNil.Data
+				}
+
+				property.InitializerOrNil = p.visitExpr(property.InitializerOrNil)
+			}
+
+			// "{ '123': 4 }" => "{ 123: 4 }" (this is done late to allow "'123'" to be mangled)
+			if p.options.minifySyntax {
+				if str, ok := property.Key.Data.(*js_ast.EString); ok {
+					if numberValue, ok := js_ast.StringToEquivalentNumberValue(str.Value); ok && numberValue >= 0 {
+						property.Key.Data = &js_ast.ENumber{Value: numberValue}
+					}
+				}
+			}
+		}
+
+		// Check for and warn about duplicate keys in object literals
+		if !p.suppressWarningsAboutWeirdCode {
+			p.warnAboutDuplicateProperties(e.Properties, duplicatePropertiesInObject)
+		}
+
+		if in.assignTarget == js_ast.AssignTargetNone {
+			// "{a, ...{b, c}, d}" => "{a, b, c, d}"
+			if p.options.minifySyntax && hasSpread {
+				e.Properties = js_ast.MangleObjectSpread(e.Properties)
+			}
+
+			// Object expressions represent both object literals and binding patterns.
+			// Only lower object spread if we're an object literal, not a binding pattern.
+			value := p.lowerObjectSpread(expr.Loc, e)
+
+			// If we generated and used the temporary variable for a lowered "super"
+			// property reference inside a lowered "async" method, then initialize
+			// the temporary with this object literal.
+			if innerClassNameRef != ast.InvalidRef && p.symbols[innerClassNameRef.InnerIndex].UseCountEstimate > 0 {
+				p.recordUsage(innerClassNameRef)
+				value = js_ast.Assign(js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: innerClassNameRef}}, value)
+			}
+
+			return value, exprOut{}
+		}
+
+	case *js_ast.EImportCall:
+		isAwaitTarget := e == p.awaitTarget
+		isThenCatchTarget := e == p.thenCatchChain.nextTarget && p.thenCatchChain.hasCatch
+		e.Expr = p.visitExpr(e.Expr)
+
+		var assertOrWith *ast.ImportAssertOrWith
+		var flags ast.ImportRecordFlags
+		if e.OptionsOrNil.Data != nil {
+			e.OptionsOrNil = p.visitExpr(e.OptionsOrNil)
+
+			// If there's an additional argument, this can't be split because the
+			// additional argument requires evaluation and our AST nodes can't be
+			// reused in different places in the AST (e.g. function scopes must be
+			// unique). Also the additional argument may have side effects and we
+			// don't currently account for that.
+			why := "the second argument was not an object literal"
+			whyLoc := e.OptionsOrNil.Loc
+
+			// However, make a special case for an additional argument that contains
+			// only an "assert" or a "with" clause. In that case we can split this
+			// AST node.
+			if object, ok := e.OptionsOrNil.Data.(*js_ast.EObject); ok {
+				if len(object.Properties) == 1 {
+					if prop := object.Properties[0]; prop.Kind == js_ast.PropertyField && !prop.Flags.Has(js_ast.PropertyIsComputed) {
+						if str, ok := prop.Key.Data.(*js_ast.EString); ok && (helpers.UTF16EqualsString(str.Value, "assert") || helpers.UTF16EqualsString(str.Value, "with")) {
+							keyword := ast.WithKeyword
+							if helpers.UTF16EqualsString(str.Value, "assert") {
+								keyword = ast.AssertKeyword
+							}
+							if value, ok := prop.ValueOrNil.Data.(*js_ast.EObject); ok {
+								entries := []ast.AssertOrWithEntry{}
+								for _, p := range value.Properties {
+									if p.Kind == js_ast.PropertyField && !p.Flags.Has(js_ast.PropertyIsComputed) {
+										if key, ok := p.Key.Data.(*js_ast.EString); ok {
+											if value, ok := p.ValueOrNil.Data.(*js_ast.EString); ok {
+												entries = append(entries, ast.AssertOrWithEntry{
+													Key:             key.Value,
+													KeyLoc:          p.Key.Loc,
+													Value:           value.Value,
+													ValueLoc:        p.ValueOrNil.Loc,
+													PreferQuotedKey: p.Flags.Has(js_ast.PropertyPreferQuotedKey),
+												})
+												if keyword == ast.AssertKeyword && helpers.UTF16EqualsString(key.Value, "type") && helpers.UTF16EqualsString(value.Value, "json") {
+													flags |= ast.AssertTypeJSON
+												}
+												continue
+											} else {
+												why = fmt.Sprintf("the value for the property %q was not a string literal",
+													helpers.UTF16ToString(key.Value))
+												whyLoc = p.ValueOrNil.Loc
+											}
+										} else {
+											why = "this property was not a string literal"
+											whyLoc = p.Key.Loc
+										}
+									} else {
+										why = "this property was invalid"
+										whyLoc = p.Key.Loc
+									}
+									entries = nil
+									break
+								}
+								if entries != nil {
+									if keyword == ast.AssertKeyword {
+										p.maybeWarnAboutAssertKeyword(prop.Key.Loc)
+									}
+									assertOrWith = &ast.ImportAssertOrWith{
+										Entries:            entries,
+										Keyword:            keyword,
+										KeywordLoc:         prop.Key.Loc,
+										InnerOpenBraceLoc:  prop.ValueOrNil.Loc,
+										InnerCloseBraceLoc: value.CloseBraceLoc,
+										OuterOpenBraceLoc:  e.OptionsOrNil.Loc,
+										OuterCloseBraceLoc: object.CloseBraceLoc,
+									}
+									why = ""
+								}
+							} else {
+								why = "the value for \"assert\" was not an object literal"
+								whyLoc = prop.ValueOrNil.Loc
+							}
+						} else {
+							why = "this property was not called \"assert\" or \"with\""
+							whyLoc = prop.Key.Loc
+						}
+					} else {
+						why = "this property was invalid"
+						whyLoc = prop.Key.Loc
+					}
+				} else {
+					why = "the second argument was not an object literal with a single property called \"assert\" or \"with\""
+					whyLoc = e.OptionsOrNil.Loc
+				}
+			}
+
+			// Handle the case that isn't just an import assertion or attribute clause
+			if why != "" {
+				// Only warn when bundling
+				if p.options.mode == config.ModeBundle {
+					text := "This \"import()\" was not recognized because " + why
+					kind := logger.Warning
+					if p.suppressWarningsAboutWeirdCode {
+						kind = logger.Debug
+					}
+					p.log.AddID(logger.MsgID_JS_UnsupportedDynamicImport, kind, &p.tracker, logger.Range{Loc: whyLoc}, text)
+				}
+
+				// If import assertions and/attributes are both not supported in the
+				// target platform, then "import()" cannot accept a second argument
+				// and keeping them would be a syntax error, so we need to get rid of
+				// them. We can't just not print them because they may have important
+				// side effects. Attempt to discard them without changing side effects
+				// and generate an error if that isn't possible.
+				if p.options.unsupportedJSFeatures.Has(compat.ImportAssertions) && p.options.unsupportedJSFeatures.Has(compat.ImportAttributes) {
+					if p.astHelpers.ExprCanBeRemovedIfUnused(e.OptionsOrNil) {
+						e.OptionsOrNil = js_ast.Expr{}
+					} else {
+						p.markSyntaxFeature(compat.ImportAttributes, logger.Range{Loc: e.OptionsOrNil.Loc})
+					}
+				}
+
+				// Stop now so we don't try to split "?:" expressions below and
+				// potentially end up with an AST node reused multiple times
+				break
+			}
+		}
+
+		return p.maybeTransposeIfExprChain(e.Expr, func(arg js_ast.Expr) js_ast.Expr {
+			// The argument must be a string
+			if str, ok := arg.Data.(*js_ast.EString); ok {
+				// Ignore calls to import() if the control flow is provably dead here.
+				// We don't want to spend time scanning the required files if they will
+				// never be used.
+				if p.isControlFlowDead {
+					return js_ast.Expr{Loc: arg.Loc, Data: js_ast.ENullShared}
+				}
+
+				importRecordIndex := p.addImportRecord(ast.ImportDynamic, p.source.RangeOfString(arg.Loc), helpers.UTF16ToString(str.Value), assertOrWith, flags)
+				if isAwaitTarget && p.fnOrArrowDataVisit.tryBodyCount != 0 {
+					record := &p.importRecords[importRecordIndex]
+					record.Flags |= ast.HandlesImportErrors
+					record.ErrorHandlerLoc = p.fnOrArrowDataVisit.tryCatchLoc
+				} else if isThenCatchTarget {
+					record := &p.importRecords[importRecordIndex]
+					record.Flags |= ast.HandlesImportErrors
+					record.ErrorHandlerLoc = p.thenCatchChain.catchLoc
+				}
+				p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, importRecordIndex)
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EImportString{
+					ImportRecordIndex: importRecordIndex,
+					CloseParenLoc:     e.CloseParenLoc,
+				}}
+			}
+
+			// Handle glob patterns
+			if p.options.mode == config.ModeBundle {
+				if value := p.handleGlobPattern(arg, ast.ImportDynamic, "globImport", assertOrWith); value.Data != nil {
+					return value
+				}
+			}
+
+			// Use a debug log so people can see this if they want to
+			r := js_lexer.RangeOfIdentifier(p.source, expr.Loc)
+			p.log.AddID(logger.MsgID_JS_UnsupportedDynamicImport, logger.Debug, &p.tracker, r,
+				"This \"import\" expression will not be bundled because the argument is not a string literal")
+
+			// We need to convert this into a call to "require()" if ES6 syntax is
+			// not supported in the current output format. The full conversion:
+			//
+			//   Before:
+			//     import(foo)
+			//
+			//   After:
+			//     Promise.resolve().then(() => __toESM(require(foo)))
+			//
+			// This is normally done by the printer since we don't know during the
+			// parsing stage whether this module is external or not. However, it's
+			// guaranteed to be external if the argument isn't a string. We handle
+			// this case here instead of in the printer because both the printer
+			// and the linker currently need an import record to handle this case
+			// correctly, and you need a string literal to get an import record.
+			if p.options.unsupportedJSFeatures.Has(compat.DynamicImport) {
+				var then js_ast.Expr
+				value := p.callRuntime(arg.Loc, "__toESM", []js_ast.Expr{{Loc: expr.Loc, Data: &js_ast.ECall{
+					Target:        p.valueToSubstituteForRequire(expr.Loc),
+					Args:          []js_ast.Expr{arg},
+					CloseParenLoc: e.CloseParenLoc,
+				}}})
+				body := js_ast.FnBody{Loc: expr.Loc, Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: expr.Loc, Data: &js_ast.SReturn{ValueOrNil: value}}}}}
+				if p.options.unsupportedJSFeatures.Has(compat.Arrow) {
+					then = js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: body}}}
+				} else {
+					then = js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EArrow{Body: body, PreferExpr: true}}
+				}
+				return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EDot{
+						Target: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+							Target: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EDot{
+								Target:  js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: p.makePromiseRef()}},
+								Name:    "resolve",
+								NameLoc: expr.Loc,
+							}},
+							Kind: js_ast.TargetWasOriginallyPropertyAccess,
+						}},
+						Name:    "then",
+						NameLoc: expr.Loc,
+					}},
+					Args: []js_ast.Expr{then},
+					Kind: js_ast.TargetWasOriginallyPropertyAccess,
+				}}
+			}
+
+			return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EImportCall{
+				Expr:          arg,
+				OptionsOrNil:  e.OptionsOrNil,
+				CloseParenLoc: e.CloseParenLoc,
+			}}
+		}), exprOut{}
+
+	case *js_ast.ECall:
+		p.callTarget = e.Target.Data
+
+		// Track ".then().catch()" chains
+		p.thenCatchChain = thenCatchChain{
+			nextTarget:      e.Target.Data,
+			hasMultipleArgs: len(e.Args) >= 2,
+			hasCatch:        p.thenCatchChain.nextTarget == e && p.thenCatchChain.hasCatch,
+			catchLoc:        p.thenCatchChain.catchLoc,
+		}
+		if p.thenCatchChain.hasMultipleArgs {
+			p.thenCatchChain.catchLoc = e.Args[1].Loc
+		}
+
+		// Prepare to recognize "require.resolve()" and "Object.create" calls
+		couldBeRequireResolve := false
+		couldBeObjectCreate := false
+		if len(e.Args) == 1 {
+			if dot, ok := e.Target.Data.(*js_ast.EDot); ok && dot.OptionalChain == js_ast.OptionalChainNone {
+				if p.options.mode != config.ModePassThrough && dot.Name == "resolve" {
+					couldBeRequireResolve = true
+				} else if dot.Name == "create" {
+					couldBeObjectCreate = true
+				}
+			}
+		}
+
+		wasIdentifierBeforeVisit := false
+		isParenthesizedOptionalChain := false
+		switch e2 := e.Target.Data.(type) {
+		case *js_ast.EIdentifier:
+			wasIdentifierBeforeVisit = true
+		case *js_ast.EDot:
+			isParenthesizedOptionalChain = e.OptionalChain == js_ast.OptionalChainNone && e2.OptionalChain != js_ast.OptionalChainNone
+		case *js_ast.EIndex:
+			isParenthesizedOptionalChain = e.OptionalChain == js_ast.OptionalChainNone && e2.OptionalChain != js_ast.OptionalChainNone
+		}
+		target, out := p.visitExprInOut(e.Target, exprIn{
+			hasChainParent: e.OptionalChain == js_ast.OptionalChainContinue,
+
+			// Signal to our child if this is an ECall at the start of an optional
+			// chain. If so, the child will need to stash the "this" context for us
+			// that we need for the ".call(this, ...args)".
+			storeThisArgForParentOptionalChain: e.OptionalChain == js_ast.OptionalChainStart || isParenthesizedOptionalChain,
+		})
+		e.Target = target
+		p.warnAboutImportNamespaceCall(e.Target, exprKindCall)
+
+		hasSpread := false
+		oldIsControlFlowDead := p.isControlFlowDead
+
+		// If we're removing this call, don't count any arguments as symbol uses
+		if out.methodCallMustBeReplacedWithUndefined {
+			if js_ast.IsPropertyAccess(e.Target) {
+				p.isControlFlowDead = true
+			} else {
+				out.methodCallMustBeReplacedWithUndefined = false
+			}
+		}
+
+		// Visit the arguments
+		for i, arg := range e.Args {
+			arg = p.visitExpr(arg)
+			if _, ok := arg.Data.(*js_ast.ESpread); ok {
+				hasSpread = true
+			}
+			e.Args[i] = arg
+		}
+
+		// Mark side-effect free IIFEs with "/* @__PURE__ */"
+		if !e.CanBeUnwrappedIfUnused {
+			switch target := e.Target.Data.(type) {
+			case *js_ast.EArrow:
+				if !target.IsAsync && p.iifeCanBeRemovedIfUnused(target.Args, target.Body) {
+					e.CanBeUnwrappedIfUnused = true
+				}
+			case *js_ast.EFunction:
+				if !target.Fn.IsAsync && !target.Fn.IsGenerator && p.iifeCanBeRemovedIfUnused(target.Fn.Args, target.Fn.Body) {
+					e.CanBeUnwrappedIfUnused = true
+				}
+			}
+		}
+
+		// Our hack for reading Yarn PnP files is implemented here:
+		if p.options.decodeHydrateRuntimeStateYarnPnP {
+			if id, ok := e.Target.Data.(*js_ast.EIdentifier); ok && p.symbols[id.Ref.InnerIndex].OriginalName == "hydrateRuntimeState" && len(e.Args) >= 1 {
+				switch arg := e.Args[0].Data.(type) {
+				case *js_ast.EObject:
+					// "hydrateRuntimeState(<object literal>)"
+					if arg := e.Args[0]; isValidJSON(arg) {
+						p.manifestForYarnPnP = arg
+					}
+
+				case *js_ast.ECall:
+					// "hydrateRuntimeState(JSON.parse(<something>))"
+					if len(arg.Args) == 1 {
+						if dot, ok := arg.Target.Data.(*js_ast.EDot); ok && dot.Name == "parse" {
+							if id, ok := dot.Target.Data.(*js_ast.EIdentifier); ok {
+								if symbol := &p.symbols[id.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound && symbol.OriginalName == "JSON" {
+									arg := arg.Args[0]
+									switch a := arg.Data.(type) {
+									case *js_ast.EString:
+										// "hydrateRuntimeState(JSON.parse(<string literal>))"
+										source := logger.Source{KeyPath: p.source.KeyPath, Contents: helpers.UTF16ToString(a.Value)}
+										stringInJSTable := logger.GenerateStringInJSTable(p.source.Contents, arg.Loc, source.Contents)
+										log := logger.NewStringInJSLog(p.log, &p.tracker, stringInJSTable)
+										p.manifestForYarnPnP, _ = ParseJSON(log, source, JSONOptions{})
+										remapExprLocsInJSON(&p.manifestForYarnPnP, stringInJSTable)
+
+									case *js_ast.EIdentifier:
+										// "hydrateRuntimeState(JSON.parse(<identifier>))"
+										if data, ok := p.stringLocalsForYarnPnP[a.Ref]; ok {
+											source := logger.Source{KeyPath: p.source.KeyPath, Contents: helpers.UTF16ToString(data.value)}
+											stringInJSTable := logger.GenerateStringInJSTable(p.source.Contents, data.loc, source.Contents)
+											log := logger.NewStringInJSLog(p.log, &p.tracker, stringInJSTable)
+											p.manifestForYarnPnP, _ = ParseJSON(log, source, JSONOptions{})
+											remapExprLocsInJSON(&p.manifestForYarnPnP, stringInJSTable)
+										}
+									}
+								}
+							}
+						}
+					}
+				}
+			}
+		}
+
+		// Stop now if this call must be removed
+		if out.methodCallMustBeReplacedWithUndefined {
+			p.isControlFlowDead = oldIsControlFlowDead
+			return js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}, exprOut{}
+		}
+
+		// "foo(1, ...[2, 3], 4)" => "foo(1, 2, 3, 4)"
+		if p.options.minifySyntax && hasSpread {
+			e.Args = js_ast.InlineSpreadsOfArrayLiterals(e.Args)
+		}
+
+		switch t := target.Data.(type) {
+		case *js_ast.EImportIdentifier:
+			// If this function is inlined, allow it to be tree-shaken
+			if p.options.minifySyntax && !p.isControlFlowDead {
+				p.convertSymbolUseToCall(t.Ref, len(e.Args) == 1 && !hasSpread)
+			}
+
+		case *js_ast.EIdentifier:
+			// Detect if this is a direct eval. Note that "(1 ? eval : 0)(x)" will
+			// become "eval(x)" after we visit the target due to dead code elimination,
+			// but that doesn't mean it should become a direct eval.
+			//
+			// Note that "eval?.(x)" is considered an indirect eval. There was debate
+			// about this after everyone implemented it as a direct eval, but the
+			// language committee said it was indirect and everyone had to change it:
+			// https://github.com/tc39/ecma262/issues/2062.
+			if e.OptionalChain == js_ast.OptionalChainNone {
+				symbol := p.symbols[t.Ref.InnerIndex]
+				if wasIdentifierBeforeVisit && symbol.OriginalName == "eval" {
+					e.Kind = js_ast.DirectEval
+
+					// Pessimistically assume that if this looks like a CommonJS module
+					// (e.g. no "export" keywords), a direct call to "eval" means that
+					// code could potentially access "module" or "exports".
+					if p.options.mode == config.ModeBundle && !p.isFileConsideredToHaveESMExports {
+						p.recordUsage(p.moduleRef)
+						p.recordUsage(p.exportsRef)
+					}
+
+					// Mark this scope and all parent scopes as containing a direct eval.
+					// This will prevent us from renaming any symbols.
+					for s := p.currentScope; s != nil; s = s.Parent {
+						s.ContainsDirectEval = true
+					}
+
+					// Warn when direct eval is used in an ESM file. There is no way we
+					// can guarantee that this will work correctly for top-level imported
+					// and exported symbols due to scope hoisting. Except don't warn when
+					// this code is in a 3rd-party library because there's nothing people
+					// will be able to do about the warning.
+					text := "Using direct eval with a bundler is not recommended and may cause problems"
+					kind := logger.Debug
+					if p.options.mode == config.ModeBundle && p.isFileConsideredESM && !p.suppressWarningsAboutWeirdCode {
+						kind = logger.Warning
+					}
+					p.log.AddIDWithNotes(logger.MsgID_JS_DirectEval, kind, &p.tracker, js_lexer.RangeOfIdentifier(p.source, e.Target.Loc), text,
+						[]logger.MsgData{{Text: "You can read more about direct eval and bundling here: https://esbuild.github.io/link/direct-eval"}})
+				} else if symbol.Flags.Has(ast.CallCanBeUnwrappedIfUnused) {
+					// Automatically add a "/* @__PURE__ */" comment to file-local calls
+					// of functions declared with a "/* @__NO_SIDE_EFFECTS__ */" comment
+					t.CallCanBeUnwrappedIfUnused = true
+				}
+			}
+
+			// Optimize references to global constructors
+			if p.options.minifySyntax && t.CanBeRemovedIfUnused && len(e.Args) <= 1 && !hasSpread {
+				if symbol := &p.symbols[t.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound {
+					// Note: We construct expressions by assigning to "expr.Data" so
+					// that the source map position for the constructor is preserved
+					switch symbol.OriginalName {
+					case "Boolean":
+						if len(e.Args) == 0 {
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EBoolean{Value: false}}, exprOut{}
+						} else {
+							expr.Data = &js_ast.EUnary{Value: p.astHelpers.SimplifyBooleanExpr(e.Args[0]), Op: js_ast.UnOpNot}
+							return js_ast.Not(expr), exprOut{}
+						}
+
+					case "Number":
+						if len(e.Args) == 0 {
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: 0}}, exprOut{}
+						} else {
+							arg := e.Args[0]
+
+							switch js_ast.KnownPrimitiveType(arg.Data) {
+							case js_ast.PrimitiveNumber:
+								return arg, exprOut{}
+
+							case
+								js_ast.PrimitiveUndefined, // NaN
+								js_ast.PrimitiveNull,      // 0
+								js_ast.PrimitiveBoolean,   // 0 or 1
+								js_ast.PrimitiveString:    // StringToNumber
+								if number, ok := js_ast.ToNumberWithoutSideEffects(arg.Data); ok {
+									expr.Data = &js_ast.ENumber{Value: number}
+								} else {
+									expr.Data = &js_ast.EUnary{Value: arg, Op: js_ast.UnOpPos}
+								}
+								return expr, exprOut{}
+							}
+						}
+
+					case "String":
+						if len(e.Args) == 0 {
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: nil}}, exprOut{}
+						} else {
+							arg := e.Args[0]
+
+							switch js_ast.KnownPrimitiveType(arg.Data) {
+							case js_ast.PrimitiveString:
+								return arg, exprOut{}
+							}
+						}
+
+					case "BigInt":
+						if len(e.Args) == 1 {
+							arg := e.Args[0]
+
+							switch js_ast.KnownPrimitiveType(arg.Data) {
+							case js_ast.PrimitiveBigInt:
+								return arg, exprOut{}
+							}
+						}
+					}
+				}
+			}
+
+			// Copy the call side effect flag over if this is a known target
+			if t.CallCanBeUnwrappedIfUnused {
+				e.CanBeUnwrappedIfUnused = true
+			}
+
+			// If this function is inlined, allow it to be tree-shaken
+			if p.options.minifySyntax && !p.isControlFlowDead {
+				p.convertSymbolUseToCall(t.Ref, len(e.Args) == 1 && !hasSpread)
+			}
+
+		case *js_ast.EDot:
+			// Recognize "require.resolve()" calls
+			if couldBeRequireResolve && t.Name == "resolve" {
+				if id, ok := t.Target.Data.(*js_ast.EIdentifier); ok && id.Ref == p.requireRef {
+					p.ignoreUsage(p.requireRef)
+					return p.maybeTransposeIfExprChain(e.Args[0], func(arg js_ast.Expr) js_ast.Expr {
+						if str, ok := e.Args[0].Data.(*js_ast.EString); ok {
+							// Ignore calls to require.resolve() if the control flow is provably
+							// dead here. We don't want to spend time scanning the required files
+							// if they will never be used.
+							if p.isControlFlowDead {
+								return js_ast.Expr{Loc: expr.Loc, Data: js_ast.ENullShared}
+							}
+
+							importRecordIndex := p.addImportRecord(ast.ImportRequireResolve, p.source.RangeOfString(e.Args[0].Loc), helpers.UTF16ToString(str.Value), nil, 0)
+							if p.fnOrArrowDataVisit.tryBodyCount != 0 {
+								record := &p.importRecords[importRecordIndex]
+								record.Flags |= ast.HandlesImportErrors
+								record.ErrorHandlerLoc = p.fnOrArrowDataVisit.tryCatchLoc
+							}
+							p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, importRecordIndex)
+
+							// Create a new expression to represent the operation
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ERequireResolveString{
+								ImportRecordIndex: importRecordIndex,
+								CloseParenLoc:     e.CloseParenLoc,
+							}}
+						}
+
+						// Otherwise just return a clone of the "require.resolve()" call
+						return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+							Target: js_ast.Expr{Loc: e.Target.Loc, Data: &js_ast.EDot{
+								Target:  p.valueToSubstituteForRequire(t.Target.Loc),
+								Name:    t.Name,
+								NameLoc: t.NameLoc,
+							}},
+							Args:          []js_ast.Expr{arg},
+							Kind:          e.Kind,
+							CloseParenLoc: e.CloseParenLoc,
+						}}
+					}), exprOut{}
+				}
+			}
+
+			// Recognize "Object.create()" calls
+			if couldBeObjectCreate && t.Name == "create" {
+				if id, ok := t.Target.Data.(*js_ast.EIdentifier); ok {
+					if symbol := &p.symbols[id.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound && symbol.OriginalName == "Object" {
+						switch e.Args[0].Data.(type) {
+						case *js_ast.ENull, *js_ast.EObject:
+							// Mark "Object.create(null)" and "Object.create({})" as pure
+							e.CanBeUnwrappedIfUnused = true
+						}
+					}
+				}
+			}
+
+			if p.options.minifySyntax {
+				switch t.Name {
+				case "charCodeAt":
+					// Recognize "charCodeAt()" calls
+					if str, ok := t.Target.Data.(*js_ast.EString); ok && len(e.Args) <= 1 {
+						index := 0
+						hasIndex := false
+						if len(e.Args) == 0 {
+							hasIndex = true
+						} else if num, ok := e.Args[0].Data.(*js_ast.ENumber); ok && num.Value == math.Trunc(num.Value) && math.Abs(num.Value) <= 0x7FFF_FFFF {
+							index = int(num.Value)
+							hasIndex = true
+						}
+						if hasIndex {
+							if index >= 0 && index < len(str.Value) {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: float64(str.Value[index])}}, exprOut{}
+							} else {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: math.NaN()}}, exprOut{}
+							}
+						}
+					}
+
+				case "fromCharCode":
+					// Recognize "fromCharCode()" calls
+					if id, ok := t.Target.Data.(*js_ast.EIdentifier); ok {
+						if symbol := &p.symbols[id.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound && symbol.OriginalName == "String" {
+							charCodes := make([]uint16, 0, len(e.Args))
+							for _, arg := range e.Args {
+								arg, ok := js_ast.ToNumberWithoutSideEffects(arg.Data)
+								if !ok {
+									break
+								}
+								charCodes = append(charCodes, uint16(js_ast.ToInt32(arg)))
+							}
+							if len(charCodes) == len(e.Args) {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: charCodes}}, exprOut{}
+							}
+						}
+					}
+
+				case "toString":
+					switch target := t.Target.Data.(type) {
+					case *js_ast.ENumber:
+						radix := 0
+						if len(e.Args) == 0 {
+							radix = 10
+						} else if len(e.Args) == 1 {
+							if num, ok := e.Args[0].Data.(*js_ast.ENumber); ok && num.Value == math.Trunc(num.Value) && num.Value >= 2 && num.Value <= 36 {
+								radix = int(num.Value)
+							}
+						}
+						if radix != 0 {
+							if str, ok := js_ast.TryToStringOnNumberSafely(target.Value, radix); ok {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(str)}}, exprOut{}
+							}
+						}
+
+					case *js_ast.ERegExp:
+						if len(e.Args) == 0 {
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(target.Value)}}, exprOut{}
+						}
+
+					case *js_ast.EBoolean:
+						if len(e.Args) == 0 {
+							if target.Value {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16("true")}}, exprOut{}
+							} else {
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16("false")}}, exprOut{}
+							}
+						}
+
+					case *js_ast.EString:
+						if len(e.Args) == 0 {
+							return t.Target, exprOut{}
+						}
+					}
+				}
+			}
+
+			// Copy the call side effect flag over if this is a known target
+			if t.CallCanBeUnwrappedIfUnused {
+				e.CanBeUnwrappedIfUnused = true
+			}
+
+		case *js_ast.EIndex:
+			// Copy the call side effect flag over if this is a known target
+			if t.CallCanBeUnwrappedIfUnused {
+				e.CanBeUnwrappedIfUnused = true
+			}
+
+		case *js_ast.ESuper:
+			// If we're shimming "super()" calls, replace this call with "__super()"
+			if p.superCtorRef != ast.InvalidRef {
+				p.recordUsage(p.superCtorRef)
+				target.Data = &js_ast.EIdentifier{Ref: p.superCtorRef}
+				e.Target.Data = target.Data
+			}
+		}
+
+		// Handle parenthesized optional chains
+		if isParenthesizedOptionalChain && out.thisArgFunc != nil && out.thisArgWrapFunc != nil {
+			return p.lowerParenthesizedOptionalChain(expr.Loc, e, out), exprOut{}
+		}
+
+		// Lower optional chaining if we're the top of the chain
+		containsOptionalChain := e.OptionalChain == js_ast.OptionalChainStart ||
+			(e.OptionalChain == js_ast.OptionalChainContinue && out.childContainsOptionalChain)
+		if containsOptionalChain && !in.hasChainParent {
+			return p.lowerOptionalChain(expr, in, out)
+		}
+
+		// If this is a plain call expression (instead of an optional chain), lower
+		// private member access in the call target now if there is one
+		if !containsOptionalChain {
+			if target, loc, private := p.extractPrivateIndex(e.Target); private != nil {
+				// "foo.#bar(123)" => "__privateGet(_a = foo, #bar).call(_a, 123)"
+				targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(target.Loc, 2, target, valueCouldBeMutated)
+				return targetWrapFunc(js_ast.Expr{Loc: target.Loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: target.Loc, Data: &js_ast.EDot{
+						Target:  p.lowerPrivateGet(targetFunc(), loc, private),
+						Name:    "call",
+						NameLoc: target.Loc,
+					}},
+					Args:                   append([]js_ast.Expr{targetFunc()}, e.Args...),
+					CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
+					Kind:                   js_ast.TargetWasOriginallyPropertyAccess,
+				}}), exprOut{}
+			}
+			p.maybeLowerSuperPropertyGetInsideCall(e)
+		}
+
+		// Track calls to require() so we can use them while bundling
+		if p.options.mode != config.ModePassThrough && e.OptionalChain == js_ast.OptionalChainNone {
+			if id, ok := e.Target.Data.(*js_ast.EIdentifier); ok && id.Ref == p.requireRef {
+				// Heuristic: omit warnings inside try/catch blocks because presumably
+				// the try/catch statement is there to handle the potential run-time
+				// error from the unbundled require() call failing.
+				omitWarnings := p.fnOrArrowDataVisit.tryBodyCount != 0
+
+				if p.options.mode != config.ModePassThrough {
+					// There must be one argument
+					if len(e.Args) == 1 {
+						p.ignoreUsage(p.requireRef)
+						return p.maybeTransposeIfExprChain(e.Args[0], func(arg js_ast.Expr) js_ast.Expr {
+							// The argument must be a string
+							if str, ok := arg.Data.(*js_ast.EString); ok {
+								// Ignore calls to require() if the control flow is provably dead here.
+								// We don't want to spend time scanning the required files if they will
+								// never be used.
+								if p.isControlFlowDead {
+									return js_ast.Expr{Loc: expr.Loc, Data: js_ast.ENullShared}
+								}
+
+								importRecordIndex := p.addImportRecord(ast.ImportRequire, p.source.RangeOfString(arg.Loc), helpers.UTF16ToString(str.Value), nil, 0)
+								if p.fnOrArrowDataVisit.tryBodyCount != 0 {
+									record := &p.importRecords[importRecordIndex]
+									record.Flags |= ast.HandlesImportErrors
+									record.ErrorHandlerLoc = p.fnOrArrowDataVisit.tryCatchLoc
+								}
+								p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, importRecordIndex)
+
+								// Currently "require" is not converted into "import" for ESM
+								if p.options.mode != config.ModeBundle && p.options.outputFormat == config.FormatESModule && !omitWarnings {
+									r := js_lexer.RangeOfIdentifier(p.source, e.Target.Loc)
+									p.log.AddID(logger.MsgID_JS_UnsupportedRequireCall, logger.Warning, &p.tracker, r, "Converting \"require\" to \"esm\" is currently not supported")
+								}
+
+								// Create a new expression to represent the operation
+								return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ERequireString{
+									ImportRecordIndex: importRecordIndex,
+									CloseParenLoc:     e.CloseParenLoc,
+								}}
+							}
+
+							// Handle glob patterns
+							if p.options.mode == config.ModeBundle {
+								if value := p.handleGlobPattern(arg, ast.ImportRequire, "globRequire", nil); value.Data != nil {
+									return value
+								}
+							}
+
+							// Use a debug log so people can see this if they want to
+							r := js_lexer.RangeOfIdentifier(p.source, e.Target.Loc)
+							p.log.AddID(logger.MsgID_JS_UnsupportedRequireCall, logger.Debug, &p.tracker, r,
+								"This call to \"require\" will not be bundled because the argument is not a string literal")
+
+							// Otherwise just return a clone of the "require()" call
+							return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+								Target:        p.valueToSubstituteForRequire(e.Target.Loc),
+								Args:          []js_ast.Expr{arg},
+								CloseParenLoc: e.CloseParenLoc,
+							}}
+						}), exprOut{}
+					} else {
+						// Use a debug log so people can see this if they want to
+						r := js_lexer.RangeOfIdentifier(p.source, e.Target.Loc)
+						p.log.AddIDWithNotes(logger.MsgID_JS_UnsupportedRequireCall, logger.Debug, &p.tracker, r,
+							fmt.Sprintf("This call to \"require\" will not be bundled because it has %d arguments", len(e.Args)),
+							[]logger.MsgData{{Text: "To be bundled by esbuild, a \"require\" call must have exactly 1 argument."}})
+					}
+
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+						Target:        p.valueToSubstituteForRequire(e.Target.Loc),
+						Args:          e.Args,
+						CloseParenLoc: e.CloseParenLoc,
+					}}, exprOut{}
+				}
+			}
+		}
+
+		out = exprOut{
+			childContainsOptionalChain: containsOptionalChain,
+			thisArgFunc:                out.thisArgFunc,
+			thisArgWrapFunc:            out.thisArgWrapFunc,
+		}
+		if !in.hasChainParent {
+			out.thisArgFunc = nil
+			out.thisArgWrapFunc = nil
+		}
+		return expr, out
+
+	case *js_ast.ENew:
+		hasSpread := false
+
+		e.Target = p.visitExpr(e.Target)
+		p.warnAboutImportNamespaceCall(e.Target, exprKindNew)
+
+		for i, arg := range e.Args {
+			arg = p.visitExpr(arg)
+			if _, ok := arg.Data.(*js_ast.ESpread); ok {
+				hasSpread = true
+			}
+			e.Args[i] = arg
+		}
+
+		// "new foo(1, ...[2, 3], 4)" => "new foo(1, 2, 3, 4)"
+		if p.options.minifySyntax && hasSpread {
+			e.Args = js_ast.InlineSpreadsOfArrayLiterals(e.Args)
+		}
+
+		p.maybeMarkKnownGlobalConstructorAsPure(e)
+
+	case *js_ast.EArrow:
+		// Check for a propagated name to keep from the parent context
+		var nameToKeep string
+		if p.nameToKeepIsFor == e {
+			nameToKeep = p.nameToKeep
+		}
+
+		// Prepare for suspicious logical operator checking
+		if e.PreferExpr && len(e.Args) == 1 && e.Args[0].DefaultOrNil.Data == nil && len(e.Body.Block.Stmts) == 1 {
+			if _, ok := e.Args[0].Binding.Data.(*js_ast.BIdentifier); ok {
+				if stmt, ok := e.Body.Block.Stmts[0].Data.(*js_ast.SReturn); ok {
+					if binary, ok := stmt.ValueOrNil.Data.(*js_ast.EBinary); ok && (binary.Op == js_ast.BinOpLogicalAnd || binary.Op == js_ast.BinOpLogicalOr) {
+						p.suspiciousLogicalOperatorInsideArrow = binary
+					}
+				}
+			}
+		}
+
+		asyncArrowNeedsToBeLowered := e.IsAsync && p.options.unsupportedJSFeatures.Has(compat.AsyncAwait)
+		oldFnOrArrowData := p.fnOrArrowDataVisit
+		p.fnOrArrowDataVisit = fnOrArrowDataVisit{
+			isArrow:                        true,
+			isAsync:                        e.IsAsync,
+			shouldLowerSuperPropertyAccess: oldFnOrArrowData.shouldLowerSuperPropertyAccess || asyncArrowNeedsToBeLowered,
+		}
+
+		// Mark if we're inside an async arrow function. This value should be true
+		// even if we're inside multiple arrow functions and the closest enclosing
+		// arrow function isn't async, as long as at least one enclosing arrow
+		// function within the current enclosing function is async.
+		oldInsideAsyncArrowFn := p.fnOnlyDataVisit.isInsideAsyncArrowFn
+		if e.IsAsync {
+			p.fnOnlyDataVisit.isInsideAsyncArrowFn = true
+		}
+
+		p.pushScopeForVisitPass(js_ast.ScopeFunctionArgs, expr.Loc)
+		p.visitArgs(e.Args, visitArgsOpts{
+			hasRestArg:               e.HasRestArg,
+			body:                     e.Body.Block.Stmts,
+			isUniqueFormalParameters: true,
+		})
+		p.pushScopeForVisitPass(js_ast.ScopeFunctionBody, e.Body.Loc)
+		e.Body.Block.Stmts = p.visitStmtsAndPrependTempRefs(e.Body.Block.Stmts, prependTempRefsOpts{kind: stmtsFnBody})
+		p.popScope()
+		p.lowerFunction(&e.IsAsync, nil, &e.Args, e.Body.Loc, &e.Body.Block, &e.PreferExpr, &e.HasRestArg, true /* isArrow */)
+		p.popScope()
+
+		if p.options.minifySyntax && len(e.Body.Block.Stmts) == 1 {
+			if s, ok := e.Body.Block.Stmts[0].Data.(*js_ast.SReturn); ok {
+				if s.ValueOrNil.Data == nil {
+					// "() => { return }" => "() => {}"
+					e.Body.Block.Stmts = []js_ast.Stmt{}
+				} else {
+					// "() => { return x }" => "() => x"
+					e.PreferExpr = true
+				}
+			}
+		}
+
+		p.fnOnlyDataVisit.isInsideAsyncArrowFn = oldInsideAsyncArrowFn
+		p.fnOrArrowDataVisit = oldFnOrArrowData
+
+		// Convert arrow functions to function expressions when lowering
+		if p.options.unsupportedJSFeatures.Has(compat.Arrow) {
+			expr.Data = &js_ast.EFunction{Fn: js_ast.Fn{
+				Args:         e.Args,
+				Body:         e.Body,
+				ArgumentsRef: ast.InvalidRef,
+				IsAsync:      e.IsAsync,
+				HasRestArg:   e.HasRestArg,
+			}}
+		}
+
+		// Optionally preserve the name
+		if p.options.keepNames && nameToKeep != "" {
+			expr = p.keepExprSymbolName(expr, nameToKeep)
+		}
+
+	case *js_ast.EFunction:
+		// Check for a propagated name to keep from the parent context
+		var nameToKeep string
+		if p.nameToKeepIsFor == e {
+			nameToKeep = p.nameToKeep
+		}
+
+		p.visitFn(&e.Fn, expr.Loc, visitFnOpts{
+			isMethod:               in.isMethod,
+			isDerivedClassCtor:     e == p.propDerivedCtorValue,
+			isLoweredPrivateMethod: in.isLoweredPrivateMethod,
+		})
+		name := e.Fn.Name
+
+		// Remove unused function names when minifying
+		if p.options.minifySyntax && !p.currentScope.ContainsDirectEval &&
+			name != nil && p.symbols[name.Ref.InnerIndex].UseCountEstimate == 0 {
+			e.Fn.Name = nil
+		}
+
+		// Optionally preserve the name for functions, but not for methods
+		if p.options.keepNames && (!in.isMethod || in.isLoweredPrivateMethod) {
+			if name != nil {
+				expr = p.keepExprSymbolName(expr, p.symbols[name.Ref.InnerIndex].OriginalName)
+			} else if nameToKeep != "" {
+				expr = p.keepExprSymbolName(expr, nameToKeep)
+			}
+		}
+
+	case *js_ast.EClass:
+		// Check for a propagated name to keep from the parent context
+		var nameToKeep string
+		if p.nameToKeepIsFor == e {
+			nameToKeep = p.nameToKeep
+		}
+
+		result := p.visitClass(expr.Loc, &e.Class, ast.InvalidRef, nameToKeep)
+
+		// Lower class field syntax for browsers that don't support it
+		_, expr = p.lowerClass(js_ast.Stmt{}, expr, result, nameToKeep)
+
+		// We may be able to determine that a class is side-effect free before lowering
+		// but not after lowering (e.g. due to "--keep-names" mutating the object).
+		// If that's the case, add a special annotation so this doesn't prevent
+		// tree-shaking from happening.
+		if result.canBeRemovedIfUnused {
+			expr.Data = &js_ast.EAnnotation{
+				Value: expr,
+				Flags: js_ast.CanBeRemovedIfUnusedFlag,
+			}
+		}
+
+	default:
+		// Note: EPrivateIdentifier should have already been handled
+		panic(fmt.Sprintf("Unexpected expression of type %T", expr.Data))
+	}
+
+	return expr, exprOut{}
+}
+
+// This exists to handle very deeply-nested ASTs. For example, the "grapheme-splitter"
+// package contains this monstrosity:
+//
+//	if (
+//	  (0x0300 <= code && code <= 0x036F) ||
+//	  (0x0483 <= code && code <= 0x0487) ||
+//	  (0x0488 <= code && code <= 0x0489) ||
+//	  (0x0591 <= code && code <= 0x05BD) ||
+//	  ... many hundreds of lines later ...
+//	) {
+//	  return;
+//	}
+//
+// If "checkAndPrepare" returns non-nil, then the return value is the final
+// expression. Otherwise, the final expression can be obtained by manually
+// visiting the left child and then calling "visitRightAndFinish":
+//
+//	if result := v.checkAndPrepare(p); result.Data != nil {
+//	  return result
+//	}
+//	v.e.Left, _ = p.visitExprInOut(v.e.Left, v.leftIn)
+//	return v.visitRightAndFinish(p)
+//
+// This code is convoluted this way so that we can use our own stack on the
+// heap instead of the call stack when there are additional levels of nesting.
+// Before this transformation, the code previously looked something like this:
+//
+//	... The code in "checkAndPrepare" ...
+//	e.Left, _ = p.visitExprInOut(e.Left, in)
+//	... The code in "visitRightAndFinish" ...
+//
+// If this code is still confusing, it may be helpful to look back in git
+// history at the commit that introduced this transformation.
+//
+// Go normally has growable call stacks so this code transformation normally
+// doesn't do anything, but WebAssembly doesn't allow stack pointer manipulation
+// so Go's WebAssembly implementation doesn't support growable call stacks and
+// is therefore vulnerable to stack overflow. So this code transformation is
+// only really relevant for esbuild's WebAssembly-based API.
+type binaryExprVisitor struct {
+	// Inputs
+	e   *js_ast.EBinary // the binary expression to visit
+	loc logger.Loc      // the location of the expression as a whole
+	in  exprIn          // the visit options passed in for the expression as a whole
+
+	// Input for visiting the left child
+	leftIn exprIn
+
+	// "Local variables" passed from "checkAndPrepare" to "visitRightAndFinish"
+	isStmtExpr                               bool // true if this expression is used directly as a statement
+	oldSilenceWarningAboutThisBeingUndefined bool // saved flag, restored after the right child is visited
+}
+
+// checkAndPrepare is the first half of visiting a binary expression. If it
+// returns an expression with non-nil Data, that is the final result and the
+// caller must not visit the children. Otherwise it stashes the state needed
+// by "visitRightAndFinish" on "v" and returns the zero expression; the
+// caller must then visit "e.Left" using "v.leftIn" before calling
+// "visitRightAndFinish". See the comment on "binaryExprVisitor" for why
+// visiting is split up this way.
+func (v *binaryExprVisitor) checkAndPrepare(p *parser) js_ast.Expr {
+	e := v.e
+
+	// Special-case EPrivateIdentifier to allow it here. A private name may
+	// only appear as the left operand of the "in" operator ("#foo in bar").
+	if private, ok := e.Left.Data.(*js_ast.EPrivateIdentifier); ok && e.Op == js_ast.BinOpIn {
+		// Resolve the private name to a symbol declared by an enclosing class
+		name := p.loadNameFromRef(private.Ref)
+		result := p.findSymbol(e.Left.Loc, name)
+		private.Ref = result.ref
+
+		// Unlike regular identifiers, there are no unbound private identifiers
+		symbol := &p.symbols[result.ref.InnerIndex]
+		if !symbol.Kind.IsPrivate() {
+			r := logger.Range{Loc: e.Left.Loc, Len: int32(len(name))}
+			p.log.AddError(&p.tracker, r, fmt.Sprintf("Private name %q must be declared in an enclosing class", name))
+		}
+
+		e.Right = p.visitExpr(e.Right)
+
+		// Convert the brand check into a runtime helper call if the private
+		// symbol itself needs to be lowered for the target environment
+		if p.privateSymbolNeedsToBeLowered(private) {
+			return p.lowerPrivateBrandCheck(e.Right, v.loc, private)
+		}
+		return js_ast.Expr{Loc: v.loc, Data: e}
+	}
+
+	// Save state for "visitRightAndFinish" before any children are visited
+	v.isStmtExpr = e == p.stmtExprValue
+	v.oldSilenceWarningAboutThisBeingUndefined = p.fnOnlyDataVisit.silenceMessageAboutThisBeingUndefined
+
+	// Don't warn about an undefined "this" in the right operand when the left
+	// operand guards against it (e.g. "this && this.foo")
+	if _, ok := e.Left.Data.(*js_ast.EThis); ok && e.Op == js_ast.BinOpLogicalAnd {
+		p.fnOnlyDataVisit.silenceMessageAboutThisBeingUndefined = true
+	}
+	v.leftIn = exprIn{
+		assignTarget:               e.Op.BinaryAssignTarget(),
+		shouldMangleStringsAsProps: e.Op == js_ast.BinOpIn,
+	}
+	return js_ast.Expr{}
+}
+
+func (v *binaryExprVisitor) visitRightAndFinish(p *parser) js_ast.Expr {
+	e := v.e
+
+	// Mark the control flow as dead if the branch is never taken
+	switch e.Op {
+	case js_ast.BinOpLogicalOr:
+		if boolean, _, ok := js_ast.ToBooleanWithSideEffects(e.Left.Data); ok && boolean {
+			// "true || dead"
+			old := p.isControlFlowDead
+			p.isControlFlowDead = true
+			e.Right = p.visitExpr(e.Right)
+			p.isControlFlowDead = old
+		} else {
+			e.Right = p.visitExpr(e.Right)
+		}
+
+	case js_ast.BinOpLogicalAnd:
+		if boolean, _, ok := js_ast.ToBooleanWithSideEffects(e.Left.Data); ok && !boolean {
+			// "false && dead"
+			old := p.isControlFlowDead
+			p.isControlFlowDead = true
+			e.Right = p.visitExpr(e.Right)
+			p.isControlFlowDead = old
+		} else {
+			e.Right = p.visitExpr(e.Right)
+		}
+
+	case js_ast.BinOpNullishCoalescing:
+		if isNullOrUndefined, _, ok := js_ast.ToNullOrUndefinedWithSideEffects(e.Left.Data); ok && !isNullOrUndefined {
+			// "notNullOrUndefined ?? dead"
+			old := p.isControlFlowDead
+			p.isControlFlowDead = true
+			e.Right = p.visitExpr(e.Right)
+			p.isControlFlowDead = old
+		} else {
+			e.Right = p.visitExpr(e.Right)
+		}
+
+	case js_ast.BinOpComma:
+		e.Right, _ = p.visitExprInOut(e.Right, exprIn{
+			shouldMangleStringsAsProps: v.in.shouldMangleStringsAsProps,
+		})
+
+	case js_ast.BinOpAssign, js_ast.BinOpLogicalOrAssign, js_ast.BinOpLogicalAndAssign, js_ast.BinOpNullishCoalescingAssign:
+		// Check for a propagated name to keep from the parent context
+		if id, ok := e.Left.Data.(*js_ast.EIdentifier); ok {
+			p.nameToKeep = p.symbols[id.Ref.InnerIndex].OriginalName
+			p.nameToKeepIsFor = e.Right.Data
+		}
+
+		e.Right = p.visitExpr(e.Right)
+
+	default:
+		e.Right = p.visitExpr(e.Right)
+	}
+	p.fnOnlyDataVisit.silenceMessageAboutThisBeingUndefined = v.oldSilenceWarningAboutThisBeingUndefined
+
+	// Always put constants consistently on the same side for equality
+	// comparisons to help improve compression. In theory, dictionary-based
+	// compression methods may already have a dictionary entry for code that
+	// is similar to previous code. Note that we can only reorder expressions
+	// that do not have any side effects.
+	//
+	// Constants are currently ordered on the right instead of the left because
+	// it results in slightly smaller gzip size on our primary benchmark
+	// (although slightly larger uncompressed size). The size difference is
+	// less than 0.1% so it really isn't that important an optimization.
+	if p.options.minifySyntax {
+		switch e.Op {
+		case js_ast.BinOpLooseEq, js_ast.BinOpLooseNe, js_ast.BinOpStrictEq, js_ast.BinOpStrictNe:
+			// "1 === x" => "x === 1"
+			if js_ast.IsPrimitiveLiteral(e.Left.Data) && !js_ast.IsPrimitiveLiteral(e.Right.Data) {
+				e.Left, e.Right = e.Right, e.Left
+			}
+		}
+	}
+
+	if p.shouldFoldTypeScriptConstantExpressions || (p.options.minifySyntax && js_ast.ShouldFoldBinaryOperatorWhenMinifying(e)) {
+		if result := js_ast.FoldBinaryOperator(v.loc, e); result.Data != nil {
+			return result
+		}
+	}
+
+	// Post-process the binary expression
+	switch e.Op {
+	case js_ast.BinOpComma:
+		// "(1, 2)" => "2"
+		// "(sideEffects(), 2)" => "(sideEffects(), 2)"
+		if p.options.minifySyntax {
+			e.Left = p.astHelpers.SimplifyUnusedExpr(e.Left, p.options.unsupportedJSFeatures)
+			if e.Left.Data == nil {
+				return e.Right
+			}
+		}
+
+	case js_ast.BinOpLooseEq:
+		if result, ok := js_ast.CheckEqualityIfNoSideEffects(e.Left.Data, e.Right.Data, js_ast.LooseEquality); ok {
+			return js_ast.Expr{Loc: v.loc, Data: &js_ast.EBoolean{Value: result}}
+		}
+		afterOpLoc := locAfterOp(e)
+		if !p.warnAboutEqualityCheck("==", e.Left, afterOpLoc) {
+			p.warnAboutEqualityCheck("==", e.Right, afterOpLoc)
+		}
+		p.warnAboutTypeofAndString(e.Left, e.Right, checkBothOrders)
+
+		if p.options.minifySyntax {
+			// "x == void 0" => "x == null"
+			if _, ok := e.Left.Data.(*js_ast.EUndefined); ok {
+				e.Left.Data = js_ast.ENullShared
+			} else if _, ok := e.Right.Data.(*js_ast.EUndefined); ok {
+				e.Right.Data = js_ast.ENullShared
+			}
+
+			if result, ok := js_ast.MaybeSimplifyEqualityComparison(v.loc, e, p.options.unsupportedJSFeatures); ok {
+				return result
+			}
+		}
+
+	case js_ast.BinOpStrictEq:
+		if result, ok := js_ast.CheckEqualityIfNoSideEffects(e.Left.Data, e.Right.Data, js_ast.StrictEquality); ok {
+			return js_ast.Expr{Loc: v.loc, Data: &js_ast.EBoolean{Value: result}}
+		}
+		afterOpLoc := locAfterOp(e)
+		if !p.warnAboutEqualityCheck("===", e.Left, afterOpLoc) {
+			p.warnAboutEqualityCheck("===", e.Right, afterOpLoc)
+		}
+		p.warnAboutTypeofAndString(e.Left, e.Right, checkBothOrders)
+
+		if p.options.minifySyntax {
+			// "typeof x === 'undefined'" => "typeof x == 'undefined'"
+			if js_ast.CanChangeStrictToLoose(e.Left, e.Right) {
+				e.Op = js_ast.BinOpLooseEq
+			}
+
+			if result, ok := js_ast.MaybeSimplifyEqualityComparison(v.loc, e, p.options.unsupportedJSFeatures); ok {
+				return result
+			}
+		}
+
+	case js_ast.BinOpLooseNe:
+		if result, ok := js_ast.CheckEqualityIfNoSideEffects(e.Left.Data, e.Right.Data, js_ast.LooseEquality); ok {
+			return js_ast.Expr{Loc: v.loc, Data: &js_ast.EBoolean{Value: !result}}
+		}
+		afterOpLoc := locAfterOp(e)
+		if !p.warnAboutEqualityCheck("!=", e.Left, afterOpLoc) {
+			p.warnAboutEqualityCheck("!=", e.Right, afterOpLoc)
+		}
+		p.warnAboutTypeofAndString(e.Left, e.Right, checkBothOrders)
+
+		if p.options.minifySyntax {
+			// "x != void 0" => "x != null"
+			if _, ok := e.Left.Data.(*js_ast.EUndefined); ok {
+				e.Left.Data = js_ast.ENullShared
+			} else if _, ok := e.Right.Data.(*js_ast.EUndefined); ok {
+				e.Right.Data = js_ast.ENullShared
+			}
+
+			if result, ok := js_ast.MaybeSimplifyEqualityComparison(v.loc, e, p.options.unsupportedJSFeatures); ok {
+				return result
+			}
+		}
+
+	case js_ast.BinOpStrictNe:
+		if result, ok := js_ast.CheckEqualityIfNoSideEffects(e.Left.Data, e.Right.Data, js_ast.StrictEquality); ok {
+			return js_ast.Expr{Loc: v.loc, Data: &js_ast.EBoolean{Value: !result}}
+		}
+		afterOpLoc := locAfterOp(e)
+		if !p.warnAboutEqualityCheck("!==", e.Left, afterOpLoc) {
+			p.warnAboutEqualityCheck("!==", e.Right, afterOpLoc)
+		}
+		p.warnAboutTypeofAndString(e.Left, e.Right, checkBothOrders)
+
+		if p.options.minifySyntax {
+			// "typeof x !== 'undefined'" => "typeof x != 'undefined'"
+			if js_ast.CanChangeStrictToLoose(e.Left, e.Right) {
+				e.Op = js_ast.BinOpLooseNe
+			}
+
+			if result, ok := js_ast.MaybeSimplifyEqualityComparison(v.loc, e, p.options.unsupportedJSFeatures); ok {
+				return result
+			}
+		}
+
+	case js_ast.BinOpNullishCoalescing:
+		if isNullOrUndefined, sideEffects, ok := js_ast.ToNullOrUndefinedWithSideEffects(e.Left.Data); ok {
+			// Warn about potential bugs
+			if !js_ast.IsPrimitiveLiteral(e.Left.Data) {
+				// "return props.flag === flag ?? true" is "return (props.flag === flag) ?? true" not "return props.flag === (flag ?? true)"
+				var which string
+				var leftIsNullOrUndefined string
+				var leftIsReturned string
+				if !isNullOrUndefined {
+					which = "left"
+					leftIsNullOrUndefined = "never"
+					leftIsReturned = "always"
+				} else {
+					which = "right"
+					leftIsNullOrUndefined = "always"
+					leftIsReturned = "never"
+				}
+				kind := logger.Warning
+				if p.suppressWarningsAboutWeirdCode {
+					kind = logger.Debug
+				}
+				rOp := p.source.RangeOfOperatorBefore(e.Right.Loc, "??")
+				rLeft := logger.Range{Loc: e.Left.Loc, Len: p.source.LocBeforeWhitespace(rOp.Loc).Start - e.Left.Loc.Start}
+				p.log.AddIDWithNotes(logger.MsgID_JS_SuspiciousNullishCoalescing, kind, &p.tracker, rOp,
+					fmt.Sprintf("The \"??\" operator here will always return the %s operand", which), []logger.MsgData{
+						p.tracker.MsgData(rLeft, fmt.Sprintf(
+							"The left operand of the \"??\" operator here will %s be null or undefined, so it will %s be returned. This usually indicates a bug in your code:",
+							leftIsNullOrUndefined, leftIsReturned))})
+			}
+
+			if !isNullOrUndefined {
+				return e.Left
+			} else if sideEffects == js_ast.NoSideEffects {
+				return e.Right
+			}
+		}
+
+		if p.options.minifySyntax {
+			// "a ?? (b ?? c)" => "a ?? b ?? c"
+			if right, ok := e.Right.Data.(*js_ast.EBinary); ok && right.Op == js_ast.BinOpNullishCoalescing {
+				e.Left = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpNullishCoalescing, e.Left, right.Left)
+				e.Right = right.Right
+			}
+		}
+
+		if p.options.unsupportedJSFeatures.Has(compat.NullishCoalescing) {
+			return p.lowerNullishCoalescing(v.loc, e.Left, e.Right)
+		}
+
+	case js_ast.BinOpLogicalOr:
+		if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(e.Left.Data); ok {
+			// Warn about potential bugs
+			if e == p.suspiciousLogicalOperatorInsideArrow {
+				if arrowLoc := p.source.RangeOfOperatorBefore(v.loc, "=>"); arrowLoc.Loc.Start+2 == p.source.LocBeforeWhitespace(v.loc).Start {
+					// "return foo => 1 || foo <= 0"
+					var which string
+					if boolean {
+						which = "left"
+					} else {
+						which = "right"
+					}
+					kind := logger.Warning
+					if p.suppressWarningsAboutWeirdCode {
+						kind = logger.Debug
+					}
+					note := p.tracker.MsgData(arrowLoc,
+						"The \"=>\" symbol creates an arrow function expression in JavaScript. Did you mean to use the greater-than-or-equal-to operator \">=\" here instead?")
+					note.Location.Suggestion = ">="
+					rOp := p.source.RangeOfOperatorBefore(e.Right.Loc, "||")
+					p.log.AddIDWithNotes(logger.MsgID_JS_SuspiciousLogicalOperator, kind, &p.tracker, rOp,
+						fmt.Sprintf("The \"||\" operator here will always return the %s operand", which), []logger.MsgData{note})
+				}
+			}
+
+			if boolean {
+				return e.Left
+			} else if sideEffects == js_ast.NoSideEffects {
+				return e.Right
+			}
+		}
+
+		if p.options.minifySyntax {
+			// "a || (b || c)" => "a || b || c"
+			if right, ok := e.Right.Data.(*js_ast.EBinary); ok && right.Op == js_ast.BinOpLogicalOr {
+				e.Left = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalOr, e.Left, right.Left)
+				e.Right = right.Right
+			}
+
+			// "a === null || a === undefined" => "a == null"
+			if left, right, ok := js_ast.IsBinaryNullAndUndefined(e.Left, e.Right, js_ast.BinOpStrictEq); ok {
+				e.Op = js_ast.BinOpLooseEq
+				e.Left = left
+				e.Right = right
+			}
+		}
+
+	case js_ast.BinOpLogicalAnd:
+		if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(e.Left.Data); ok {
+			// Warn about potential bugs
+			if e == p.suspiciousLogicalOperatorInsideArrow {
+				if arrowLoc := p.source.RangeOfOperatorBefore(v.loc, "=>"); arrowLoc.Loc.Start+2 == p.source.LocBeforeWhitespace(v.loc).Start {
+					// "return foo => 0 && foo <= 1"
+					var which string
+					if !boolean {
+						which = "left"
+					} else {
+						which = "right"
+					}
+					kind := logger.Warning
+					if p.suppressWarningsAboutWeirdCode {
+						kind = logger.Debug
+					}
+					note := p.tracker.MsgData(arrowLoc,
+						"The \"=>\" symbol creates an arrow function expression in JavaScript. Did you mean to use the greater-than-or-equal-to operator \">=\" here instead?")
+					note.Location.Suggestion = ">="
+					rOp := p.source.RangeOfOperatorBefore(e.Right.Loc, "&&")
+					p.log.AddIDWithNotes(logger.MsgID_JS_SuspiciousLogicalOperator, kind, &p.tracker, rOp,
+						fmt.Sprintf("The \"&&\" operator here will always return the %s operand", which), []logger.MsgData{note})
+				}
+			}
+
+			if !boolean {
+				return e.Left
+			} else if sideEffects == js_ast.NoSideEffects {
+				return e.Right
+			}
+		}
+
+		if p.options.minifySyntax {
+			// "a && (b && c)" => "a && b && c"
+			if right, ok := e.Right.Data.(*js_ast.EBinary); ok && right.Op == js_ast.BinOpLogicalAnd {
+				e.Left = js_ast.JoinWithLeftAssociativeOp(js_ast.BinOpLogicalAnd, e.Left, right.Left)
+				e.Right = right.Right
+			}
+
+			// "a !== null && a !== undefined" => "a != null"
+			if left, right, ok := js_ast.IsBinaryNullAndUndefined(e.Left, e.Right, js_ast.BinOpStrictNe); ok {
+				e.Op = js_ast.BinOpLooseNe
+				e.Left = left
+				e.Right = right
+			}
+		}
+
+	case js_ast.BinOpAdd:
+		// "'abc' + 'xyz'" => "'abcxyz'"
+		if result := js_ast.FoldStringAddition(e.Left, e.Right, js_ast.StringAdditionNormal); result.Data != nil {
+			return result
+		}
+
+		if left, ok := e.Left.Data.(*js_ast.EBinary); ok && left.Op == js_ast.BinOpAdd {
+			// "x + 'abc' + 'xyz'" => "x + 'abcxyz'"
+			if result := js_ast.FoldStringAddition(left.Right, e.Right, js_ast.StringAdditionWithNestedLeft); result.Data != nil {
+				return js_ast.Expr{Loc: v.loc, Data: &js_ast.EBinary{Op: left.Op, Left: left.Left, Right: result}}
+			}
+		}
+
+	case js_ast.BinOpPow:
+		// Lower the exponentiation operator for browsers that don't support it
+		if p.options.unsupportedJSFeatures.Has(compat.ExponentOperator) {
+			return p.callRuntime(v.loc, "__pow", []js_ast.Expr{e.Left, e.Right})
+		}
+
+		////////////////////////////////////////////////////////////////////////////////
+		// All assignment operators below here
+
+	case js_ast.BinOpAssign:
+		if target, loc, private := p.extractPrivateIndex(e.Left); private != nil {
+			return p.lowerPrivateSet(target, loc, private, e.Right)
+		}
+
+		if property := p.extractSuperProperty(e.Left); property.Data != nil {
+			return p.lowerSuperPropertySet(e.Left.Loc, property, e.Right)
+		}
+
+		// Lower assignment destructuring patterns for browsers that don't
+		// support them. Note that assignment expressions are used to represent
+		// initializers in binding patterns, so only do this if we're not
+		// ourselves the target of an assignment. Example: "[a = b] = c"
+		if v.in.assignTarget == js_ast.AssignTargetNone {
+			mode := objRestMustReturnInitExpr
+			if v.isStmtExpr {
+				mode = objRestReturnValueIsUnused
+			}
+			if result, ok := p.lowerAssign(e.Left, e.Right, mode); ok {
+				return result
+			}
+
+			// If CommonJS-style exports are disabled, then references to them are
+			// treated as global variable references. This is consistent with how
+			// they work in node and the browser, so it's the correct interpretation.
+			//
+			// However, people sometimes try to use both types of exports within the
+			// same module and expect it to work. We warn about this when module
+			// format conversion is enabled.
+			//
+			// Only warn about this for uses in assignment position since there are
+			// some legitimate other uses. For example, some people do "typeof module"
+			// to check for a CommonJS environment, and we shouldn't warn on that.
+			if p.options.mode != config.ModePassThrough && p.isFileConsideredToHaveESMExports && !p.isControlFlowDead {
+				if dot, ok := e.Left.Data.(*js_ast.EDot); ok {
+					var name string
+					var loc logger.Loc
+
+					switch target := dot.Target.Data.(type) {
+					case *js_ast.EIdentifier:
+						if symbol := &p.symbols[target.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound &&
+							((symbol.OriginalName == "module" && dot.Name == "exports") || symbol.OriginalName == "exports") &&
+							!symbol.Flags.Has(ast.DidWarnAboutCommonJSInESM) {
+							// "module.exports = ..."
+							// "exports.something = ..."
+							name = symbol.OriginalName
+							loc = dot.Target.Loc
+							symbol.Flags |= ast.DidWarnAboutCommonJSInESM
+						}
+
+					case *js_ast.EDot:
+						if target.Name == "exports" {
+							if id, ok := target.Target.Data.(*js_ast.EIdentifier); ok {
+								if symbol := &p.symbols[id.Ref.InnerIndex]; symbol.Kind == ast.SymbolUnbound &&
+									symbol.OriginalName == "module" && !symbol.Flags.Has(ast.DidWarnAboutCommonJSInESM) {
+									// "module.exports.foo = ..."
+									name = symbol.OriginalName
+									loc = target.Target.Loc
+									symbol.Flags |= ast.DidWarnAboutCommonJSInESM
+								}
+							}
+						}
+					}
+
+					if name != "" {
+						kind := logger.Warning
+						if p.suppressWarningsAboutWeirdCode {
+							kind = logger.Debug
+						}
+						why, notes := p.whyESModule()
+						if why == whyESMTypeModulePackageJSON {
+							text := "Node's package format requires that CommonJS files in a \"type\": \"module\" package use the \".cjs\" file extension."
+							if p.options.ts.Parse {
+								text += " If you are using TypeScript, you can use the \".cts\" file extension with esbuild instead."
+							}
+							notes = append(notes, logger.MsgData{Text: text})
+						}
+						p.log.AddIDWithNotes(logger.MsgID_JS_CommonJSVariableInESM, kind, &p.tracker, js_lexer.RangeOfIdentifier(p.source, loc),
+							fmt.Sprintf("The CommonJS %q variable is treated as a global variable in an ECMAScript module and may not work as expected", name),
+							notes)
+					}
+				}
+			}
+		}
+
+	case js_ast.BinOpAddAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpAdd, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpSubAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpSub, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpMulAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpMul, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpDivAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpDiv, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpRemAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpRem, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpPowAssign:
+		// Lower the exponentiation operator for browsers that don't support it
+		if p.options.unsupportedJSFeatures.Has(compat.ExponentOperator) {
+			return p.lowerExponentiationAssignmentOperator(v.loc, e)
+		}
+
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpPow, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpShlAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpShl, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpShrAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpShr, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpUShrAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpUShr, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpBitwiseOrAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpBitwiseOr, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpBitwiseAndAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpBitwiseAnd, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpBitwiseXorAssign:
+		if result := p.maybeLowerSetBinOp(e.Left, js_ast.BinOpBitwiseXor, e.Right); result.Data != nil {
+			return result
+		}
+
+	case js_ast.BinOpNullishCoalescingAssign:
+		if value, ok := p.lowerNullishCoalescingAssignmentOperator(v.loc, e); ok {
+			return value
+		}
+
+	case js_ast.BinOpLogicalAndAssign:
+		if value, ok := p.lowerLogicalAssignmentOperator(v.loc, e, js_ast.BinOpLogicalAnd); ok {
+			return value
+		}
+
+	case js_ast.BinOpLogicalOrAssign:
+		if value, ok := p.lowerLogicalAssignmentOperator(v.loc, e, js_ast.BinOpLogicalOr); ok {
+			return value
+		}
+	}
+
+	// "(a, b) + c" => "a, b + c"
+	if p.options.minifySyntax && e.Op != js_ast.BinOpComma {
+		if comma, ok := e.Left.Data.(*js_ast.EBinary); ok && comma.Op == js_ast.BinOpComma {
+			return js_ast.JoinWithComma(comma.Left, js_ast.Expr{
+				Loc: comma.Right.Loc,
+				Data: &js_ast.EBinary{
+					Op:    e.Op,
+					Left:  comma.Right,
+					Right: e.Right,
+				},
+			})
+		}
+	}
+
+	return js_ast.Expr{Loc: v.loc, Data: e}
+}
+
+// remapExprLocsInJSON rewrites the source locations of a JSON expression tree
+// using the given remapping table (used when JSON was embedded inside a JS
+// string and its locations must be translated back to the outer file).
+func remapExprLocsInJSON(expr *js_ast.Expr, table []logger.StringInJSTableEntry) {
+	expr.Loc = logger.RemapStringInJSLoc(table, expr.Loc)
+
+	switch data := expr.Data.(type) {
+	case *js_ast.EArray:
+		data.CloseBracketLoc = logger.RemapStringInJSLoc(table, data.CloseBracketLoc)
+		for i := range data.Items {
+			remapExprLocsInJSON(&data.Items[i], table)
+		}
+
+	case *js_ast.EObject:
+		data.CloseBraceLoc = logger.RemapStringInJSLoc(table, data.CloseBraceLoc)
+		for i := range data.Properties {
+			property := &data.Properties[i]
+			remapExprLocsInJSON(&property.Key, table)
+			remapExprLocsInJSON(&property.ValueOrNil, table)
+		}
+	}
+}
+
+// handleGlobPattern detects dynamic import/require paths built from template
+// literals or string concatenation (e.g. "require(`./dir/${name}.js`)") and
+// rewrites them into a call to a generated glob-import helper symbol. It
+// returns an empty expression when the argument is not usable as a relative
+// glob pattern, in which case the caller leaves the expression alone.
+func (p *parser) handleGlobPattern(expr js_ast.Expr, kind ast.ImportKind, prefix string, assertOrWith *ast.ImportAssertOrWith) js_ast.Expr {
+	pattern, approximateRange := p.globPatternFromExpr(expr)
+	if pattern == nil {
+		return js_ast.Expr{}
+	}
+
+	var last helpers.GlobPart
+	var parts []helpers.GlobPart
+
+	// Collapse the scanned text/wildcard parts into canonical glob parts,
+	// merging adjacent text runs and choosing a wildcard kind based on
+	// whether the wildcard starts right after a "/" separator
+	for _, part := range pattern {
+		if part.isWildcard {
+			if last.Wildcard == helpers.GlobNone {
+				if !strings.HasSuffix(last.Prefix, "/") {
+					// "`a${b}c`" => "a*c"
+					last.Wildcard = helpers.GlobAllExceptSlash
+				} else {
+					// "`a/${b}c`" => "a/**/*c"
+					last.Wildcard = helpers.GlobAllIncludingSlash
+					parts = append(parts, last)
+					last = helpers.GlobPart{Prefix: "/", Wildcard: helpers.GlobAllExceptSlash}
+				}
+			}
+		} else if part.text != "" {
+			if last.Wildcard != helpers.GlobNone {
+				parts = append(parts, last)
+				last = helpers.GlobPart{}
+			}
+			last.Prefix += part.text
+		}
+	}
+
+	parts = append(parts, last)
+
+	// Don't handle this if it's a string constant
+	if len(parts) == 1 && parts[0].Wildcard == helpers.GlobNone {
+		return js_ast.Expr{}
+	}
+
+	// We currently only support relative globs
+	if prefix := parts[0].Prefix; !strings.HasPrefix(prefix, "./") && !strings.HasPrefix(prefix, "../") {
+		return js_ast.Expr{}
+	}
+
+	ref := ast.InvalidRef
+
+	// Don't generate duplicate glob imports
+outer:
+	for _, globPattern := range p.globPatternImports {
+		// Check the kind
+		if globPattern.kind != kind {
+			continue
+		}
+
+		// Check the parts
+		if len(globPattern.parts) != len(parts) {
+			continue
+		}
+		for i := range parts {
+			if globPattern.parts[i] != parts[i] {
+				continue outer
+			}
+		}
+
+		// Check the import assertions/attributes
+		if assertOrWith == nil {
+			if globPattern.assertOrWith != nil {
+				continue
+			}
+		} else {
+			if globPattern.assertOrWith == nil {
+				continue
+			}
+			if assertOrWith.Keyword != globPattern.assertOrWith.Keyword {
+				continue
+			}
+			a := assertOrWith.Entries
+			b := globPattern.assertOrWith.Entries
+			if len(a) != len(b) {
+				continue
+			}
+			for i := range a {
+				ai := a[i]
+				bi := b[i]
+				if !helpers.UTF16EqualsUTF16(ai.Key, bi.Key) || !helpers.UTF16EqualsUTF16(ai.Value, bi.Value) {
+					continue outer
+				}
+			}
+		}
+
+		// If we get here, then these are the same glob pattern
+		ref = globPattern.ref
+		break
+	}
+
+	// If there's no duplicate glob import, then generate a new glob import
+	if ref == ast.InvalidRef && prefix != "" {
+		// Build a readable symbol name from the prefix plus the pattern's
+		// constant text, replacing each run of non-identifier characters
+		// with a single "_"
+		sb := strings.Builder{}
+		sb.WriteString(prefix)
+
+		for _, part := range parts {
+			gap := true
+			for _, c := range part.Prefix {
+				if !js_ast.IsIdentifierContinue(c) {
+					gap = true
+				} else {
+					if gap {
+						sb.WriteByte('_')
+						gap = false
+					}
+					sb.WriteRune(c)
+				}
+			}
+		}
+
+		name := sb.String()
+		ref = p.newSymbol(ast.SymbolOther, name)
+		p.moduleScope.Generated = append(p.moduleScope.Generated, ref)
+
+		p.globPatternImports = append(p.globPatternImports, globPatternImport{
+			assertOrWith:     assertOrWith,
+			parts:            parts,
+			name:             name,
+			approximateRange: approximateRange,
+			ref:              ref,
+			kind:             kind,
+		})
+	}
+
+	// Replace the original expression with "helper(originalExpr)" so the
+	// linker can later wire the helper up to the matched files
+	p.recordUsage(ref)
+	return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ECall{
+		Target: js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: ref}},
+		Args:   []js_ast.Expr{expr},
+	}}
+}
+
+// globPart is one segment of a scanned glob pattern: either a run of constant
+// text or a wildcard hole produced by a non-constant sub-expression.
+type globPart struct {
+	text       string // literal text (only meaningful when "isWildcard" is false)
+	isWildcard bool   // true if this segment stands in for arbitrary text
+}
+
+// globPatternFromExpr attempts to interpret an expression as a glob pattern
+// made of constant text and wildcard holes. It accepts string literals,
+// untagged template literals, and "+" concatenations of those; any other
+// sub-expression becomes a wildcard. It returns nil when the expression
+// cannot be treated as a pattern at all, along with an approximate source
+// range for use in diagnostics.
+func (p *parser) globPatternFromExpr(expr js_ast.Expr) ([]globPart, logger.Range) {
+	switch e := expr.Data.(type) {
+	case *js_ast.EString:
+		return []globPart{{text: helpers.UTF16ToString(e.Value)}}, p.source.RangeOfString(expr.Loc)
+
+	case *js_ast.ETemplate:
+		// Don't handle tagged template literals
+		if e.TagOrNil.Data != nil {
+			break
+		}
+
+		pattern := make([]globPart, 0, 1+2*len(e.Parts))
+		pattern = append(pattern, globPart{text: helpers.UTF16ToString(e.HeadCooked)})
+
+		// Each "${...}" hole contributes either its own scanned sub-pattern
+		// (when it is itself pattern-like) or a single wildcard
+		for _, part := range e.Parts {
+			if partPattern, _ := p.globPatternFromExpr(part.Value); partPattern != nil {
+				pattern = append(pattern, partPattern...)
+			} else {
+				pattern = append(pattern, globPart{isWildcard: true})
+			}
+			pattern = append(pattern, globPart{text: helpers.UTF16ToString(part.TailCooked)})
+		}
+
+		if len(e.Parts) == 0 {
+			return pattern, p.source.RangeOfString(expr.Loc)
+		}
+
+		// Scan forward from the last template part to find the closing
+		// backtick (skipping over escape sequences) so the reported range
+		// covers the whole template literal
+		text := p.source.Contents
+		templateRange := logger.Range{Loc: e.HeadLoc}
+
+		for i := e.Parts[len(e.Parts)-1].TailLoc.Start; i < int32(len(text)); i++ {
+			c := text[i]
+			if c == '`' {
+				templateRange.Len = i + 1 - templateRange.Loc.Start
+				break
+			} else if c == '\\' {
+				i += 1
+			}
+		}
+
+		return pattern, templateRange
+
+	case *js_ast.EBinary:
+		// Only "+" concatenation is pattern-like
+		if e.Op != js_ast.BinOpAdd {
+			break
+		}
+
+		pattern, leftRange := p.globPatternFromExpr(e.Left)
+		if pattern == nil {
+			break
+		}
+
+		if rightPattern, rightRange := p.globPatternFromExpr(e.Right); rightPattern != nil {
+			pattern = append(pattern, rightPattern...)
+			leftRange.Len = rightRange.End() - leftRange.Loc.Start
+			return pattern, leftRange
+		}
+
+		// The right operand is not pattern-like, so it becomes a wildcard
+		pattern = append(pattern, globPart{isWildcard: true})
+
+		// Try to extend the left range by the right operand in some common cases
+		switch right := e.Right.Data.(type) {
+		case *js_ast.EIdentifier:
+			leftRange.Len = js_lexer.RangeOfIdentifier(p.source, e.Right.Loc).End() - leftRange.Loc.Start
+
+		case *js_ast.ECall:
+			if right.CloseParenLoc.Start > 0 {
+				leftRange.Len = right.CloseParenLoc.Start + 1 - leftRange.Loc.Start
+			}
+		}
+
+		return pattern, leftRange
+	}
+
+	return nil, logger.Range{}
+}
+
+// convertSymbolUseToCall converts one previously-recorded plain use of "ref"
+// into a call-style use, additionally tracking whether the call site had
+// exactly one non-spread argument (which enables certain optimizations).
+func (p *parser) convertSymbolUseToCall(ref ast.Ref, isSingleNonSpreadArgCall bool) {
+	// Drop one normal use of this symbol
+	remaining := p.symbolUses[ref]
+	remaining.CountEstimate--
+	if remaining.CountEstimate != 0 {
+		p.symbolUses[ref] = remaining
+	} else {
+		delete(p.symbolUses, ref)
+	}
+
+	// Record a call-style use instead (the map is allocated lazily)
+	if p.symbolCallUses == nil {
+		p.symbolCallUses = map[ast.Ref]js_ast.SymbolCallUse{}
+	}
+	entry := p.symbolCallUses[ref]
+	entry.CallCountEstimate++
+	if isSingleNonSpreadArgCall {
+		entry.SingleArgNonSpreadCallCountEstimate++
+	}
+	p.symbolCallUses[ref] = entry
+}
+
+// warnAboutImportNamespaceCall warns when an import namespace object is
+// called, constructed, or used as a JSX component, since a namespace object
+// is not callable and this will crash at run time. Each (namespace ref,
+// usage kind) pair is only reported once per file.
+func (p *parser) warnAboutImportNamespaceCall(target js_ast.Expr, kind importNamespaceCallKind) {
+	if p.options.outputFormat != config.FormatPreserve {
+		if id, ok := target.Data.(*js_ast.EIdentifier); ok && p.importItemsForNamespace[id.Ref].entries != nil {
+			key := importNamespaceCall{
+				ref:  id.Ref,
+				kind: kind,
+			}
+			if p.importNamespaceCCMap == nil {
+				p.importNamespaceCCMap = make(map[importNamespaceCall]bool)
+			}
+
+			// Don't log a warning for the same identifier more than once
+			if _, ok := p.importNamespaceCCMap[key]; ok {
+				return
+			}
+
+			p.importNamespaceCCMap[key] = true
+			r := js_lexer.RangeOfIdentifier(p.source, target.Loc)
+
+			// If this name came from an "import * as name" clause, suggest
+			// rewriting it as a default import in an attached note
+			var notes []logger.MsgData
+			name := p.symbols[id.Ref.InnerIndex].OriginalName
+			if member, ok := p.moduleScope.Members[name]; ok && member.Ref == id.Ref {
+				if star := p.source.RangeOfOperatorBefore(member.Loc, "*"); star.Len > 0 {
+					if as := p.source.RangeOfOperatorBefore(member.Loc, "as"); as.Len > 0 && as.Loc.Start > star.Loc.Start {
+						note := p.tracker.MsgData(
+							logger.Range{Loc: star.Loc, Len: js_lexer.RangeOfIdentifier(p.source, member.Loc).End() - star.Loc.Start},
+							fmt.Sprintf("Consider changing %q to a default import instead:", name))
+						note.Location.Suggestion = name
+						notes = append(notes, note)
+					}
+				}
+			}
+
+			if p.options.ts.Parse {
+				notes = append(notes, logger.MsgData{
+					Text: "Make sure to enable TypeScript's \"esModuleInterop\" setting so that TypeScript's type checker generates an error when you try to do this. " +
+						"You can read more about this setting here: https://www.typescriptlang.org/tsconfig#esModuleInterop",
+				})
+			}
+
+			// Pick the wording that matches how the namespace was misused
+			var verb string
+			var where string
+			var noun string
+
+			switch kind {
+			case exprKindCall:
+				verb = "Calling"
+				noun = "function"
+
+			case exprKindNew:
+				verb = "Constructing"
+				noun = "constructor"
+
+			case exprKindJSXTag:
+				verb = "Using"
+				where = " in a JSX expression"
+				noun = "component"
+			}
+
+			p.log.AddIDWithNotes(logger.MsgID_JS_CallImportNamespace, logger.Warning, &p.tracker, r, fmt.Sprintf(
+				"%s %q%s will crash at run-time because it's an import namespace object, not a %s",
+				verb,
+				p.symbols[id.Ref.InnerIndex].OriginalName,
+				where,
+				noun,
+			), notes)
+		}
+	}
+}
+
+// maybeMarkKnownGlobalConstructorAsPure marks "new" expressions of certain
+// well-known global constructors (WeakSet, WeakMap, Date, Set, Map) as
+// removable when their result is unused. Only argument shapes that provably
+// cannot run user code (iterators, string conversion, etc.) are considered.
+func (p *parser) maybeMarkKnownGlobalConstructorAsPure(e *js_ast.ENew) {
+	id, ok := e.Target.Data.(*js_ast.EIdentifier)
+	if !ok {
+		return
+	}
+	symbol := p.symbols[id.Ref.InnerIndex]
+	if symbol.Kind != ast.SymbolUnbound {
+		return
+	}
+
+	switch symbol.OriginalName {
+	case "WeakSet", "WeakMap":
+		switch len(e.Args) {
+		case 0:
+			// "new WeakSet()" is pure
+			e.CanBeUnwrappedIfUnused = true
+
+		case 1:
+			switch arg := e.Args[0].Data.(type) {
+			case *js_ast.ENull, *js_ast.EUndefined:
+				// "new WeakSet(null)" and "new WeakSet(void 0)" are pure
+				e.CanBeUnwrappedIfUnused = true
+
+			case *js_ast.EArray:
+				// "new WeakSet([])" is pure, but "new WeakSet([x])" is impure
+				// because an exception is thrown if "x" is not an object
+				if len(arg.Items) == 0 {
+					e.CanBeUnwrappedIfUnused = true
+				}
+
+			default:
+				// "new WeakSet(x)" is impure because the iterator for "x" could have side effects
+			}
+		}
+
+	case "Date":
+		switch len(e.Args) {
+		case 0:
+			// "new Date()" is pure
+			e.CanBeUnwrappedIfUnused = true
+
+		case 1:
+			// A known-primitive argument cannot run user code when converted
+			switch js_ast.KnownPrimitiveType(e.Args[0].Data) {
+			case js_ast.PrimitiveNull, js_ast.PrimitiveUndefined, js_ast.PrimitiveBoolean,
+				js_ast.PrimitiveNumber, js_ast.PrimitiveString:
+				// "new Date('')", "new Date(0)", "new Date(null)",
+				// "new Date(true)", "new Date(false)", and
+				// "new Date(undefined)" are all pure
+				e.CanBeUnwrappedIfUnused = true
+
+			default:
+				// "new Date(x)" is impure because converting "x" to a string could have side effects
+			}
+		}
+
+	case "Set":
+		switch len(e.Args) {
+		case 0:
+			// "new Set()" is pure
+			e.CanBeUnwrappedIfUnused = true
+
+		case 1:
+			switch e.Args[0].Data.(type) {
+			case *js_ast.EArray, *js_ast.ENull, *js_ast.EUndefined:
+				// "new Set([a, b, c])", "new Set(null)", and
+				// "new Set(void 0)" are all pure
+				e.CanBeUnwrappedIfUnused = true
+
+			default:
+				// "new Set(x)" is impure because the iterator for "x" could have side effects
+			}
+		}
+
+	case "Map":
+		switch len(e.Args) {
+		case 0:
+			// "new Map()" is pure
+			e.CanBeUnwrappedIfUnused = true
+
+		case 1:
+			switch arg := e.Args[0].Data.(type) {
+			case *js_ast.ENull, *js_ast.EUndefined:
+				// "new Map(null)" and "new Map(void 0)" are pure
+				e.CanBeUnwrappedIfUnused = true
+
+			case *js_ast.EArray:
+				// "new Map([[a, b], [c, d]])" is pure, but "new Map([x])" is
+				// impure because "x[0]" could have side effects
+				allEntriesAreArrays := true
+				for _, item := range arg.Items {
+					if _, ok := item.Data.(*js_ast.EArray); !ok {
+						allEntriesAreArrays = false
+						break
+					}
+				}
+				if allEntriesAreArrays {
+					e.CanBeUnwrappedIfUnused = true
+				}
+
+			default:
+				// "new Map(x)" is impure because the iterator for "x" could have side effects
+			}
+		}
+	}
+}
+
+// identifierOpts describes the syntactic context of an identifier reference
+// passed to "handleIdentifier".
+type identifierOpts struct {
+	assignTarget            js_ast.AssignTarget // how this identifier is being assigned to, if at all
+	isCallTarget            bool                // true if this identifier is the target of a call
+	isDeleteTarget          bool                // true if this identifier is the operand of "delete"
+	preferQuotedKey         bool                // forwarded to any generated "EImportIdentifier"
+	wasOriginallyIdentifier bool                // forwarded to any generated "EImportIdentifier"
+	matchAgainstDefines     bool                // NOTE(review): not read in this chunk; presumably enables "define" substitution — confirm at call sites
+}
+
+// handleIdentifier post-processes a visited identifier reference. In order it:
+// inlines minified constants, captures "arguments" inside lowered arrows,
+// reports assignments to imports, substitutes namespace-aliased and import
+// item references with "EImportIdentifier", inlines TypeScript enum values,
+// rewrites namespace-exported names into property accesses, and swaps the
+// global "require" for the bundler's "__require" stub. The order of these
+// checks matters; earlier substitutions return before later ones run.
+func (p *parser) handleIdentifier(loc logger.Loc, e *js_ast.EIdentifier, opts identifierOpts) js_ast.Expr {
+	ref := e.Ref
+
+	// Substitute inlined constants
+	if p.options.minifySyntax {
+		if value, ok := p.constValues[ref]; ok {
+			p.ignoreUsage(ref)
+			return js_ast.ConstValueToExpr(loc, value)
+		}
+	}
+
+	// Capture the "arguments" variable if necessary
+	if p.fnOnlyDataVisit.argumentsRef != nil && ref == *p.fnOnlyDataVisit.argumentsRef {
+		isInsideUnsupportedArrow := p.fnOrArrowDataVisit.isArrow && p.options.unsupportedJSFeatures.Has(compat.Arrow)
+		isInsideUnsupportedAsyncArrow := p.fnOnlyDataVisit.isInsideAsyncArrowFn && p.options.unsupportedJSFeatures.Has(compat.AsyncAwait)
+		if isInsideUnsupportedArrow || isInsideUnsupportedAsyncArrow {
+			return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: p.captureArguments()}}
+		}
+	}
+
+	// Create an error for assigning to an import namespace
+	if (opts.assignTarget != js_ast.AssignTargetNone ||
+		(opts.isDeleteTarget && p.symbols[ref.InnerIndex].ImportItemStatus == ast.ImportItemGenerated)) &&
+		p.symbols[ref.InnerIndex].Kind == ast.SymbolImport {
+		r := js_lexer.RangeOfIdentifier(p.source, loc)
+
+		// Try to come up with a setter name to try to make this message more understandable
+		var setterHint string
+		originalName := p.symbols[ref.InnerIndex].OriginalName
+		if js_ast.IsIdentifier(originalName) && originalName != "_" {
+			if len(originalName) == 1 || (len(originalName) > 1 && originalName[0] < utf8.RuneSelf) {
+				setterHint = fmt.Sprintf(" (e.g. \"set%s%s\")", strings.ToUpper(originalName[:1]), originalName[1:])
+			} else {
+				setterHint = fmt.Sprintf(" (e.g. \"set_%s\")", originalName)
+			}
+		}
+
+		notes := []logger.MsgData{{Text: "Imports are immutable in JavaScript. " +
+			fmt.Sprintf("To modify the value of this import, you must export a setter function in the "+
+				"imported file%s and then import and call that function here instead.", setterHint)}}
+
+		// When bundling this is a hard error; otherwise it's only a warning
+		// (or a debug message when weird-code warnings are suppressed)
+		if p.options.mode == config.ModeBundle {
+			p.log.AddErrorWithNotes(&p.tracker, r, fmt.Sprintf("Cannot assign to import %q", originalName), notes)
+		} else {
+			kind := logger.Warning
+			if p.suppressWarningsAboutWeirdCode {
+				kind = logger.Debug
+			}
+			p.log.AddIDWithNotes(logger.MsgID_JS_AssignToImport, kind, &p.tracker, r,
+				fmt.Sprintf("This assignment will throw because %q is an import", originalName), notes)
+		}
+	}
+
+	// Substitute an EImportIdentifier now if this has a namespace alias
+	if opts.assignTarget == js_ast.AssignTargetNone && !opts.isDeleteTarget {
+		symbol := &p.symbols[ref.InnerIndex]
+		if nsAlias := symbol.NamespaceAlias; nsAlias != nil {
+			data := p.dotOrMangledPropVisit(
+				js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: nsAlias.NamespaceRef}},
+				symbol.OriginalName, loc)
+
+			// Handle references to namespaces or namespace members
+			if tsMemberData, ok := p.refToTSNamespaceMemberData[nsAlias.NamespaceRef]; ok {
+				if ns, ok := tsMemberData.(*js_ast.TSNamespaceMemberNamespace); ok {
+					if member, ok := ns.ExportedMembers[nsAlias.Alias]; ok {
+						switch m := member.Data.(type) {
+						case *js_ast.TSNamespaceMemberEnumNumber:
+							return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: m.Value}}, nsAlias.Alias)
+
+						case *js_ast.TSNamespaceMemberEnumString:
+							return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: m.Value}}, nsAlias.Alias)
+
+						case *js_ast.TSNamespaceMemberNamespace:
+							p.tsNamespaceTarget = data
+							p.tsNamespaceMemberData = member.Data
+						}
+					}
+				}
+			}
+
+			return js_ast.Expr{Loc: loc, Data: data}
+		}
+	}
+
+	// Substitute an EImportIdentifier now if this is an import item
+	if p.isImportItem[ref] {
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EImportIdentifier{
+			Ref:                     ref,
+			PreferQuotedKey:         opts.preferQuotedKey,
+			WasOriginallyIdentifier: opts.wasOriginallyIdentifier,
+		}}
+	}
+
+	// Handle references to namespaces or namespace members
+	if tsMemberData, ok := p.refToTSNamespaceMemberData[ref]; ok {
+		switch m := tsMemberData.(type) {
+		case *js_ast.TSNamespaceMemberEnumNumber:
+			return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: m.Value}}, p.symbols[ref.InnerIndex].OriginalName)
+
+		case *js_ast.TSNamespaceMemberEnumString:
+			return p.wrapInlinedEnum(js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: m.Value}}, p.symbols[ref.InnerIndex].OriginalName)
+
+		case *js_ast.TSNamespaceMemberNamespace:
+			p.tsNamespaceTarget = e
+			p.tsNamespaceMemberData = tsMemberData
+		}
+	}
+
+	// Substitute a namespace export reference now if appropriate
+	if p.options.ts.Parse {
+		if nsRef, ok := p.isExportedInsideNamespace[ref]; ok {
+			name := p.symbols[ref.InnerIndex].OriginalName
+
+			// Otherwise, create a property access on the namespace
+			p.recordUsage(nsRef)
+			propertyAccess := p.dotOrMangledPropVisit(js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: nsRef}}, name, loc)
+			if p.tsNamespaceTarget == e {
+				p.tsNamespaceTarget = propertyAccess
+			}
+			return js_ast.Expr{Loc: loc, Data: propertyAccess}
+		}
+	}
+
+	// Swap references to the global "require" function with our "__require" stub
+	if ref == p.requireRef && !opts.isCallTarget {
+		if p.options.mode == config.ModeBundle && p.source.Index != runtime.SourceIndex && e != p.dotOrIndexTarget {
+			p.log.AddID(logger.MsgID_JS_IndirectRequire, logger.Debug, &p.tracker, js_lexer.RangeOfIdentifier(p.source, loc),
+				"Indirect calls to \"require\" will not be bundled")
+		}
+
+		return p.valueToSubstituteForRequire(loc)
+	}
+
+	// Mark any mutated symbols as mutable
+	if opts.assignTarget != js_ast.AssignTargetNone {
+		p.symbols[e.Ref.InnerIndex].Flags |= ast.CouldPotentiallyBeMutated
+	}
+
+	return js_ast.Expr{Loc: loc, Data: e}
+}
+
+// visitFnOpts controls how "visitFn" treats the function being visited.
+type visitFnOpts struct {
+	isMethod               bool // class/object method: inherits the class's inner name ref and static context
+	isDerivedClassCtor     bool // constructor of a derived class (propagated into the visitor state)
+	isLoweredPrivateMethod bool // method was lowered, which forces "super" property access lowering
+}
+
+// visitFn visits a function declaration or expression. It swaps in fresh
+// per-function visitor state, pushes the argument and body scopes (mirroring
+// the order used by the parse pass), visits the arguments and body, lowers
+// async/generator features if needed, and then restores the previous state.
+// Note: the scope push/pop order here must match the parse pass exactly.
+func (p *parser) visitFn(fn *js_ast.Fn, scopeLoc logger.Loc, opts visitFnOpts) {
+	var decoratorScope *js_ast.Scope
+	oldFnOrArrowData := p.fnOrArrowDataVisit
+	oldFnOnlyData := p.fnOnlyDataVisit
+	p.fnOrArrowDataVisit = fnOrArrowDataVisit{
+		isAsync:                        fn.IsAsync,
+		isGenerator:                    fn.IsGenerator,
+		isDerivedClassCtor:             opts.isDerivedClassCtor,
+		shouldLowerSuperPropertyAccess: (fn.IsAsync && p.options.unsupportedJSFeatures.Has(compat.AsyncAwait)) || opts.isLoweredPrivateMethod,
+	}
+	p.fnOnlyDataVisit = fnOnlyDataVisit{
+		isThisNested:       true,
+		isNewTargetAllowed: true,
+		argumentsRef:       &fn.ArgumentsRef,
+	}
+
+	// Methods inherit the enclosing class's context
+	if opts.isMethod {
+		decoratorScope = p.propMethodDecoratorScope
+		p.fnOnlyDataVisit.innerClassNameRef = oldFnOnlyData.innerClassNameRef
+		p.fnOnlyDataVisit.isInStaticClassContext = oldFnOnlyData.isInStaticClassContext
+	}
+
+	if fn.Name != nil {
+		p.recordDeclaredSymbol(fn.Name.Ref)
+	}
+
+	p.pushScopeForVisitPass(js_ast.ScopeFunctionArgs, scopeLoc)
+	p.visitArgs(fn.Args, visitArgsOpts{
+		hasRestArg:               fn.HasRestArg,
+		body:                     fn.Body.Block.Stmts,
+		isUniqueFormalParameters: fn.IsUniqueFormalParameters,
+		decoratorScope:           decoratorScope,
+	})
+	p.pushScopeForVisitPass(js_ast.ScopeFunctionBody, fn.Body.Loc)
+	if fn.Name != nil {
+		p.validateDeclaredSymbolName(fn.Name.Loc, p.symbols[fn.Name.Ref.InnerIndex].OriginalName)
+	}
+	fn.Body.Block.Stmts = p.visitStmtsAndPrependTempRefs(fn.Body.Block.Stmts, prependTempRefsOpts{fnBodyLoc: &fn.Body.Loc, kind: stmtsFnBody})
+	p.popScope()
+	p.lowerFunction(&fn.IsAsync, &fn.IsGenerator, &fn.Args, fn.Body.Loc, &fn.Body.Block, nil, &fn.HasRestArg, false /* isArrow */)
+	p.popScope()
+
+	p.fnOrArrowDataVisit = oldFnOrArrowData
+	p.fnOnlyDataVisit = oldFnOnlyData
+}
+
+// recordExport registers a named export for this module, reporting an error
+// with a note pointing at the original export when the alias is a duplicate.
+func (p *parser) recordExport(loc logger.Loc, alias string, ref ast.Ref) {
+	existing, isDuplicate := p.namedExports[alias]
+	if !isDuplicate {
+		p.namedExports[alias] = js_ast.NamedExport{AliasLoc: loc, Ref: ref}
+		return
+	}
+
+	// Duplicate exports are an error
+	note := p.tracker.MsgData(js_lexer.RangeOfIdentifier(p.source, existing.AliasLoc),
+		fmt.Sprintf("The name %q was originally exported here:", alias))
+	p.log.AddErrorWithNotes(&p.tracker, js_lexer.RangeOfIdentifier(p.source, loc),
+		fmt.Sprintf("Multiple exports with the same name %q", alias),
+		[]logger.MsgData{note})
+}
+
+// importsExportsScanResult is the outcome of scanning a statement list for
+// import and export statements.
+type importsExportsScanResult struct {
+	stmts               []js_ast.Stmt // the statements that were kept
+	keptImportEquals    bool          // a TypeScript import-equals statement was kept
+	removedImportEquals bool          // an unused TypeScript import-equals statement was removed
+}
+
+// Returns true if this is an unused TypeScript import-equals statement
+// Returns true if this is an unused TypeScript import-equals statement
+// ("import a = b.c"), in which case the caller should remove it. Removal is
+// recorded in "result" so the caller can iterate to a fixed point, since
+// removing one import-equals statement may make another one unused.
+func (p *parser) checkForUnusedTSImportEquals(s *js_ast.SLocal, result *importsExportsScanResult) bool {
+	if s.WasTSImportEquals && !s.IsExport {
+		decl := s.Decls[0]
+
+		// Skip to the underlying reference
+		// (unwrap property accesses like "a.b.c" down to the root "a")
+		value := s.Decls[0].ValueOrNil
+		for {
+			if dot, ok := value.Data.(*js_ast.EDot); ok {
+				value = dot.Target
+			} else {
+				break
+			}
+		}
+
+		// Is this an identifier reference and not a require() call?
+		valueRef := ast.InvalidRef
+		switch v := value.Data.(type) {
+		case *js_ast.EIdentifier:
+			valueRef = v.Ref
+		case *js_ast.EImportIdentifier:
+			valueRef = v.Ref
+		}
+		if valueRef != ast.InvalidRef {
+			// Is this import statement unused?
+			if ref := decl.Binding.Data.(*js_ast.BIdentifier).Ref; p.symbols[ref.InnerIndex].UseCountEstimate == 0 {
+				// Also don't count the referenced identifier
+				p.ignoreUsage(valueRef)
+
+				// Import-equals statements can come in any order. Removing one
+				// could potentially cause another one to be removable too.
+				// Continue iterating until a fixed point has been reached to make
+				// sure we get them all.
+				result.removedImportEquals = true
+				return true
+			} else {
+				result.keptImportEquals = true
+			}
+		}
+	}
+
+	return false
+}
+
+// scanForUnusedTSImportEquals filters unused TypeScript import-equals
+// statements out of "stmts" in place (reusing the backing array) and reports
+// via the result flags whether any such statements were kept or removed.
+func (p *parser) scanForUnusedTSImportEquals(stmts []js_ast.Stmt) (result importsExportsScanResult) {
+	kept := stmts[:0]
+
+	for _, stmt := range stmts {
+		local, isLocal := stmt.Data.(*js_ast.SLocal)
+		if isLocal && p.checkForUnusedTSImportEquals(local, &result) {
+			// Drop unused import-equals statements, since those likely
+			// correspond to types instead of values
+			continue
+		}
+		kept = append(kept, stmt)
+	}
+
+	result.stmts = kept
+	return
+}
+
+func (p *parser) scanForImportsAndExports(stmts []js_ast.Stmt) (result importsExportsScanResult) {
+	unusedImportFlags := p.options.ts.Config.UnusedImportFlags()
+	stmtsEnd := 0
+
+	for _, stmt := range stmts {
+		switch s := stmt.Data.(type) {
+		case *js_ast.SImport:
+			record := &p.importRecords[s.ImportRecordIndex]
+
+			// We implement TypeScript's "preserveValueImports" tsconfig.json setting
+			// to support the use case of compiling partial modules for compile-to-
+			// JavaScript languages such as Svelte. These languages try to reference
+			// imports in ways that are impossible for TypeScript and esbuild to know
+			// about when they are only given a partial module to compile. Here is an
+			// example of some Svelte code that contains a TypeScript snippet:
+			//
+			//   <script lang="ts">
+			//     import Counter from './Counter.svelte';
+			//     export let name: string = 'world';
+			//   </script>
+			//   <main>
+			//     <h1>Hello {name}!</h1>
+			//     <Counter />
+			//   </main>
+			//
+			// Tools that use esbuild to compile TypeScript code inside a Svelte
+			// file like this only give esbuild the contents of the <script> tag.
+			// The "preserveValueImports" setting avoids removing unused import
+			// names, which means additional code appended after the TypeScript-
+			// to-JavaScript conversion can still access those unused imports.
+			//
+			// There are two scenarios where we don't do this:
+			//
+			//   * If we're bundling, then we know we aren't being used to compile
+			//     a partial module. The parser is seeing the entire code for the
+			//     module so it's safe to remove unused imports. And also we don't
+			//     want the linker to generate errors about missing imports if the
+			//     imported file is also in the bundle.
+			//
+			//   * If identifier minification is enabled, then using esbuild as a
+			//     partial-module transform library wouldn't work anyway because
+			//     the names wouldn't match. And that means we're minifying so the
+			//     user is expecting the output to be as small as possible. So we
+			//     should omit unused imports.
+			//
+			keepUnusedImports := p.options.ts.Parse && (unusedImportFlags&config.TSUnusedImport_KeepValues) != 0 &&
+				p.options.mode != config.ModeBundle && !p.options.minifyIdentifiers
+
+			// Forbid non-default imports for JSON import assertions
+			if (record.Flags&ast.AssertTypeJSON) != 0 && p.options.mode == config.ModeBundle && s.Items != nil {
+				for _, item := range *s.Items {
+					if p.options.ts.Parse && p.tsUseCounts[item.Name.Ref.InnerIndex] == 0 && (unusedImportFlags&config.TSUnusedImport_KeepValues) == 0 {
+						// Do not count imports that TypeScript interprets as type annotations
+						continue
+					}
+					if item.Alias != "default" {
+						p.log.AddErrorWithNotes(&p.tracker, js_lexer.RangeOfIdentifier(p.source, item.AliasLoc),
+							fmt.Sprintf("Cannot use non-default import %q with a JSON import assertion", item.Alias),
+							p.notesForAssertTypeJSON(record, item.Alias))
+					}
+				}
+			}
+
+			// TypeScript always trims unused imports. This is important for
+			// correctness since some imports might be fake (only in the type
+			// system and used for type-only imports).
+			if (p.options.minifySyntax || p.options.ts.Parse) && !keepUnusedImports {
+				foundImports := false
+				isUnusedInTypeScript := true
+
+				// Remove the default name if it's unused
+				if s.DefaultName != nil {
+					foundImports = true
+					symbol := p.symbols[s.DefaultName.Ref.InnerIndex]
+
+					// TypeScript has a separate definition of unused
+					if p.options.ts.Parse && (p.tsUseCounts[s.DefaultName.Ref.InnerIndex] != 0 || (p.options.ts.Config.UnusedImportFlags()&config.TSUnusedImport_KeepValues) != 0) {
+						isUnusedInTypeScript = false
+					}
+
+					// Remove the symbol if it's never used outside a dead code region
+					if symbol.UseCountEstimate == 0 && (p.options.ts.Parse || !p.moduleScope.ContainsDirectEval) {
+						s.DefaultName = nil
+					}
+				}
+
+				// Remove the star import if it's unused
+				if s.StarNameLoc != nil {
+					foundImports = true
+					symbol := p.symbols[s.NamespaceRef.InnerIndex]
+
+					// TypeScript has a separate definition of unused
+					if p.options.ts.Parse && (p.tsUseCounts[s.NamespaceRef.InnerIndex] != 0 || (p.options.ts.Config.UnusedImportFlags()&config.TSUnusedImport_KeepValues) != 0) {
+						isUnusedInTypeScript = false
+					}
+
+					// Remove the symbol if it's never used outside a dead code region
+					if symbol.UseCountEstimate == 0 && (p.options.ts.Parse || !p.moduleScope.ContainsDirectEval) {
+						// Make sure we don't remove this if it was used for a property
+						// access while bundling
+						if importItems, ok := p.importItemsForNamespace[s.NamespaceRef]; ok && len(importItems.entries) == 0 {
+							s.StarNameLoc = nil
+						}
+					}
+				}
+
+				// Remove items if they are unused
+				if s.Items != nil {
+					foundImports = true
+					itemsEnd := 0
+
+					for _, item := range *s.Items {
+						symbol := p.symbols[item.Name.Ref.InnerIndex]
+
+						// TypeScript has a separate definition of unused
+						if p.options.ts.Parse && (p.tsUseCounts[item.Name.Ref.InnerIndex] != 0 || (p.options.ts.Config.UnusedImportFlags()&config.TSUnusedImport_KeepValues) != 0) {
+							isUnusedInTypeScript = false
+						}
+
+						// Remove the symbol if it's never used outside a dead code region
+						if symbol.UseCountEstimate != 0 || (!p.options.ts.Parse && p.moduleScope.ContainsDirectEval) {
+							(*s.Items)[itemsEnd] = item
+							itemsEnd++
+						}
+					}
+
+					// Filter the array by taking a slice
+					if itemsEnd == 0 {
+						s.Items = nil
+					} else {
+						*s.Items = (*s.Items)[:itemsEnd]
+					}
+				}
+
+				// Omit this statement if we're parsing TypeScript and all imports are
+				// unused. Note that this is distinct from the case where there were
+				// no imports at all (e.g. "import 'foo'"). In that case we want to keep
+				// the statement because the user is clearly trying to import the module
+				// for side effects.
+				//
+				// This culling is important for correctness when parsing TypeScript
+				// because a) the TypeScript compiler does ths and we want to match it
+				// and b) this may be a fake module that only exists in the type system
+				// and doesn't actually exist in reality.
+				//
+				// We do not want to do this culling in JavaScript though because the
+				// module may have side effects even if all imports are unused.
+				if p.options.ts.Parse && foundImports && isUnusedInTypeScript && (unusedImportFlags&config.TSUnusedImport_KeepStmt) == 0 {
+					// Ignore import records with a pre-filled source index. These are
+					// for injected files and we definitely do not want to trim these.
+					if !record.SourceIndex.IsValid() && !record.CopySourceIndex.IsValid() {
+						record.Flags |= ast.IsUnused
+						continue
+					}
+				}
+			}
+
+			if p.options.mode != config.ModePassThrough {
+				if s.StarNameLoc != nil {
+					// "importItemsForNamespace" has property accesses off the namespace
+					if importItems, ok := p.importItemsForNamespace[s.NamespaceRef]; ok && len(importItems.entries) > 0 {
+						// Sort keys for determinism
+						sorted := make([]string, 0, len(importItems.entries))
+						for alias := range importItems.entries {
+							sorted = append(sorted, alias)
+						}
+						sort.Strings(sorted)
+
+						// Create named imports for these property accesses. This will
+						// cause missing imports to generate useful warnings.
+						//
+						// It will also improve bundling efficiency for internal imports
+						// by still converting property accesses off the namespace into
+						// bare identifiers even if the namespace is still needed.
+						for _, alias := range sorted {
+							name := importItems.entries[alias]
+							p.namedImports[name.Ref] = js_ast.NamedImport{
+								Alias:             alias,
+								AliasLoc:          name.Loc,
+								NamespaceRef:      s.NamespaceRef,
+								ImportRecordIndex: s.ImportRecordIndex,
+							}
+
+							// Make sure the printer prints this as a property access
+							p.symbols[name.Ref.InnerIndex].NamespaceAlias = &ast.NamespaceAlias{
+								NamespaceRef: s.NamespaceRef,
+								Alias:        alias,
+							}
+
+							// Also record these automatically-generated top-level namespace alias symbols
+							p.declaredSymbols = append(p.declaredSymbols, js_ast.DeclaredSymbol{
+								Ref:        name.Ref,
+								IsTopLevel: true,
+							})
+						}
+					}
+				}
+
+				if s.DefaultName != nil {
+					p.namedImports[s.DefaultName.Ref] = js_ast.NamedImport{
+						Alias:             "default",
+						AliasLoc:          s.DefaultName.Loc,
+						NamespaceRef:      s.NamespaceRef,
+						ImportRecordIndex: s.ImportRecordIndex,
+					}
+				}
+
+				if s.StarNameLoc != nil {
+					p.namedImports[s.NamespaceRef] = js_ast.NamedImport{
+						AliasIsStar:       true,
+						AliasLoc:          *s.StarNameLoc,
+						NamespaceRef:      ast.InvalidRef,
+						ImportRecordIndex: s.ImportRecordIndex,
+					}
+				}
+
+				if s.Items != nil {
+					for _, item := range *s.Items {
+						p.namedImports[item.Name.Ref] = js_ast.NamedImport{
+							Alias:             item.Alias,
+							AliasLoc:          item.AliasLoc,
+							NamespaceRef:      s.NamespaceRef,
+							ImportRecordIndex: s.ImportRecordIndex,
+						}
+					}
+				}
+			}
+
+			p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, s.ImportRecordIndex)
+
+			if s.StarNameLoc != nil {
+				record.Flags |= ast.ContainsImportStar
+			}
+
+			if s.DefaultName != nil {
+				record.Flags |= ast.ContainsDefaultAlias
+			} else if s.Items != nil {
+				for _, item := range *s.Items {
+					if item.Alias == "default" {
+						record.Flags |= ast.ContainsDefaultAlias
+					} else if item.Alias == "__esModule" {
+						record.Flags |= ast.ContainsESModuleAlias
+					}
+				}
+			}
+
+		case *js_ast.SFunction:
+			if s.IsExport {
+				p.recordExport(s.Fn.Name.Loc, p.symbols[s.Fn.Name.Ref.InnerIndex].OriginalName, s.Fn.Name.Ref)
+			}
+
+		case *js_ast.SClass:
+			if s.IsExport {
+				p.recordExport(s.Class.Name.Loc, p.symbols[s.Class.Name.Ref.InnerIndex].OriginalName, s.Class.Name.Ref)
+			}
+
+		case *js_ast.SLocal:
+			if s.IsExport {
+				js_ast.ForEachIdentifierBindingInDecls(s.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
+					p.recordExport(loc, p.symbols[b.Ref.InnerIndex].OriginalName, b.Ref)
+				})
+			}
+
+			// Remove unused import-equals statements, since those likely
+			// correspond to types instead of values
+			if p.checkForUnusedTSImportEquals(s, &result) {
+				continue
+			}
+
+		case *js_ast.SExportDefault:
+			p.recordExport(s.DefaultName.Loc, "default", s.DefaultName.Ref)
+
+		case *js_ast.SExportClause:
+			for _, item := range s.Items {
+				p.recordExport(item.AliasLoc, item.Alias, item.Name.Ref)
+			}
+
+		case *js_ast.SExportStar:
+			record := &p.importRecords[s.ImportRecordIndex]
+			p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, s.ImportRecordIndex)
+
+			if s.Alias != nil {
+				// "export * as ns from 'path'"
+				p.namedImports[s.NamespaceRef] = js_ast.NamedImport{
+					AliasIsStar:       true,
+					AliasLoc:          s.Alias.Loc,
+					NamespaceRef:      ast.InvalidRef,
+					ImportRecordIndex: s.ImportRecordIndex,
+					IsExported:        true,
+				}
+				p.recordExport(s.Alias.Loc, s.Alias.OriginalName, s.NamespaceRef)
+
+				record.Flags |= ast.ContainsImportStar
+			} else {
+				// "export * from 'path'"
+				p.exportStarImportRecords = append(p.exportStarImportRecords, s.ImportRecordIndex)
+			}
+
+		case *js_ast.SExportFrom:
+			record := &p.importRecords[s.ImportRecordIndex]
+			p.importRecordsForCurrentPart = append(p.importRecordsForCurrentPart, s.ImportRecordIndex)
+
+			for _, item := range s.Items {
+				// Note that the imported alias is not item.Alias, which is the
+				// exported alias. This is somewhat confusing because each
+				// SExportFrom statement is basically SImport + SExportClause in one.
+				p.namedImports[item.Name.Ref] = js_ast.NamedImport{
+					Alias:             item.OriginalName,
+					AliasLoc:          item.Name.Loc,
+					NamespaceRef:      s.NamespaceRef,
+					ImportRecordIndex: s.ImportRecordIndex,
+					IsExported:        true,
+				}
+				p.recordExport(item.Name.Loc, item.Alias, item.Name.Ref)
+
+				if item.OriginalName == "default" {
+					record.Flags |= ast.ContainsDefaultAlias
+				} else if item.OriginalName == "__esModule" {
+					record.Flags |= ast.ContainsESModuleAlias
+				}
+			}
+
+			// Forbid non-default imports for JSON import assertions
+			if (record.Flags&ast.AssertTypeJSON) != 0 && p.options.mode == config.ModeBundle {
+				for _, item := range s.Items {
+					if item.OriginalName != "default" {
+						p.log.AddErrorWithNotes(&p.tracker, js_lexer.RangeOfIdentifier(p.source, item.Name.Loc),
+							fmt.Sprintf("Cannot use non-default import %q with a JSON import assertion", item.OriginalName),
+							p.notesForAssertTypeJSON(record, item.OriginalName))
+					}
+				}
+			}
+
+			// TypeScript always trims unused re-exports. This is important for
+			// correctness since some re-exports might be fake (only in the type
+			// system and used for type-only stuff).
+			if p.options.ts.Parse && len(s.Items) == 0 && (unusedImportFlags&config.TSUnusedImport_KeepStmt) == 0 {
+				continue
+			}
+		}
+
+		// Filter out statements we skipped over
+		stmts[stmtsEnd] = stmt
+		stmtsEnd++
+	}
+
+	result.stmts = stmts[:stmtsEnd]
+	return
+}
+
+func (p *parser) appendPart(parts []js_ast.Part, stmts []js_ast.Stmt) []js_ast.Part {
+	p.symbolUses = make(map[ast.Ref]js_ast.SymbolUse)
+	p.importSymbolPropertyUses = nil
+	p.symbolCallUses = nil
+	p.declaredSymbols = nil
+	p.importRecordsForCurrentPart = nil
+	p.scopesForCurrentPart = nil
+
+	part := js_ast.Part{
+		Stmts:      p.visitStmtsAndPrependTempRefs(stmts, prependTempRefsOpts{}),
+		SymbolUses: p.symbolUses,
+	}
+
+	// Sanity check
+	if p.currentScope != p.moduleScope {
+		panic("Internal error: Scope stack imbalance")
+	}
+
+	// Insert any relocated variable statements now
+	if len(p.relocatedTopLevelVars) > 0 {
+		alreadyDeclared := make(map[ast.Ref]bool)
+		for _, local := range p.relocatedTopLevelVars {
+			// Follow links because "var" declarations may be merged due to hoisting
+			for {
+				link := p.symbols[local.Ref.InnerIndex].Link
+				if link == ast.InvalidRef {
+					break
+				}
+				local.Ref = link
+			}
+
+			// Only declare a given relocated variable once
+			if !alreadyDeclared[local.Ref] {
+				alreadyDeclared[local.Ref] = true
+				part.Stmts = append(part.Stmts, js_ast.Stmt{Loc: local.Loc, Data: &js_ast.SLocal{
+					Decls: []js_ast.Decl{{
+						Binding: js_ast.Binding{Loc: local.Loc, Data: &js_ast.BIdentifier{Ref: local.Ref}},
+					}},
+				}})
+			}
+		}
+		p.relocatedTopLevelVars = nil
+	}
+
+	if len(part.Stmts) > 0 {
+		var flags js_ast.StmtsCanBeRemovedIfUnusedFlags
+		if p.options.mode == config.ModePassThrough {
+			// Exports are tracked separately, so export clauses can normally always
+			// be removed. Except we should keep them if we're not doing any format
+			// conversion because exports are not re-emitted in that case.
+			flags |= js_ast.KeepExportClauses
+		}
+		part.CanBeRemovedIfUnused = p.astHelpers.StmtsCanBeRemovedIfUnused(part.Stmts, flags)
+		part.DeclaredSymbols = p.declaredSymbols
+		part.ImportRecordIndices = p.importRecordsForCurrentPart
+		part.ImportSymbolPropertyUses = p.importSymbolPropertyUses
+		part.SymbolCallUses = p.symbolCallUses
+		part.Scopes = p.scopesForCurrentPart
+		parts = append(parts, part)
+	}
+	return parts
+}
+
+func newParser(log logger.Log, source logger.Source, lexer js_lexer.Lexer, options *Options) *parser {
+	if options.defines == nil {
+		defaultDefines := config.ProcessDefines(nil)
+		options.defines = &defaultDefines
+	}
+
+	p := &parser{
+		log:                log,
+		source:             source,
+		tracker:            logger.MakeLineColumnTracker(&source),
+		lexer:              lexer,
+		allowIn:            true,
+		options:            *options,
+		runtimeImports:     make(map[string]ast.LocRef),
+		promiseRef:         ast.InvalidRef,
+		regExpRef:          ast.InvalidRef,
+		afterArrowBodyLoc:  logger.Loc{Start: -1},
+		firstJSXElementLoc: logger.Loc{Start: -1},
+		importMetaRef:      ast.InvalidRef,
+		superCtorRef:       ast.InvalidRef,
+
+		// For lowering private methods
+		weakMapRef:     ast.InvalidRef,
+		weakSetRef:     ast.InvalidRef,
+		privateGetters: make(map[ast.Ref]ast.Ref),
+		privateSetters: make(map[ast.Ref]ast.Ref),
+
+		// These are for TypeScript
+		refToTSNamespaceMemberData: make(map[ast.Ref]js_ast.TSNamespaceMemberData),
+		emittedNamespaceVars:       make(map[ast.Ref]bool),
+		isExportedInsideNamespace:  make(map[ast.Ref]ast.Ref),
+		localTypeNames:             make(map[string]bool),
+
+		// These are for handling ES6 imports and exports
+		importItemsForNamespace: make(map[ast.Ref]namespaceImportItems),
+		isImportItem:            make(map[ast.Ref]bool),
+		namedImports:            make(map[ast.Ref]js_ast.NamedImport),
+		namedExports:            make(map[string]js_ast.NamedExport),
+
+		// For JSX runtime imports
+		jsxRuntimeImports: make(map[string]ast.LocRef),
+		jsxLegacyImports:  make(map[string]ast.LocRef),
+
+		suppressWarningsAboutWeirdCode: helpers.IsInsideNodeModules(source.KeyPath.Text),
+	}
+
+	if len(options.dropLabels) > 0 {
+		p.dropLabelsMap = make(map[string]struct{})
+		for _, name := range options.dropLabels {
+			p.dropLabelsMap[name] = struct{}{}
+		}
+	}
+
+	if !options.minifyWhitespace {
+		p.exprComments = make(map[logger.Loc][]string)
+	}
+
+	p.astHelpers = js_ast.MakeHelperContext(func(ref ast.Ref) bool {
+		return p.symbols[ref.InnerIndex].Kind == ast.SymbolUnbound
+	})
+
+	p.pushScopeForParsePass(js_ast.ScopeEntry, logger.Loc{Start: locModuleScope})
+
+	return p
+}
+
// Classic-runtime JSX defaults, used when the user doesn't configure them
var (
	defaultJSXFactory  = []string{"React", "createElement"}
	defaultJSXFragment = []string{"React", "Fragment"}
)

// Default import source for the automatic JSX runtime
const defaultJSXImportSource = "react"
+
// Parse parses a single JavaScript/TypeScript source file into an AST. The
// "ok" result is false when the lexer panicked with a syntax error (which has
// already been reported through "log"). Parsing happens in two passes: a parse
// pass that builds statements without binding symbols, followed by a visit
// pass (driven through "appendPart") that binds symbols and splits the file
// into parts for tree shaking.
func Parse(log logger.Log, source logger.Source, options Options) (result js_ast.AST, ok bool) {
	ok = true
	// Salvage a lexer panic as a normal parse failure; anything else is a
	// genuine internal error and is re-panicked
	defer func() {
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			ok = false
		} else if r != nil {
			panic(r)
		}
	}()

	// Default options for JSX elements
	if len(options.jsx.Factory.Parts) == 0 {
		options.jsx.Factory = config.DefineExpr{Parts: defaultJSXFactory}
	}
	if len(options.jsx.Fragment.Parts) == 0 && options.jsx.Fragment.Constant == nil {
		options.jsx.Fragment = config.DefineExpr{Parts: defaultJSXFragment}
	}
	if len(options.jsx.ImportSource) == 0 {
		options.jsx.ImportSource = defaultJSXImportSource
	}

	p := newParser(log, source, js_lexer.NewLexer(log, source, options.ts), &options)

	// Consume a leading hashbang comment
	hashbang := ""
	if p.lexer.Token == js_lexer.THashbang {
		hashbang = p.lexer.Identifier.String
		p.lexer.Next()
	}

	// Allow top-level await
	p.fnOrArrowDataParse.await = allowExpr
	p.fnOrArrowDataParse.isTopLevel = true

	// Parse the file in the first pass, but do not bind symbols
	stmts := p.parseStmtsUpTo(js_lexer.TEndOfFile, parseStmtOpts{
		isModuleScope:          true,
		allowDirectivePrologue: true,
	})
	p.prepareForVisitPass()

	// Insert a "use strict" directive if "alwaysStrict" is active
	var directives []string
	if tsAlwaysStrict := p.options.tsAlwaysStrict; tsAlwaysStrict != nil && tsAlwaysStrict.Value {
		directives = append(directives, "use strict")
	}

	// Strip off all leading directives
	{
		// "totalCount" counts prologue statements scanned; "keptCount" counts
		// the comment statements among them that stay in the statement list
		totalCount := 0
		keptCount := 0

		for _, stmt := range stmts {
			switch s := stmt.Data.(type) {
			case *js_ast.SComment:
				stmts[keptCount] = stmt
				keptCount++
				totalCount++
				continue

			case *js_ast.SDirective:
				if p.isStrictMode() && s.LegacyOctalLoc.Start > 0 {
					p.markStrictModeFeature(legacyOctalEscape, p.source.RangeOfLegacyOctalEscape(s.LegacyOctalLoc), "")
				}
				directive := helpers.UTF16ToString(s.Value)

				// Remove duplicate directives
				found := false
				for _, existing := range directives {
					if existing == directive {
						found = true
						break
					}
				}
				if !found {
					directives = append(directives, directive)
				}

				// Remove this directive from the statement list
				totalCount++
				continue
			}

			// Stop when the directive prologue ends
			break
		}

		// Splice out the directives while keeping any leading comments
		if keptCount < totalCount {
			stmts = append(stmts[:keptCount], stmts[totalCount:]...)
		}
	}

	// Add an empty part for the namespace export that we can fill in later
	nsExportPart := js_ast.Part{
		SymbolUses:           make(map[ast.Ref]js_ast.SymbolUse),
		CanBeRemovedIfUnused: true,
	}

	// "before" holds imports and injected files, "after" holds "export ="
	var before = []js_ast.Part{nsExportPart}
	var parts []js_ast.Part
	var after []js_ast.Part

	// Insert any injected import statements now that symbols have been declared
	for _, file := range p.options.injectedFiles {
		exportsNoConflict := make([]string, 0, len(file.Exports))
		symbols := make(map[string]ast.LocRef)

		if file.DefineName != "" {
			ref := p.newSymbol(ast.SymbolOther, file.DefineName)
			p.moduleScope.Generated = append(p.moduleScope.Generated, ref)
			symbols["default"] = ast.LocRef{Ref: ref}
			exportsNoConflict = append(exportsNoConflict, "default")
			p.injectedDefineSymbols = append(p.injectedDefineSymbols, ref)
		} else {
		nextExport:
			for _, export := range file.Exports {
				// Skip injecting this symbol if it's already declared locally (i.e. it's not a reference to a global)
				if _, ok := p.moduleScope.Members[export.Alias]; ok {
					continue
				}

				parts := strings.Split(export.Alias, ".")

				// The key must be a dot-separated identifier list
				for _, part := range parts {
					if !js_ast.IsIdentifier(part) {
						continue nextExport
					}
				}

				ref := p.newSymbol(ast.SymbolInjected, export.Alias)
				symbols[export.Alias] = ast.LocRef{Ref: ref}
				if len(parts) == 1 {
					// Handle the identifier case by generating an injected symbol directly
					p.moduleScope.Members[export.Alias] = js_ast.ScopeMember{Ref: ref}
				} else {
					// Handle the dot case using a map. This map is similar to the map
					// "options.defines.DotDefines" but is kept separate instead of being
					// implemented using the same mechanism because we allow you to use
					// "define" to rewrite something to an injected symbol (i.e. we allow
					// two levels of mappings). This was historically necessary to be able
					// to map a dot name to an injected symbol because we previously didn't
					// support dot names as injected symbols. But now dot names as injected
					// symbols has been implemented, so supporting two levels of mappings
					// is only for backward-compatibility.
					if p.injectedDotNames == nil {
						p.injectedDotNames = make(map[string][]injectedDotName)
					}
					tail := parts[len(parts)-1]
					p.injectedDotNames[tail] = append(p.injectedDotNames[tail], injectedDotName{parts: parts, injectedDefineIndex: uint32(len(p.injectedDefineSymbols))})
					p.injectedDefineSymbols = append(p.injectedDefineSymbols, ref)
				}
				exportsNoConflict = append(exportsNoConflict, export.Alias)
				if p.injectedSymbolSources == nil {
					p.injectedSymbolSources = make(map[ast.Ref]injectedSymbolSource)
				}
				p.injectedSymbolSources[ref] = injectedSymbolSource{
					source: file.Source,
					loc:    export.Loc,
				}
			}
		}

		// Copy loaders pass the source index as "copySourceIndex" instead
		if file.IsCopyLoader {
			before, _ = p.generateImportStmt(file.Source.KeyPath.Text, logger.Range{}, exportsNoConflict, before, symbols, nil, &file.Source.Index)
		} else {
			before, _ = p.generateImportStmt(file.Source.KeyPath.Text, logger.Range{}, exportsNoConflict, before, symbols, &file.Source.Index, nil)
		}
	}

	// When "using" declarations appear at the top level, we change all TDZ
	// variables in the top-level scope into "var" so that they aren't harmed
	// when they are moved into the try/catch statement that lowering will
	// generate.
	//
	// This is necessary because exported function declarations must be hoisted
	// outside of the try/catch statement because they can be evaluated before
	// this module is evaluated due to ESM cross-file function hoisting. And
	// these function bodies might reference anything else in this scope, which
	// must still work when those things are moved inside a try/catch statement.
	//
	// Before:
	//
	//   using foo = get()
	//   export function fn() {
	//     return [foo, new Bar]
	//   }
	//   class Bar {}
	//
	// After ("fn" is hoisted, "Bar" is converted to "var"):
	//
	//   export function fn() {
	//     return [foo, new Bar]
	//   }
	//   try {
	//     var foo = get();
	//     var Bar = class {};
	//   } catch (_) {
	//     ...
	//   } finally {
	//     ...
	//   }
	//
	// This is also necessary because other code might be appended to the code
	// that we're processing and expect to be able to access top-level variables.
	p.willWrapModuleInTryCatchForUsing = p.shouldLowerUsingDeclarations(stmts)

	// Bind symbols in a second pass over the AST. I started off doing this in a
	// single pass, but it turns out it's pretty much impossible to do this
	// correctly while handling arrow functions because of the grammar
	// ambiguities.
	//
	// Note that top-level lowered "using" declarations disable tree-shaking
	// because we only do tree-shaking on top-level statements and lowering
	// a top-level "using" declaration moves all top-level statements into a
	// nested scope.
	if !p.options.treeShaking || p.willWrapModuleInTryCatchForUsing {
		// When tree shaking is disabled, everything comes in a single part
		parts = p.appendPart(parts, stmts)
	} else {
		var preprocessedEnums map[int][]js_ast.Part
		if p.scopesInOrderForEnum != nil {
			// Preprocess TypeScript enums to improve code generation. Otherwise
			// uses of an enum before that enum has been declared won't be inlined:
			//
			//   console.log(Foo.FOO) // We want "FOO" to be inlined here
			//   const enum Foo { FOO = 0 }
			//
			// The TypeScript compiler itself contains code with this pattern, so
			// it's important to implement this optimization.
			for i, stmt := range stmts {
				if _, ok := stmt.Data.(*js_ast.SEnum); ok {
					if preprocessedEnums == nil {
						preprocessedEnums = make(map[int][]js_ast.Part)
					}
					// Temporarily swap in the enum's scope list while visiting it
					oldScopesInOrder := p.scopesInOrder
					p.scopesInOrder = p.scopesInOrderForEnum[stmt.Loc]
					preprocessedEnums[i] = p.appendPart(nil, []js_ast.Stmt{stmt})
					p.scopesInOrder = oldScopesInOrder
				}
			}
		}

		// When tree shaking is enabled, each top-level statement is potentially a separate part
		for i, stmt := range stmts {
			switch s := stmt.Data.(type) {
			case *js_ast.SLocal:
				// Split up top-level multi-declaration variable statements
				for _, decl := range s.Decls {
					clone := *s
					clone.Decls = []js_ast.Decl{decl}
					parts = p.appendPart(parts, []js_ast.Stmt{{Loc: stmt.Loc, Data: &clone}})
				}

			case *js_ast.SImport, *js_ast.SExportFrom, *js_ast.SExportStar:
				if p.options.mode != config.ModePassThrough {
					// Move imports (and import-like exports) to the top of the file to
					// ensure that if they are converted to a require() call, the effects
					// will take place before any other statements are evaluated.
					before = p.appendPart(before, []js_ast.Stmt{stmt})
				} else {
					// If we aren't doing any format conversion, just keep these statements
					// inline where they were. Exports are sorted so order doesn't matter:
					// https://262.ecma-international.org/6.0/#sec-module-namespace-exotic-objects.
					// However, this is likely an aesthetic issue that some people will
					// complain about. In addition, there are code transformation tools
					// such as TypeScript and Babel with bugs where the order of exports
					// in the file is incorrectly preserved instead of sorted, so preserving
					// the order of exports ourselves here may be preferable.
					parts = p.appendPart(parts, []js_ast.Stmt{stmt})
				}

			case *js_ast.SExportEquals:
				// TypeScript "export = value;" becomes "module.exports = value;". This
				// must happen at the end after everything is parsed because TypeScript
				// moves this statement to the end when it generates code.
				after = p.appendPart(after, []js_ast.Stmt{stmt})

			case *js_ast.SEnum:
				// Use the parts visited during enum preprocessing above
				parts = append(parts, preprocessedEnums[i]...)
				p.scopesInOrder = p.scopesInOrder[len(p.scopesInOrderForEnum[stmt.Loc]):]

			default:
				parts = p.appendPart(parts, []js_ast.Stmt{stmt})
			}
		}
	}

	// Insert a variable for "import.meta" at the top of the file if it was used.
	// We don't need to worry about "use strict" directives because this only
	// happens when bundling, in which case we are flattening the module scopes of
	// all modules together anyway so such directives are meaningless.
	if p.importMetaRef != ast.InvalidRef {
		importMetaStmt := js_ast.Stmt{Data: &js_ast.SLocal{
			Kind: p.selectLocalKind(js_ast.LocalConst),
			Decls: []js_ast.Decl{{
				Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: p.importMetaRef}},
				ValueOrNil: js_ast.Expr{Data: &js_ast.EObject{}},
			}},
		}}
		before = append(before, js_ast.Part{
			Stmts:                []js_ast.Stmt{importMetaStmt},
			SymbolUses:           make(map[ast.Ref]js_ast.SymbolUse),
			DeclaredSymbols:      []js_ast.DeclaredSymbol{{Ref: p.importMetaRef, IsTopLevel: true}},
			CanBeRemovedIfUnused: true,
		})
	}

	// Pop the module scope to apply the "ContainsDirectEval" rules
	p.popScope()

	result = p.toAST(before, parts, after, hashbang, directives)
	result.SourceMapComment = p.lexer.SourceMappingURL
	return
}
+
+func LazyExportAST(log logger.Log, source logger.Source, options Options, expr js_ast.Expr, apiCall string) js_ast.AST {
+	// Don't create a new lexer using js_lexer.NewLexer() here since that will
+	// actually attempt to parse the first token, which might cause a syntax
+	// error.
+	p := newParser(log, source, js_lexer.Lexer{}, &options)
+	p.prepareForVisitPass()
+
+	// Optionally call a runtime API function to transform the expression
+	if apiCall != "" {
+		p.symbolUses = make(map[ast.Ref]js_ast.SymbolUse)
+		expr = p.callRuntime(expr.Loc, apiCall, []js_ast.Expr{expr})
+	}
+
+	// Add an empty part for the namespace export that we can fill in later
+	nsExportPart := js_ast.Part{
+		SymbolUses:           make(map[ast.Ref]js_ast.SymbolUse),
+		CanBeRemovedIfUnused: true,
+	}
+
+	// Defer the actual code generation until linking
+	part := js_ast.Part{
+		Stmts:      []js_ast.Stmt{{Loc: expr.Loc, Data: &js_ast.SLazyExport{Value: expr}}},
+		SymbolUses: p.symbolUses,
+	}
+	p.symbolUses = nil
+
+	ast := p.toAST([]js_ast.Part{nsExportPart}, []js_ast.Part{part}, nil, "", nil)
+	ast.HasLazyExport = true
+	return ast
+}
+
+func GlobResolveAST(log logger.Log, source logger.Source, importRecords []ast.ImportRecord, object *js_ast.EObject, name string) js_ast.AST {
+	// Don't create a new lexer using js_lexer.NewLexer() here since that will
+	// actually attempt to parse the first token, which might cause a syntax
+	// error.
+	p := newParser(log, source, js_lexer.Lexer{}, &Options{})
+	p.prepareForVisitPass()
+
+	// Add an empty part for the namespace export that we can fill in later
+	nsExportPart := js_ast.Part{
+		SymbolUses:           make(map[ast.Ref]js_ast.SymbolUse),
+		CanBeRemovedIfUnused: true,
+	}
+
+	if len(p.importRecords) != 0 {
+		panic("Internal error")
+	}
+	p.importRecords = importRecords
+
+	importRecordIndices := make([]uint32, 0, len(importRecords))
+	for importRecordIndex := range importRecords {
+		importRecordIndices = append(importRecordIndices, uint32(importRecordIndex))
+	}
+
+	p.symbolUses = make(map[ast.Ref]js_ast.SymbolUse)
+	ref := p.newSymbol(ast.SymbolOther, name)
+	p.moduleScope.Generated = append(p.moduleScope.Generated, ref)
+
+	part := js_ast.Part{
+		Stmts: []js_ast.Stmt{{Data: &js_ast.SLocal{
+			IsExport: true,
+			Decls: []js_ast.Decl{{
+				Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: ref}},
+				ValueOrNil: p.callRuntime(logger.Loc{}, "__glob", []js_ast.Expr{{Data: object}}),
+			}},
+		}}},
+		ImportRecordIndices: importRecordIndices,
+		SymbolUses:          p.symbolUses,
+	}
+	p.symbolUses = nil
+
+	p.esmExportKeyword.Len = 1
+	return p.toAST([]js_ast.Part{nsExportPart}, []js_ast.Part{part}, nil, "", nil)
+}
+
+func ParseDefineExprOrJSON(text string) (config.DefineExpr, js_ast.E) {
+	if text == "" {
+		return config.DefineExpr{}, nil
+	}
+
+	// Try a property chain
+	parts := strings.Split(text, ".")
+	for i, part := range parts {
+		if !js_ast.IsIdentifier(part) {
+			parts = nil
+			break
+		}
+
+		// Don't allow most keywords as the identifier
+		if i == 0 {
+			if token, ok := js_lexer.Keywords[part]; ok && token != js_lexer.TNull && token != js_lexer.TThis &&
+				(token != js_lexer.TImport || len(parts) < 2 || parts[1] != "meta") {
+				parts = nil
+				break
+			}
+		}
+	}
+	if parts != nil {
+		return config.DefineExpr{Parts: parts}, nil
+	}
+
+	// Try parsing a JSON value
+	log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
+	expr, ok := ParseJSON(log, logger.Source{Contents: text}, JSONOptions{})
+	if !ok {
+		return config.DefineExpr{}, nil
+	}
+
+	// Only primitive literals are inlined directly
+	switch expr.Data.(type) {
+	case *js_ast.ENull, *js_ast.EBoolean, *js_ast.EString, *js_ast.ENumber:
+		return config.DefineExpr{Constant: expr.Data}, nil
+	}
+
+	// If it's not a primitive, return the whole compound JSON value to be injected out-of-line
+	return config.DefineExpr{}, expr.Data
+}
+
// whyESM enumerates the possible reasons a file can be classified as an
// ECMAScript module. The values after "whyESMUnknown" mirror the order in
// which "whyESModule" checks for each piece of evidence.
type whyESM uint8

const (
	whyESMUnknown               whyESM = iota
	whyESMExportKeyword                // An "export" keyword was seen
	whyESMImportMeta                   // "import.meta" was used
	whyESMTopLevelAwait                // A top-level "await" keyword was seen
	whyESMFileMJS                      // The file name ends in ".mjs"
	whyESMFileMTS                      // The file name ends in ".mts"
	whyESMTypeModulePackageJSON        // The enclosing "package.json" sets "type": "module"
	whyESMImportStatement              // An "import" statement was seen
)
+
+// Say why this the current file is being considered an ES module
+func (p *parser) whyESModule() (whyESM, []logger.MsgData) {
+	because := "This file is considered to be an ECMAScript module because"
+	switch {
+	case p.esmExportKeyword.Len > 0:
+		return whyESMExportKeyword, []logger.MsgData{p.tracker.MsgData(p.esmExportKeyword,
+			because+" of the \"export\" keyword here:")}
+
+	case p.esmImportMeta.Len > 0:
+		return whyESMImportMeta, []logger.MsgData{p.tracker.MsgData(p.esmImportMeta,
+			because+" of the use of \"import.meta\" here:")}
+
+	case p.topLevelAwaitKeyword.Len > 0:
+		return whyESMTopLevelAwait, []logger.MsgData{p.tracker.MsgData(p.topLevelAwaitKeyword,
+			because+" of the top-level \"await\" keyword here:")}
+
+	case p.options.moduleTypeData.Type == js_ast.ModuleESM_MJS:
+		return whyESMFileMJS, []logger.MsgData{{Text: because + " the file name ends in \".mjs\"."}}
+
+	case p.options.moduleTypeData.Type == js_ast.ModuleESM_MTS:
+		return whyESMFileMTS, []logger.MsgData{{Text: because + " the file name ends in \".mts\"."}}
+
+	case p.options.moduleTypeData.Type == js_ast.ModuleESM_PackageJSON:
+		tracker := logger.MakeLineColumnTracker(p.options.moduleTypeData.Source)
+		return whyESMTypeModulePackageJSON, []logger.MsgData{tracker.MsgData(p.options.moduleTypeData.Range,
+			because+" the enclosing \"package.json\" file sets the type of this file to \"module\":")}
+
+	// This case must come last because some code cares about the "import"
+	// statement keyword and some doesn't, and we don't want to give code
+	// that doesn't care about the "import" statement the wrong error message.
+	case p.esmImportStatementKeyword.Len > 0:
+		return whyESMImportStatement, []logger.MsgData{p.tracker.MsgData(p.esmImportStatementKeyword,
+			because+" of the \"import\" keyword here:")}
+	}
+	return whyESMUnknown, nil
+}
+
+// prepareForVisitPass initializes module-level state between the parse pass
+// and the visit pass: it pushes the module scope, decides whether this file
+// is considered an ECMAScript module, applies implicit strict mode, hoists
+// symbols, declares the CommonJS "require"/"exports"/"module" symbols, and
+// applies any "@jsx*" pragma comments collected during lexing.
+func (p *parser) prepareForVisitPass() {
+	p.pushScopeForVisitPass(js_ast.ScopeEntry, logger.Loc{Start: locModuleScope})
+	p.fnOrArrowDataVisit.isOutsideFnOrArrow = true
+	p.moduleScope = p.currentScope
+
+	// Force-enable strict mode if that's the way TypeScript is configured
+	if tsAlwaysStrict := p.options.tsAlwaysStrict; tsAlwaysStrict != nil && tsAlwaysStrict.Value {
+		p.currentScope.StrictMode = js_ast.ImplicitStrictModeTSAlwaysStrict
+	}
+
+	// Determine whether or not this file is ESM
+	p.isFileConsideredToHaveESMExports =
+		p.esmExportKeyword.Len > 0 ||
+			p.esmImportMeta.Len > 0 ||
+			p.topLevelAwaitKeyword.Len > 0 ||
+			p.options.moduleTypeData.Type.IsESM()
+	p.isFileConsideredESM =
+		p.isFileConsideredToHaveESMExports ||
+			p.esmImportStatementKeyword.Len > 0
+
+	// Legacy HTML comments are not allowed in ESM files
+	if p.isFileConsideredESM && p.lexer.LegacyHTMLCommentRange.Len > 0 {
+		_, notes := p.whyESModule()
+		p.log.AddErrorWithNotes(&p.tracker, p.lexer.LegacyHTMLCommentRange,
+			"Legacy HTML single-line comments are not allowed in ECMAScript modules", notes)
+	}
+
+	// ECMAScript modules are always interpreted as strict mode. This has to be
+	// done before "hoistSymbols" because strict mode can alter hoisting (!).
+	if p.isFileConsideredESM {
+		p.moduleScope.RecursiveSetStrictMode(js_ast.ImplicitStrictModeESM)
+	}
+
+	p.hoistSymbols(p.moduleScope)
+
+	// "require" is declared as a proper CommonJS symbol unless we're just
+	// passing the file through untouched
+	if p.options.mode != config.ModePassThrough {
+		p.requireRef = p.declareCommonJSSymbol(ast.SymbolUnbound, "require")
+	} else {
+		p.requireRef = p.newSymbol(ast.SymbolUnbound, "require")
+	}
+
+	// CommonJS-style exports are only enabled if this isn't using ECMAScript-
+	// style exports. You can still use "require" in ESM, just not "module" or
+	// "exports". You can also still use "import" in CommonJS.
+	if p.options.mode != config.ModePassThrough && !p.isFileConsideredToHaveESMExports {
+		// CommonJS-style exports
+		p.exportsRef = p.declareCommonJSSymbol(ast.SymbolHoisted, "exports")
+		p.moduleRef = p.declareCommonJSSymbol(ast.SymbolHoisted, "module")
+	} else {
+		// ESM-style exports
+		p.exportsRef = p.newSymbol(ast.SymbolHoisted, "exports")
+		p.moduleRef = p.newSymbol(ast.SymbolHoisted, "module")
+	}
+
+	// Handle "@jsx" and "@jsxFrag" pragmas now that lexing is done
+	if p.options.jsx.Parse {
+		if jsxRuntime := p.lexer.JSXRuntimePragmaComment; jsxRuntime.Text != "" {
+			if jsxRuntime.Text == "automatic" {
+				p.options.jsx.AutomaticRuntime = true
+			} else if jsxRuntime.Text == "classic" {
+				p.options.jsx.AutomaticRuntime = false
+			} else {
+				p.log.AddIDWithNotes(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxRuntime.Range,
+					fmt.Sprintf("Invalid JSX runtime: %q", jsxRuntime.Text),
+					[]logger.MsgData{{Text: "The JSX runtime can only be set to either \"classic\" or \"automatic\"."}})
+			}
+		}
+
+		if jsxFactory := p.lexer.JSXFactoryPragmaComment; jsxFactory.Text != "" {
+			if p.options.jsx.AutomaticRuntime {
+				p.log.AddID(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxFactory.Range,
+					"The JSX factory cannot be set when using React's \"automatic\" JSX transform")
+			} else if expr, _ := ParseDefineExprOrJSON(jsxFactory.Text); len(expr.Parts) > 0 {
+				p.options.jsx.Factory = expr
+			} else {
+				p.log.AddID(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxFactory.Range,
+					fmt.Sprintf("Invalid JSX factory: %s", jsxFactory.Text))
+			}
+		}
+
+		if jsxFragment := p.lexer.JSXFragmentPragmaComment; jsxFragment.Text != "" {
+			if p.options.jsx.AutomaticRuntime {
+				p.log.AddID(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxFragment.Range,
+					"The JSX fragment cannot be set when using React's \"automatic\" JSX transform")
+			} else if expr, _ := ParseDefineExprOrJSON(jsxFragment.Text); len(expr.Parts) > 0 || expr.Constant != nil {
+				p.options.jsx.Fragment = expr
+			} else {
+				p.log.AddID(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxFragment.Range,
+					fmt.Sprintf("Invalid JSX fragment: %s", jsxFragment.Text))
+			}
+		}
+
+		if jsxImportSource := p.lexer.JSXImportSourcePragmaComment; jsxImportSource.Text != "" {
+			if !p.options.jsx.AutomaticRuntime {
+				p.log.AddIDWithNotes(logger.MsgID_JS_UnsupportedJSXComment, logger.Warning, &p.tracker, jsxImportSource.Range,
+					"The JSX import source cannot be set without also enabling React's \"automatic\" JSX transform",
+					[]logger.MsgData{{Text: "You can enable React's \"automatic\" JSX transform for this file by using a \"@jsxRuntime automatic\" comment."}})
+			} else {
+				p.options.jsx.ImportSource = jsxImportSource.Text
+			}
+		}
+	}
+
+	// Force-enable strict mode if the JSX "automatic" runtime is enabled and
+	// there is at least one JSX element. This is because the automatically-
+	// generated import statement turns the file into an ES module. This behavior
+	// matches TypeScript which also does this. See this PR for more information:
+	// https://github.com/microsoft/TypeScript/pull/39199
+	if p.currentScope.StrictMode == js_ast.SloppyMode && p.options.jsx.AutomaticRuntime && p.firstJSXElementLoc.Start != -1 {
+		p.currentScope.StrictMode = js_ast.ImplicitStrictModeJSXAutomaticRuntime
+	}
+}
+
+// declareCommonJSSymbol declares one of the CommonJS wrapper symbols
+// ("require", "exports", or "module") in the module scope, merging with a
+// compatible user-declared "var" of the same name when node would allow it.
+// It returns the symbol to use for references to that name.
+func (p *parser) declareCommonJSSymbol(kind ast.SymbolKind, name string) ast.Ref {
+	member, ok := p.moduleScope.Members[name]
+
+	// If the code declared this symbol using "var name", then this is actually
+	// not a collision. For example, node will let you do this:
+	//
+	//   var exports;
+	//   module.exports.foo = 123;
+	//   console.log(exports.foo);
+	//
+	// This works because node's implementation of CommonJS wraps the entire
+	// source file like this:
+	//
+	//   (function(require, exports, module, __filename, __dirname) {
+	//     var exports;
+	//     module.exports.foo = 123;
+	//     console.log(exports.foo);
+	//   })
+	//
+	// Both the "exports" argument and "var exports" are hoisted variables, so
+	// they don't collide.
+	if ok && p.symbols[member.Ref.InnerIndex].Kind == ast.SymbolHoisted &&
+		kind == ast.SymbolHoisted && !p.isFileConsideredToHaveESMExports {
+		return member.Ref
+	}
+
+	// Create a new symbol if we didn't merge with an existing one above
+	ref := p.newSymbol(kind, name)
+
+	// If the variable wasn't declared, declare it now. This means any references
+	// to this name will become bound to this symbol after this (since we haven't
+	// run the visit pass yet).
+	if !ok {
+		p.moduleScope.Members[name] = js_ast.ScopeMember{Ref: ref, Loc: logger.Loc{Start: -1}}
+		return ref
+	}
+
+	// If the variable was declared, then it shadows this symbol. The code in
+	// this module will be unable to reference this symbol. However, we must
+	// still add the symbol to the scope so it gets minified (automatically-
+	// generated code may still reference the symbol).
+	p.moduleScope.Generated = append(p.moduleScope.Generated, ref)
+	return ref
+}
+
+// Compute a character frequency histogram for everything that's not a bound
+// symbol. This is used to modify how minified names are generated for slightly
+// better gzip compression. Even though it's a very small win, we still do it
+// because it's simple to do and very cheap to compute.
+//
+// Returns nil unless identifier minification is enabled for this file (and
+// never for the injected runtime source itself).
+func (p *parser) computeCharacterFrequency() *ast.CharFreq {
+	if !p.options.minifyIdentifiers || p.source.Index == runtime.SourceIndex {
+		return nil
+	}
+
+	// Add everything in the file to the histogram
+	charFreq := &ast.CharFreq{}
+	charFreq.Scan(p.source.Contents, 1)
+
+	// Subtract out all comments
+	for _, commentRange := range p.lexer.AllComments {
+		charFreq.Scan(p.source.TextForRange(commentRange), -1)
+	}
+
+	// Subtract out all import paths
+	for _, record := range p.importRecords {
+		if !record.SourceIndex.IsValid() {
+			charFreq.Scan(record.Path.Text, -1)
+		}
+	}
+
+	// Subtract out all symbols that will be minified, walking every nested
+	// scope recursively (labels are counted one extra time for the label name)
+	var visit func(*js_ast.Scope)
+	visit = func(scope *js_ast.Scope) {
+		for _, member := range scope.Members {
+			symbol := &p.symbols[member.Ref.InnerIndex]
+			if symbol.SlotNamespace() != ast.SlotMustNotBeRenamed {
+				charFreq.Scan(symbol.OriginalName, -int32(symbol.UseCountEstimate))
+			}
+		}
+		if scope.Label.Ref != ast.InvalidRef {
+			symbol := &p.symbols[scope.Label.Ref.InnerIndex]
+			if symbol.SlotNamespace() != ast.SlotMustNotBeRenamed {
+				charFreq.Scan(symbol.OriginalName, -int32(symbol.UseCountEstimate)-1)
+			}
+		}
+		for _, child := range scope.Children {
+			visit(child)
+		}
+	}
+	visit(p.moduleScope)
+
+	// Subtract out all properties that will be mangled
+	for _, ref := range p.mangledProps {
+		symbol := &p.symbols[ref.InnerIndex]
+		charFreq.Scan(symbol.OriginalName, -int32(symbol.UseCountEstimate))
+	}
+
+	return charFreq
+}
+
+// generateImportStmt appends a synthesized ES import statement for the given
+// path and named imports to "parts", registering an import record and the
+// per-import bookkeeping ("isImportItem", "namedImports"). The optional
+// sourceIndex/copySourceIndex pin the record to a known source. It returns
+// the extended parts slice and the new import record's index.
+func (p *parser) generateImportStmt(
+	path string,
+	pathRange logger.Range,
+	imports []string,
+	parts []js_ast.Part,
+	symbols map[string]ast.LocRef,
+	sourceIndex *uint32,
+	copySourceIndex *uint32,
+) ([]js_ast.Part, uint32) {
+	// If no range was provided, borrow the earliest location of any imported
+	// symbol so error messages point at something meaningful
+	if pathRange.Len == 0 {
+		isFirst := true
+		for _, it := range symbols {
+			if isFirst || it.Loc.Start < pathRange.Loc.Start {
+				pathRange.Loc = it.Loc
+			}
+			isFirst = false
+		}
+	}
+
+	namespaceRef := p.newSymbol(ast.SymbolOther, "import_"+js_ast.GenerateNonUniqueNameFromPath(path))
+	p.moduleScope.Generated = append(p.moduleScope.Generated, namespaceRef)
+	declaredSymbols := make([]js_ast.DeclaredSymbol, 1+len(imports))
+	clauseItems := make([]js_ast.ClauseItem, len(imports))
+	importRecordIndex := p.addImportRecord(ast.ImportStmt, pathRange, path, nil, 0)
+	if sourceIndex != nil {
+		p.importRecords[importRecordIndex].SourceIndex = ast.MakeIndex32(*sourceIndex)
+	}
+	if copySourceIndex != nil {
+		p.importRecords[importRecordIndex].CopySourceIndex = ast.MakeIndex32(*copySourceIndex)
+	}
+	declaredSymbols[0] = js_ast.DeclaredSymbol{Ref: namespaceRef, IsTopLevel: true}
+
+	// Create per-import information
+	for i, alias := range imports {
+		it := symbols[alias]
+		declaredSymbols[i+1] = js_ast.DeclaredSymbol{Ref: it.Ref, IsTopLevel: true}
+		clauseItems[i] = js_ast.ClauseItem{
+			Alias:    alias,
+			AliasLoc: it.Loc,
+			Name:     ast.LocRef{Loc: it.Loc, Ref: it.Ref},
+		}
+		p.isImportItem[it.Ref] = true
+		p.namedImports[it.Ref] = js_ast.NamedImport{
+			Alias:             alias,
+			AliasLoc:          it.Loc,
+			NamespaceRef:      namespaceRef,
+			ImportRecordIndex: importRecordIndex,
+		}
+	}
+
+	// Append a single import to the end of the file (ES6 imports are hoisted
+	// so we don't need to worry about where the import statement goes)
+	return append(parts, js_ast.Part{
+		DeclaredSymbols:     declaredSymbols,
+		ImportRecordIndices: []uint32{importRecordIndex},
+		Stmts: []js_ast.Stmt{{Loc: pathRange.Loc, Data: &js_ast.SImport{
+			NamespaceRef:      namespaceRef,
+			Items:             &clauseItems,
+			ImportRecordIndex: importRecordIndex,
+			IsSingleLine:      true,
+		}}},
+	}), importRecordIndex
+}
+
+// sortedKeysOfMapStringLocRef returns the map's keys in ascending
+// lexicographic order so that iteration over them is deterministic.
+func sortedKeysOfMapStringLocRef(in map[string]ast.LocRef) []string {
+	sorted := make([]string, 0, len(in))
+	for name := range in {
+		sorted = append(sorted, name)
+	}
+	sort.Strings(sorted)
+	return sorted
+}
+
+// toAST assembles the final AST for this file after the visit pass. It
+// injects generated import statements (runtime, JSX runtime/legacy, glob
+// patterns), scans parts for imports/exports (repeatedly, to remove unused
+// TypeScript import-equals statements), marks re-exported imports, builds
+// the top-level-symbol-to-part map used for tree shaking and code splitting,
+// and determines the module's exports kind (ESM vs. CommonJS).
+func (p *parser) toAST(before, parts, after []js_ast.Part, hashbang string, directives []string) js_ast.AST {
+	// Insert an import statement for any runtime imports we generated
+	if len(p.runtimeImports) > 0 && !p.options.omitRuntimeForTests {
+		keys := sortedKeysOfMapStringLocRef(p.runtimeImports)
+		sourceIndex := runtime.SourceIndex
+		before, _ = p.generateImportStmt("<runtime>", logger.Range{}, keys, before, p.runtimeImports, &sourceIndex, nil)
+	}
+
+	// Insert an import statement for any jsx runtime imports we generated
+	if len(p.jsxRuntimeImports) > 0 && !p.options.omitJSXRuntimeForTests {
+		keys := sortedKeysOfMapStringLocRef(p.jsxRuntimeImports)
+
+		// Determine the runtime source and whether it's prod or dev
+		path := p.options.jsx.ImportSource
+		if p.options.jsx.Development {
+			path = path + "/jsx-dev-runtime"
+		} else {
+			path = path + "/jsx-runtime"
+		}
+
+		before, _ = p.generateImportStmt(path, logger.Range{}, keys, before, p.jsxRuntimeImports, nil, nil)
+	}
+
+	// Insert an import statement for any legacy jsx imports we generated (i.e., createElement)
+	if len(p.jsxLegacyImports) > 0 && !p.options.omitJSXRuntimeForTests {
+		keys := sortedKeysOfMapStringLocRef(p.jsxLegacyImports)
+		path := p.options.jsx.ImportSource
+		before, _ = p.generateImportStmt(path, logger.Range{}, keys, before, p.jsxLegacyImports, nil, nil)
+	}
+
+	// Insert imports for each glob pattern
+	for _, glob := range p.globPatternImports {
+		symbols := map[string]ast.LocRef{glob.name: {Loc: glob.approximateRange.Loc, Ref: glob.ref}}
+		var importRecordIndex uint32
+		before, importRecordIndex = p.generateImportStmt(helpers.GlobPatternToString(glob.parts), glob.approximateRange, []string{glob.name}, before, symbols, nil, nil)
+		record := &p.importRecords[importRecordIndex]
+		record.AssertOrWith = glob.assertOrWith
+		record.GlobPattern = &ast.GlobPattern{
+			Parts:       glob.parts,
+			ExportAlias: glob.name,
+			Kind:        glob.kind,
+		}
+	}
+
+	// Generated imports are inserted before other code instead of appending them
+	// to the end of the file. Appending them should work fine because JavaScript
+	// import statements are "hoisted" to run before the importing file. However,
+	// some buggy JavaScript toolchains such as the TypeScript compiler convert
+	// ESM into CommonJS by replacing "import" statements inline without doing
+	// any hoisting, which is incorrect. See the following issue for more info:
+	// https://github.com/microsoft/TypeScript/issues/16166. Since JSX-related
+	// imports are present in the generated code when bundling is disabled, and
+	// could therefore be processed by these buggy tools, it's more robust to put
+	// them at the top even though it means potentially reallocating almost the
+	// entire array of parts.
+	if len(before) > 0 {
+		parts = append(before, parts...)
+	}
+	parts = append(parts, after...)
+
+	// Handle import paths after the whole file has been visited because we need
+	// symbol usage counts to be able to remove unused type-only imports in
+	// TypeScript code.
+	keptImportEquals := false
+	removedImportEquals := false
+	partsEnd := 0
+	for partIndex, part := range parts {
+		p.importRecordsForCurrentPart = nil
+		p.declaredSymbols = nil
+
+		result := p.scanForImportsAndExports(part.Stmts)
+		part.Stmts = result.stmts
+		keptImportEquals = keptImportEquals || result.keptImportEquals
+		removedImportEquals = removedImportEquals || result.removedImportEquals
+
+		part.ImportRecordIndices = append(part.ImportRecordIndices, p.importRecordsForCurrentPart...)
+		part.DeclaredSymbols = append(part.DeclaredSymbols, p.declaredSymbols...)
+
+		// Parts that became empty are dropped (compacted in place), except the
+		// special namespace-export part which must keep its index
+		if len(part.Stmts) > 0 || uint32(partIndex) == js_ast.NSExportPartIndex {
+			if p.moduleScope.ContainsDirectEval && len(part.DeclaredSymbols) > 0 {
+				// If this file contains a direct call to "eval()", all parts that
+				// declare top-level symbols must be kept since the eval'd code may
+				// reference those symbols.
+				part.CanBeRemovedIfUnused = false
+			}
+			parts[partsEnd] = part
+			partsEnd++
+		}
+	}
+	parts = parts[:partsEnd]
+
+	// We need to iterate multiple times if an import-equals statement was
+	// removed and there are more import-equals statements that may be removed.
+	// In the example below, a/b/c should be kept but x/y/z should be removed
+	// (and removal requires multiple passes):
+	//
+	//   import a = foo.a
+	//   import b = a.b
+	//   import c = b.c
+	//
+	//   import x = foo.x
+	//   import y = x.y
+	//   import z = y.z
+	//
+	//   export let bar = c
+	//
+	// This is a smaller version of the general import/export scanning loop above.
+	// We only want to repeat the code that eliminates TypeScript import-equals
+	// statements, not the other code in the loop above.
+	for keptImportEquals && removedImportEquals {
+		keptImportEquals = false
+		removedImportEquals = false
+		partsEnd := 0
+		for partIndex, part := range parts {
+			result := p.scanForUnusedTSImportEquals(part.Stmts)
+			part.Stmts = result.stmts
+			keptImportEquals = keptImportEquals || result.keptImportEquals
+			removedImportEquals = removedImportEquals || result.removedImportEquals
+			if len(part.Stmts) > 0 || uint32(partIndex) == js_ast.NSExportPartIndex {
+				parts[partsEnd] = part
+				partsEnd++
+			}
+		}
+		parts = parts[:partsEnd]
+	}
+
+	// Do a second pass for exported items now that imported items are filled out
+	for _, part := range parts {
+		for _, stmt := range part.Stmts {
+			if s, ok := stmt.Data.(*js_ast.SExportClause); ok {
+				for _, item := range s.Items {
+					// Mark re-exported imports as such
+					if namedImport, ok := p.namedImports[item.Name.Ref]; ok {
+						namedImport.IsExported = true
+						p.namedImports[item.Name.Ref] = namedImport
+					}
+				}
+			}
+		}
+	}
+
+	// Analyze cross-part dependencies for tree shaking and code splitting
+	{
+		// Map locals to parts
+		p.topLevelSymbolToParts = make(map[ast.Ref][]uint32)
+		for partIndex, part := range parts {
+			for _, declared := range part.DeclaredSymbols {
+				if declared.IsTopLevel {
+					// If this symbol was merged, use the symbol at the end of the
+					// linked list in the map. This is the case for multiple "var"
+					// declarations with the same name, for example.
+					ref := declared.Ref
+					for p.symbols[ref.InnerIndex].Link != ast.InvalidRef {
+						ref = p.symbols[ref.InnerIndex].Link
+					}
+					p.topLevelSymbolToParts[ref] = append(
+						p.topLevelSymbolToParts[ref], uint32(partIndex))
+				}
+			}
+		}
+
+		// Pulling in the exports of this module always pulls in the export part
+		p.topLevelSymbolToParts[p.exportsRef] = append(p.topLevelSymbolToParts[p.exportsRef], js_ast.NSExportPartIndex)
+	}
+
+	// Make a wrapper symbol in case we need to be wrapped in a closure
+	wrapperRef := p.newSymbol(ast.SymbolOther, "require_"+p.source.IdentifierName)
+
+	// Assign slots to symbols in nested scopes. This is some precomputation for
+	// the symbol renaming pass that will happen later in the linker. It's done
+	// now in the parser because we want it to be done in parallel per file and
+	// we're already executing code in a dedicated goroutine for this file.
+	var nestedScopeSlotCounts ast.SlotCounts
+	if p.options.minifyIdentifiers {
+		nestedScopeSlotCounts = renamer.AssignNestedScopeSlots(p.moduleScope, p.symbols)
+	}
+
+	exportsKind := js_ast.ExportsNone
+	usesExportsRef := p.symbols[p.exportsRef.InnerIndex].UseCountEstimate > 0
+	usesModuleRef := p.symbols[p.moduleRef.InnerIndex].UseCountEstimate > 0
+
+	if p.esmExportKeyword.Len > 0 || p.esmImportMeta.Len > 0 || p.topLevelAwaitKeyword.Len > 0 {
+		exportsKind = js_ast.ExportsESM
+	} else if usesExportsRef || usesModuleRef || p.hasTopLevelReturn {
+		exportsKind = js_ast.ExportsCommonJS
+	} else {
+		// If this module has no exports, try to determine what kind of module it
+		// is by looking at node's "type" field in "package.json" and/or whether
+		// the file extension is ".mjs"/".mts" or ".cjs"/".cts".
+		switch {
+		case p.options.moduleTypeData.Type.IsCommonJS():
+			// ".cjs" or ".cts" or ("type: commonjs" and (".js" or ".jsx" or ".ts" or ".tsx"))
+			exportsKind = js_ast.ExportsCommonJS
+
+		case p.options.moduleTypeData.Type.IsESM():
+			// ".mjs" or ".mts" or ("type: module" and (".js" or ".jsx" or ".ts" or ".tsx"))
+			exportsKind = js_ast.ExportsESM
+
+		default:
+			// Treat unknown modules containing an import statement as ESM. Otherwise
+			// the bundler will treat this file as CommonJS if it's imported and ESM
+			// if it's not imported.
+			if p.esmImportStatementKeyword.Len > 0 {
+				exportsKind = js_ast.ExportsESM
+			}
+		}
+	}
+
+	return js_ast.AST{
+		Parts:                           parts,
+		ModuleTypeData:                  p.options.moduleTypeData,
+		ModuleScope:                     p.moduleScope,
+		CharFreq:                        p.computeCharacterFrequency(),
+		Symbols:                         p.symbols,
+		ExportsRef:                      p.exportsRef,
+		ModuleRef:                       p.moduleRef,
+		WrapperRef:                      wrapperRef,
+		Hashbang:                        hashbang,
+		Directives:                      directives,
+		NamedImports:                    p.namedImports,
+		NamedExports:                    p.namedExports,
+		TSEnums:                         p.tsEnums,
+		ConstValues:                     p.constValues,
+		ExprComments:                    p.exprComments,
+		NestedScopeSlotCounts:           nestedScopeSlotCounts,
+		TopLevelSymbolToPartsFromParser: p.topLevelSymbolToParts,
+		ExportStarImportRecords:         p.exportStarImportRecords,
+		ImportRecords:                   p.importRecords,
+		ApproximateLineCount:            int32(p.lexer.ApproximateNewlineCount) + 1,
+		MangledProps:                    p.mangledProps,
+		ReservedProps:                   p.reservedProps,
+		ManifestForYarnPnP:              p.manifestForYarnPnP,
+
+		// CommonJS features
+		UsesExportsRef: usesExportsRef,
+		UsesModuleRef:  usesModuleRef,
+		ExportsKind:    exportsKind,
+
+		// ES6 features
+		ExportKeyword:            p.esmExportKeyword,
+		TopLevelAwaitKeyword:     p.topLevelAwaitKeyword,
+		LiveTopLevelAwaitKeyword: p.liveTopLevelAwaitKeyword,
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower.go
new file mode 100644
index 0000000..3991058
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower.go
@@ -0,0 +1,2131 @@
+// This file contains code for "lowering" syntax, which means converting it to
+// older JavaScript. For example, "a ** b" becomes a call to "Math.pow(a, b)"
+// when lowered. Which syntax is lowered is determined by the language target.
+
+package js_parser
+
+import (
+	"fmt"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// markSyntaxFeature reports a syntax feature that appears in the source but is
+// unsupported by the configured target. For features that can never be
+// transformed it emits a targeted error (or warning for "import.meta") and
+// returns early; otherwise it emits a generic "not supported yet" error.
+// It returns true if an error was generated.
+func (p *parser) markSyntaxFeature(feature compat.JSFeature, r logger.Range) (didGenerateError bool) {
+	didGenerateError = true
+
+	if !p.options.unsupportedJSFeatures.Has(feature) {
+		if feature == compat.TopLevelAwait && !p.options.outputFormat.KeepESMImportExportSyntax() {
+			p.log.AddError(&p.tracker, r, fmt.Sprintf(
+				"Top-level await is currently not supported with the %q output format", p.options.outputFormat.String()))
+			return
+		}
+
+		didGenerateError = false
+		return
+	}
+
+	var name string
+	where := config.PrettyPrintTargetEnvironment(p.options.originalTargetEnv, p.options.unsupportedJSFeatureOverridesMask)
+
+	switch feature {
+	case compat.DefaultArgument:
+		name = "default arguments"
+
+	case compat.RestArgument:
+		name = "rest arguments"
+
+	case compat.ArraySpread:
+		name = "array spread"
+
+	case compat.ForOf:
+		name = "for-of loops"
+
+	case compat.ObjectAccessors:
+		name = "object accessors"
+
+	case compat.ObjectExtensions:
+		name = "object literal extensions"
+
+	case compat.Destructuring:
+		name = "destructuring"
+
+	case compat.NewTarget:
+		name = "new.target"
+
+	case compat.ConstAndLet:
+		name = p.source.TextForRange(r)
+
+	case compat.Class:
+		name = "class syntax"
+
+	case compat.Generator:
+		name = "generator functions"
+
+	case compat.AsyncAwait:
+		name = "async functions"
+
+	case compat.AsyncGenerator:
+		name = "async generator functions"
+
+	case compat.ForAwait:
+		name = "for-await loops"
+
+	case compat.NestedRestBinding:
+		name = "non-identifier array rest patterns"
+
+	case compat.ImportAttributes:
+		p.log.AddError(&p.tracker, r, fmt.Sprintf(
+			"Using an arbitrary value as the second argument to \"import()\" is not possible in %s", where))
+		return
+
+	case compat.TopLevelAwait:
+		p.log.AddError(&p.tracker, r, fmt.Sprintf(
+			"Top-level await is not available in %s", where))
+		return
+
+	case compat.Bigint:
+		// Transforming these will never be supported
+		p.log.AddError(&p.tracker, r, fmt.Sprintf(
+			"Big integer literals are not available in %s", where))
+		return
+
+	case compat.ImportMeta:
+		// This can't be polyfilled
+		kind := logger.Warning
+		if p.suppressWarningsAboutWeirdCode || p.fnOrArrowDataVisit.tryBodyCount > 0 {
+			kind = logger.Debug
+		}
+		p.log.AddID(logger.MsgID_JS_EmptyImportMeta, kind, &p.tracker, r, fmt.Sprintf(
+			"\"import.meta\" is not available in %s and will be empty", where))
+		return
+
+	default:
+		p.log.AddError(&p.tracker, r, fmt.Sprintf(
+			"This feature is not available in %s", where))
+		return
+	}
+
+	p.log.AddError(&p.tracker, r, fmt.Sprintf(
+		"Transforming %s to %s is not supported yet", name, where))
+	return
+}
+
+// isStrictMode reports whether the current scope is in strict mode, whether
+// explicitly ("use strict") or implicitly (class body, ESM, etc.).
+func (p *parser) isStrictMode() bool {
+	return p.currentScope.StrictMode != js_ast.SloppyMode
+}
+
+// isStrictModeOutputFormat reports whether the output format itself forces
+// strict mode (only the ESM output format does).
+func (p *parser) isStrictModeOutputFormat() bool {
+	return p.options.outputFormat == config.FormatESModule
+}
+
+// strictModeFeature enumerates the constructs that are forbidden (or behave
+// differently) in strict mode, so markStrictModeFeature can produce a
+// targeted error message for each one.
+type strictModeFeature uint8
+
+const (
+	withStatement strictModeFeature = iota
+	deleteBareName
+	forInVarInit
+	evalOrArguments
+	reservedWord
+	legacyOctalLiteral
+	legacyOctalEscape
+	ifElseFunctionStmt
+	labelFunctionStmt
+	duplicateLexicallyDeclaredNames
+)
+
+// markStrictModeFeature records the use of a construct that is invalid in
+// strict mode. If the current scope is already strict it emits an error with
+// notes explaining why strict mode applies; otherwise, if the construct
+// cannot be transformed away and the output format is strict (ESM), it emits
+// an error about the output format. "detail" supplies the offending name for
+// features that need one (e.g. a reserved word).
+func (p *parser) markStrictModeFeature(feature strictModeFeature, r logger.Range, detail string) {
+	var text string
+	canBeTransformed := false
+
+	switch feature {
+	case withStatement:
+		text = "With statements"
+
+	case deleteBareName:
+		text = "Delete of a bare identifier"
+
+	case forInVarInit:
+		text = "Variable initializers inside for-in loops"
+		canBeTransformed = true
+
+	case evalOrArguments:
+		text = fmt.Sprintf("Declarations with the name %q", detail)
+
+	case reservedWord:
+		text = fmt.Sprintf("%q is a reserved word and", detail)
+
+	case legacyOctalLiteral:
+		text = "Legacy octal literals"
+
+	case legacyOctalEscape:
+		text = "Legacy octal escape sequences"
+
+	case ifElseFunctionStmt:
+		text = "Function declarations inside if statements"
+
+	case labelFunctionStmt:
+		text = "Function declarations inside labels"
+
+	case duplicateLexicallyDeclaredNames:
+		text = "Duplicate lexically-declared names"
+
+	default:
+		text = "This feature"
+	}
+
+	if p.isStrictMode() {
+		where, notes := p.whyStrictMode(p.currentScope)
+		p.log.AddErrorWithNotes(&p.tracker, r,
+			fmt.Sprintf("%s cannot be used %s", text, where), notes)
+	} else if !canBeTransformed && p.isStrictModeOutputFormat() {
+		p.log.AddError(&p.tracker, r,
+			fmt.Sprintf("%s cannot be used with the \"esm\" output format due to strict mode", text))
+	}
+}
+
+// whyStrictMode explains why the given scope is in strict mode. It returns a
+// phrase for the error message (e.g. "in strict mode" or "in an ECMAScript
+// module") plus notes pointing at the source of the strictness (class
+// keyword, tsconfig setting, JSX element, "use strict" directive, or the
+// evidence that this file is ESM).
+func (p *parser) whyStrictMode(scope *js_ast.Scope) (where string, notes []logger.MsgData) {
+	where = "in strict mode"
+
+	switch scope.StrictMode {
+	case js_ast.ImplicitStrictModeClass:
+		notes = []logger.MsgData{p.tracker.MsgData(p.enclosingClassKeyword,
+			"All code inside a class is implicitly in strict mode")}
+
+	case js_ast.ImplicitStrictModeTSAlwaysStrict:
+		tsAlwaysStrict := p.options.tsAlwaysStrict
+		t := logger.MakeLineColumnTracker(&tsAlwaysStrict.Source)
+		notes = []logger.MsgData{t.MsgData(tsAlwaysStrict.Range, fmt.Sprintf(
+			"TypeScript's %q setting was enabled here:", tsAlwaysStrict.Name))}
+
+	case js_ast.ImplicitStrictModeJSXAutomaticRuntime:
+		notes = []logger.MsgData{p.tracker.MsgData(logger.Range{Loc: p.firstJSXElementLoc, Len: 1},
+			"This file is implicitly in strict mode due to the JSX element here:"),
+			{Text: "When React's \"automatic\" JSX transform is enabled, using a JSX element automatically inserts " +
+				"an \"import\" statement at the top of the file for the corresponding the JSX helper function. " +
+				"This means the file is considered an ECMAScript module, and all ECMAScript modules use strict mode."}}
+
+	case js_ast.ExplicitStrictMode:
+		notes = []logger.MsgData{p.tracker.MsgData(p.source.RangeOfString(scope.UseStrictLoc),
+			"Strict mode is triggered by the \"use strict\" directive here:")}
+
+	case js_ast.ImplicitStrictModeESM:
+		_, notes = p.whyESModule()
+		where = "in an ECMAScript module"
+	}
+
+	return
+}
+
+// markAsyncFn checks whether an async function (or async generator, when
+// isGenerator is true) needs to be reported as unsupported for the current
+// target. It returns true if an error was generated.
+func (p *parser) markAsyncFn(asyncRange logger.Range, isGenerator bool) (didGenerateError bool) {
+	// Lowered async functions are implemented in terms of generators. So if
+	// generators aren't supported, async functions aren't supported either.
+	// But if generators are supported, then async functions are unconditionally
+	// supported because we can use generators to implement them.
+	if !p.options.unsupportedJSFeatures.Has(compat.Generator) {
+		return false
+	}
+
+	feature := compat.AsyncAwait
+	if isGenerator {
+		feature = compat.AsyncGenerator
+	}
+	return p.markSyntaxFeature(feature, asyncRange)
+}
+
+// captureThis returns the symbol that holds the captured value of "this" for
+// the enclosing function, lazily creating the "_this" symbol on first use and
+// recording a usage each time it's requested.
+func (p *parser) captureThis() ast.Ref {
+	capture := p.fnOnlyDataVisit.thisCaptureRef
+	if capture == nil {
+		newRef := p.newSymbol(ast.SymbolHoisted, "_this")
+		capture = &newRef
+		p.fnOnlyDataVisit.thisCaptureRef = capture
+	}
+
+	p.recordUsage(*capture)
+	return *capture
+}
+
+// captureArguments returns the symbol that holds the captured value of
+// "arguments" for the enclosing function, lazily creating the "_arguments"
+// symbol on first use and recording a usage each time it's requested.
+func (p *parser) captureArguments() ast.Ref {
+	capture := p.fnOnlyDataVisit.argumentsCaptureRef
+	if capture == nil {
+		newRef := p.newSymbol(ast.SymbolHoisted, "_arguments")
+		capture = &newRef
+		p.fnOnlyDataVisit.argumentsCaptureRef = capture
+	}
+
+	p.recordUsage(*capture)
+	return *capture
+}
+
+func (p *parser) lowerFunction(
+	isAsync *bool,
+	isGenerator *bool,
+	args *[]js_ast.Arg,
+	bodyLoc logger.Loc,
+	bodyBlock *js_ast.SBlock,
+	preferExpr *bool,
+	hasRestArg *bool,
+	isArrow bool,
+) {
+	// Lower object rest binding patterns in function arguments
+	if p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		var prefixStmts []js_ast.Stmt
+
+		// Lower each argument individually instead of lowering all arguments
+		// together. There is a correctness tradeoff here around default values
+		// for function arguments, with no right answer.
+		//
+		// Lowering all arguments together will preserve the order of side effects
+		// for default values, but will mess up their scope:
+		//
+		//   // Side effect order: a(), b(), c()
+		//   function foo([{[a()]: w, ...x}, y = b()], z = c()) {}
+		//
+		//   // Side effect order is correct but scope is wrong
+		//   function foo(_a, _b) {
+		//     var [[{[a()]: w, ...x}, y = b()], z = c()] = [_a, _b]
+		//   }
+		//
+		// Lowering each argument individually will preserve the scope for default
+		// values that don't contain object rest binding patterns, but will mess up
+		// the side effect order:
+		//
+		//   // Side effect order: a(), b(), c()
+		//   function foo([{[a()]: w, ...x}, y = b()], z = c()) {}
+		//
+		//   // Side effect order is wrong but scope for c() is correct
+		//   function foo(_a, z = c()) {
+		//     var [{[a()]: w, ...x}, y = b()] = _a
+		//   }
+		//
+		// This transform chooses to lower each argument individually with the
+		// thinking that perhaps scope matters more in real-world code than side
+		// effect order.
+		for i, arg := range *args {
+			if bindingHasObjectRest(arg.Binding) {
+				ref := p.generateTempRef(tempRefNoDeclare, "")
+				target := js_ast.ConvertBindingToExpr(arg.Binding, nil)
+				init := js_ast.Expr{Loc: arg.Binding.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+				p.recordUsage(ref)
+
+				if decls, ok := p.lowerObjectRestToDecls(target, init, nil); ok {
+					// Replace the binding but leave the default value intact
+					(*args)[i].Binding.Data = &js_ast.BIdentifier{Ref: ref}
+
+					// Append a variable declaration to the function body
+					prefixStmts = append(prefixStmts, js_ast.Stmt{Loc: arg.Binding.Loc,
+						Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: decls}})
+				}
+			}
+		}
+
+		if len(prefixStmts) > 0 {
+			bodyBlock.Stmts = append(prefixStmts, bodyBlock.Stmts...)
+		}
+	}
+
+	// Lower async functions and async generator functions
+	if *isAsync && (p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) || (isGenerator != nil && *isGenerator && p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator))) {
+		// Use the shortened form if we're an arrow function
+		if preferExpr != nil {
+			*preferExpr = true
+		}
+
+		// Determine the value for "this"
+		thisValue, hasThisValue := p.valueForThis(
+			bodyLoc,
+			false, /* shouldWarn */
+			js_ast.AssignTargetNone,
+			false, /* isCallTarget */
+			false, /* isDeleteTarget */
+		)
+		if !hasThisValue {
+			thisValue = js_ast.Expr{Loc: bodyLoc, Data: js_ast.EThisShared}
+		}
+
+		// Move the code into a nested generator function
+		fn := js_ast.Fn{
+			IsGenerator: true,
+			Body:        js_ast.FnBody{Loc: bodyLoc, Block: *bodyBlock},
+		}
+		bodyBlock.Stmts = nil
+
+		// Errors thrown during argument evaluation must reject the
+		// resulting promise, which needs more complex code to handle
+		couldThrowErrors := false
+		for _, arg := range *args {
+			if _, ok := arg.Binding.Data.(*js_ast.BIdentifier); !ok ||
+				(arg.DefaultOrNil.Data != nil && couldPotentiallyThrow(arg.DefaultOrNil.Data)) {
+				couldThrowErrors = true
+				break
+			}
+		}
+
+		// Forward the arguments to the wrapper function
+		usesArgumentsRef := !isArrow && p.fnOnlyDataVisit.argumentsRef != nil &&
+			p.symbolUses[*p.fnOnlyDataVisit.argumentsRef].CountEstimate > 0
+		var forwardedArgs js_ast.Expr
+		if !couldThrowErrors && !usesArgumentsRef {
+			// Simple case: the arguments can stay on the outer function. It's
+			// worth separating out the simple case because it's the common case
+			// and it generates smaller code.
+			forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: js_ast.ENullShared}
+		} else {
+			// If code uses "arguments" then we must move the arguments to the inner
+			// function. This is because you can modify arguments by assigning to
+			// elements in the "arguments" object:
+			//
+			//   async function foo(x) {
+			//     arguments[0] = 1;
+			//     // "x" must be 1 here
+			//   }
+			//
+
+			// Complex case: the arguments must be moved to the inner function
+			fn.Args = *args
+			fn.HasRestArg = *hasRestArg
+			*args = nil
+			*hasRestArg = false
+
+			// Make sure to not change the value of the "length" property. This is
+			// done by generating dummy arguments for the outer function equal to
+			// the expected length of the function:
+			//
+			//   async function foo(a, b, c = d, ...e) {
+			//   }
+			//
+			// This turns into:
+			//
+			//   function foo(_0, _1) {
+			//     return __async(this, arguments, function* (a, b, c = d, ...e) {
+			//     });
+			//   }
+			//
+			// The "_0" and "_1" are dummy variables to ensure "foo.length" is 2.
+			for i, arg := range fn.Args {
+				if arg.DefaultOrNil.Data != nil || fn.HasRestArg && i+1 == len(fn.Args) {
+					// Arguments from here on don't add to the "length"
+					break
+				}
+
+				// Generate a dummy variable
+				argRef := p.newSymbol(ast.SymbolOther, fmt.Sprintf("_%d", i))
+				p.currentScope.Generated = append(p.currentScope.Generated, argRef)
+				*args = append(*args, js_ast.Arg{Binding: js_ast.Binding{Loc: arg.Binding.Loc, Data: &js_ast.BIdentifier{Ref: argRef}}})
+			}
+
+			// Forward all arguments from the outer function to the inner function
+			if !isArrow {
+				// Normal functions can just use "arguments" to forward everything
+				forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: &js_ast.EIdentifier{Ref: *p.fnOnlyDataVisit.argumentsRef}}
+			} else {
+				// Arrow functions can't use "arguments", so we need to forward
+				// the arguments manually.
+				//
+				// Note that if the arrow function references "arguments" in its body
+				// (even if it's inside another nested arrow function), that reference
+				// to "arguments" will have to be substituted with a captured variable.
+				// This is because we're changing the arrow function into a generator
+				// function, which introduces a variable named "arguments". This is
+				// handled separately during symbol resolution instead of being handled
+				// here so we don't need to re-traverse the arrow function body.
+
+				// If we need to forward more than the current number of arguments,
+				// add a rest argument to the set of forwarding variables. This is the
+				// case if the arrow function has rest or default arguments.
+				if len(*args) < len(fn.Args) {
+					argRef := p.newSymbol(ast.SymbolOther, fmt.Sprintf("_%d", len(*args)))
+					p.currentScope.Generated = append(p.currentScope.Generated, argRef)
+					*args = append(*args, js_ast.Arg{Binding: js_ast.Binding{Loc: bodyLoc, Data: &js_ast.BIdentifier{Ref: argRef}}})
+					*hasRestArg = true
+				}
+
+				// Forward all of the arguments
+				items := make([]js_ast.Expr, 0, len(*args))
+				for i, arg := range *args {
+					id := arg.Binding.Data.(*js_ast.BIdentifier)
+					item := js_ast.Expr{Loc: arg.Binding.Loc, Data: &js_ast.EIdentifier{Ref: id.Ref}}
+					if *hasRestArg && i+1 == len(*args) {
+						item.Data = &js_ast.ESpread{Value: item}
+					}
+					items = append(items, item)
+				}
+				forwardedArgs = js_ast.Expr{Loc: bodyLoc, Data: &js_ast.EArray{Items: items, IsSingleLine: true}}
+			}
+		}
+
+		var name string
+		if isGenerator != nil && *isGenerator {
+			// "async function* foo(a, b) { stmts }" => "function foo(a, b) { return __asyncGenerator(this, null, function* () { stmts }) }"
+			name = "__asyncGenerator"
+			*isGenerator = false
+		} else {
+			// "async function foo(a, b) { stmts }" => "function foo(a, b) { return __async(this, null, function* () { stmts }) }"
+			name = "__async"
+		}
+		*isAsync = false
+		callAsync := p.callRuntime(bodyLoc, name, []js_ast.Expr{
+			thisValue,
+			forwardedArgs,
+			{Loc: bodyLoc, Data: &js_ast.EFunction{Fn: fn}},
+		})
+		bodyBlock.Stmts = []js_ast.Stmt{{Loc: bodyLoc, Data: &js_ast.SReturn{ValueOrNil: callAsync}}}
+	}
+}
+
+// lowerOptionalChain converts an optional chain ("a?.b", "a?.[b]", "a?.()",
+// and "delete a?.b") into an equivalent conditional expression when the
+// target environment doesn't support optional chaining, or when the chain
+// contains a private name that must itself be lowered. The chain is first
+// flattened from the outside in, then rebuilt from the inside out and
+// wrapped in a "== null" test against the captured starting value.
+func (p *parser) lowerOptionalChain(expr js_ast.Expr, in exprIn, childOut exprOut) (js_ast.Expr, exprOut) {
+	valueWhenUndefined := js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}
+	endsWithPropertyAccess := false
+	containsPrivateName := false
+	startsWithCall := false
+	originalExpr := expr
+	chain := []js_ast.Expr{}
+	loc := expr.Loc
+
+	// Step 1: Get an array of all expressions in the chain. We're traversing the
+	// chain from the outside in, so the array will be filled in "backwards".
+flatten:
+	for {
+		chain = append(chain, expr)
+
+		switch e := expr.Data.(type) {
+		case *js_ast.EDot:
+			expr = e.Target
+			if len(chain) == 1 {
+				endsWithPropertyAccess = true
+			}
+			if e.OptionalChain == js_ast.OptionalChainStart {
+				break flatten
+			}
+
+		case *js_ast.EIndex:
+			expr = e.Target
+			if len(chain) == 1 {
+				endsWithPropertyAccess = true
+			}
+
+			// If this is a private name that needs to be lowered, the entire chain
+			// itself will have to be lowered even if the language target supports
+			// optional chaining. This is because there's no way to use our shim
+			// function for private names with optional chaining syntax.
+			if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
+				containsPrivateName = true
+			}
+
+			if e.OptionalChain == js_ast.OptionalChainStart {
+				break flatten
+			}
+
+		case *js_ast.ECall:
+			expr = e.Target
+			// Remember that the chain begins with a call so the value for
+			// "this" can be captured in step 2 below
+			if e.OptionalChain == js_ast.OptionalChainStart {
+				startsWithCall = true
+				break flatten
+			}
+
+		case *js_ast.EUnary: // UnOpDelete
+			// A short-circuited "delete a?.b" evaluates to "true"
+			valueWhenUndefined = js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}}
+			expr = e.Value
+
+		default:
+			panic("Internal error")
+		}
+	}
+
+	// Stop now if we can strip the whole chain as dead code. Since the chain is
+	// lazily evaluated, it's safe to just drop the code entirely.
+	if p.options.minifySyntax {
+		if isNullOrUndefined, sideEffects, ok := js_ast.ToNullOrUndefinedWithSideEffects(expr.Data); ok && isNullOrUndefined {
+			if sideEffects == js_ast.CouldHaveSideEffects {
+				return js_ast.JoinWithComma(p.astHelpers.SimplifyUnusedExpr(expr, p.options.unsupportedJSFeatures), valueWhenUndefined), exprOut{}
+			}
+			return valueWhenUndefined, exprOut{}
+		}
+	} else {
+		switch expr.Data.(type) {
+		case *js_ast.ENull, *js_ast.EUndefined:
+			return valueWhenUndefined, exprOut{}
+		}
+	}
+
+	// We need to lower this if this is an optional call off of a private name
+	// such as "foo.#bar?.()" because the value of "this" must be captured.
+	if _, _, private := p.extractPrivateIndex(expr); private != nil {
+		containsPrivateName = true
+	}
+
+	// Don't lower this if we don't need to. This check must be done here instead
+	// of earlier so we can do the dead code elimination above when the target is
+	// null or undefined.
+	if !p.options.unsupportedJSFeatures.Has(compat.OptionalChain) && !containsPrivateName {
+		return originalExpr, exprOut{}
+	}
+
+	// Step 2: Figure out if we need to capture the value for "this" for the
+	// initial ECall. This will be passed to ".call(this, ...args)" later.
+	var thisArg js_ast.Expr
+	var targetWrapFunc func(js_ast.Expr) js_ast.Expr
+	if startsWithCall {
+		if childOut.thisArgFunc != nil {
+			// The initial value is a nested optional chain that ended in a property
+			// access. The nested chain was processed first and has saved the
+			// appropriate value for "this". The callback here will return a
+			// reference to that saved location.
+			thisArg = childOut.thisArgFunc()
+		} else {
+			// The initial value is a normal expression. If it's a property access,
+			// strip the property off and save the target of the property access to
+			// be used as the value for "this".
+			switch e := expr.Data.(type) {
+			case *js_ast.EDot:
+				if _, ok := e.Target.Data.(*js_ast.ESuper); ok {
+					// Lower "super.prop" if necessary
+					if p.shouldLowerSuperPropertyAccess(e.Target) {
+						key := js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
+						expr = p.lowerSuperPropertyGet(expr.Loc, key)
+					}
+
+					// Special-case "super.foo?.()" to avoid a syntax error. Without this,
+					// we would generate:
+					//
+					//   (_b = (_a = super).foo) == null ? void 0 : _b.call(_a)
+					//
+					// which is a syntax error. Now we generate this instead:
+					//
+					//   (_a = super.foo) == null ? void 0 : _a.call(this)
+					//
+					thisArg = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+				} else {
+					targetFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, e.Target, valueDefinitelyNotMutated)
+					expr = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+						Target:  targetFunc(),
+						Name:    e.Name,
+						NameLoc: e.NameLoc,
+					}}
+					thisArg = targetFunc()
+					targetWrapFunc = wrapFunc
+				}
+
+			case *js_ast.EIndex:
+				if _, ok := e.Target.Data.(*js_ast.ESuper); ok {
+					// Lower "super[prop]" if necessary
+					if p.shouldLowerSuperPropertyAccess(e.Target) {
+						expr = p.lowerSuperPropertyGet(expr.Loc, e.Index)
+					}
+
+					// See the comment above about a similar special case for EDot
+					thisArg = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+				} else {
+					targetFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, e.Target, valueDefinitelyNotMutated)
+					targetWrapFunc = wrapFunc
+
+					// Capture the value of "this" if the target of the starting call
+					// expression is a private property access
+					if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
+						// "foo().#bar?.()" must capture "foo()" for "this"
+						expr = p.lowerPrivateGet(targetFunc(), e.Index.Loc, private)
+						thisArg = targetFunc()
+						break
+					}
+
+					expr = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+						Target: targetFunc(),
+						Index:  e.Index,
+					}}
+					thisArg = targetFunc()
+				}
+			}
+		}
+	}
+
+	// Step 3: Figure out if we need to capture the starting value. We don't need
+	// to capture it if it doesn't have any side effects (e.g. it's just a bare
+	// identifier). Skipping the capture reduces code size and matches the output
+	// of the TypeScript compiler.
+	exprFunc, exprWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, expr, valueDefinitelyNotMutated)
+	// Two references to the captured value: one for the "== null" test in
+	// step 5 and one to seed the rebuilt chain in step 4
+	expr = exprFunc()
+	result := exprFunc()
+
+	// Step 4: Wrap the starting value by each expression in the chain. We
+	// traverse the chain in reverse because we want to go from the inside out
+	// and the chain was built from the outside in.
+	var parentThisArgFunc func() js_ast.Expr
+	var parentThisArgWrapFunc func(js_ast.Expr) js_ast.Expr
+	var privateThisFunc func() js_ast.Expr
+	var privateThisWrapFunc func(js_ast.Expr) js_ast.Expr
+	for i := len(chain) - 1; i >= 0; i-- {
+		// Save a reference to the value of "this" for our parent ECall
+		if i == 0 && in.storeThisArgForParentOptionalChain && endsWithPropertyAccess {
+			parentThisArgFunc, parentThisArgWrapFunc = p.captureValueWithPossibleSideEffects(result.Loc, 2, result, valueDefinitelyNotMutated)
+			result = parentThisArgFunc()
+		}
+
+		switch e := chain[i].Data.(type) {
+		case *js_ast.EDot:
+			result = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+				Target:  result,
+				Name:    e.Name,
+				NameLoc: e.NameLoc,
+			}}
+
+		case *js_ast.EIndex:
+			if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
+				// If this is private name property access inside a call expression and
+				// the call expression is part of this chain, then the call expression
+				// is going to need a copy of the property access target as the value
+				// for "this" for the call. Example for this case: "foo.#bar?.()"
+				if i > 0 {
+					if _, ok := chain[i-1].Data.(*js_ast.ECall); ok {
+						privateThisFunc, privateThisWrapFunc = p.captureValueWithPossibleSideEffects(loc, 2, result, valueDefinitelyNotMutated)
+						result = privateThisFunc()
+					}
+				}
+
+				result = p.lowerPrivateGet(result, e.Index.Loc, private)
+				continue
+			}
+
+			result = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+				Target: result,
+				Index:  e.Index,
+			}}
+
+		case *js_ast.ECall:
+			// If this is the initial ECall in the chain and it's being called off of
+			// a property access, invoke the function using ".call(this, ...args)" to
+			// explicitly provide the value for "this".
+			if i == len(chain)-1 && thisArg.Data != nil {
+				result = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+						Target:  result,
+						Name:    "call",
+						NameLoc: loc,
+					}},
+					Args:                   append([]js_ast.Expr{thisArg}, e.Args...),
+					CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
+					IsMultiLine:            e.IsMultiLine,
+					Kind:                   js_ast.TargetWasOriginallyPropertyAccess,
+				}}
+				break
+			}
+
+			// If the target of this call expression is a private name property
+			// access that's also part of this chain, then we must use the copy of
+			// the property access target that was stashed away earlier as the value
+			// for "this" for the call. Example for this case: "foo.#bar?.()"
+			if privateThisFunc != nil {
+				result = privateThisWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+					Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+						Target:  result,
+						Name:    "call",
+						NameLoc: loc,
+					}},
+					Args:                   append([]js_ast.Expr{privateThisFunc()}, e.Args...),
+					CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
+					IsMultiLine:            e.IsMultiLine,
+					Kind:                   js_ast.TargetWasOriginallyPropertyAccess,
+				}})
+				privateThisFunc = nil
+				break
+			}
+
+			result = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+				Target:                 result,
+				Args:                   e.Args,
+				CanBeUnwrappedIfUnused: e.CanBeUnwrappedIfUnused,
+				IsMultiLine:            e.IsMultiLine,
+				Kind:                   e.Kind,
+			}}
+
+		case *js_ast.EUnary:
+			result = js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
+				Op:    js_ast.UnOpDelete,
+				Value: result,
+
+				// If a delete of an optional chain takes place, it behaves as if the
+				// optional chain isn't there with regard to the "delete" semantics.
+				WasOriginallyDeleteOfIdentifierOrPropertyAccess: e.WasOriginallyDeleteOfIdentifierOrPropertyAccess,
+			}}
+
+		default:
+			panic("Internal error")
+		}
+	}
+
+	// Step 5: Wrap it all in a conditional that returns the chain or the default
+	// value if the initial value is null/undefined. The default value is usually
+	// "undefined" but is "true" if the chain ends in a "delete" operator.
+	// "x?.y" => "x == null ? void 0 : x.y"
+	// "x()?.y()" => "(_a = x()) == null ? void 0 : _a.y()"
+	result = js_ast.Expr{Loc: loc, Data: &js_ast.EIf{
+		Test: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+			Op:    js_ast.BinOpLooseEq,
+			Left:  expr,
+			Right: js_ast.Expr{Loc: loc, Data: js_ast.ENullShared},
+		}},
+		Yes: valueWhenUndefined,
+		No:  result,
+	}}
+	if exprWrapFunc != nil {
+		result = exprWrapFunc(result)
+	}
+	if targetWrapFunc != nil {
+		result = targetWrapFunc(result)
+	}
+	if childOut.thisArgWrapFunc != nil {
+		result = childOut.thisArgWrapFunc(result)
+	}
+	return result, exprOut{
+		thisArgFunc:     parentThisArgFunc,
+		thisArgWrapFunc: parentThisArgWrapFunc,
+	}
+}
+
+// lowerParenthesizedOptionalChain lowers a call whose target is a
+// parenthesized optional chain, e.g. "(a?.b)()". The nested chain has
+// already captured the value for "this" (in childOut), so the call is
+// rewritten as ".call(this, ...args)" to preserve the original binding.
+func (p *parser) lowerParenthesizedOptionalChain(loc logger.Loc, e *js_ast.ECall, childOut exprOut) js_ast.Expr {
+	// Prepend the saved "this" value to the original argument list
+	args := make([]js_ast.Expr, 0, len(e.Args)+1)
+	args = append(args, childOut.thisArgFunc())
+	args = append(args, e.Args...)
+
+	call := &js_ast.ECall{
+		Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+			Target:  e.Target,
+			Name:    "call",
+			NameLoc: loc,
+		}},
+		Args:        args,
+		IsMultiLine: e.IsMultiLine,
+		Kind:        js_ast.TargetWasOriginallyPropertyAccess,
+	}
+	return childOut.thisArgWrapFunc(js_ast.Expr{Loc: loc, Data: call})
+}
+
+// lowerAssignmentOperator splits an assignment target into two expressions:
+// one to write to and one to read from, with any shared sub-expressions
+// (the member target, and the computed key for index accesses) captured so
+// their side effects only run once. The callback receives (write, read) and
+// returns the combined lowered expression, which is then wrapped by the
+// capture helpers.
+func (p *parser) lowerAssignmentOperator(value js_ast.Expr, callback func(js_ast.Expr, js_ast.Expr) js_ast.Expr) js_ast.Expr {
+	switch left := value.Data.(type) {
+	case *js_ast.EDot:
+		// Only handle plain member expressions here; expressions that are part
+		// of an optional chain are not rewritten by this function
+		if left.OptionalChain == js_ast.OptionalChainNone {
+			referenceFunc, wrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Target, valueDefinitelyNotMutated)
+			return wrapFunc(callback(
+				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
+					Target:  referenceFunc(),
+					Name:    left.Name,
+					NameLoc: left.NameLoc,
+				}},
+				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EDot{
+					Target:  referenceFunc(),
+					Name:    left.Name,
+					NameLoc: left.NameLoc,
+				}},
+			))
+		}
+
+	case *js_ast.EIndex:
+		if left.OptionalChain == js_ast.OptionalChainNone {
+			// Capture both the target and the computed key so each is only
+			// evaluated once even though they appear in both expressions
+			targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Target, valueDefinitelyNotMutated)
+			indexFunc, indexWrapFunc := p.captureValueWithPossibleSideEffects(value.Loc, 2, left.Index, valueDefinitelyNotMutated)
+			return targetWrapFunc(indexWrapFunc(callback(
+				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
+					Target: targetFunc(),
+					Index:  indexFunc(),
+				}},
+				js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIndex{
+					Target: targetFunc(),
+					Index:  indexFunc(),
+				}},
+			)))
+		}
+
+	case *js_ast.EIdentifier:
+		// Identifiers have no side effects, so no capture is needed
+		return callback(
+			js_ast.Expr{Loc: value.Loc, Data: &js_ast.EIdentifier{Ref: left.Ref}},
+			value,
+		)
+	}
+
+	// We shouldn't get here with valid syntax? Just let this through for now
+	// since there's currently no assignment target validation. Garbage in,
+	// garbage out.
+	return value
+}
+
+// lowerExponentiationAssignmentOperator lowers "**=" by rewriting it in
+// terms of the "__pow" runtime helper. Private name targets ("a.#b **= c")
+// are handled with the private get/set helpers so the target is only
+// evaluated once.
+func (p *parser) lowerExponentiationAssignmentOperator(loc logger.Loc, e *js_ast.EBinary) js_ast.Expr {
+	target, privateLoc, private := p.extractPrivateIndex(e.Left)
+	if private == nil {
+		// "a **= b" => "a = __pow(a, b)"
+		return p.lowerAssignmentOperator(e.Left, func(a js_ast.Expr, b js_ast.Expr) js_ast.Expr {
+			return js_ast.Assign(a, p.callRuntime(loc, "__pow", []js_ast.Expr{b, e.Right}))
+		})
+	}
+
+	// "a.#b **= c" => "__privateSet(a, #b, __pow(__privateGet(a, #b), c))"
+	targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
+	obj := targetFunc()
+	pow := p.callRuntime(loc, "__pow", []js_ast.Expr{
+		p.lowerPrivateGet(targetFunc(), privateLoc, private),
+		e.Right,
+	})
+	return targetWrapFunc(p.lowerPrivateSet(obj, privateLoc, private, pow))
+}
+
+// lowerNullishCoalescingAssignmentOperator lowers "??=". There are three
+// cases: a private name target (always lowered, with or without native
+// "??" support), and a normal target when logical assignment syntax is
+// unsupported. The second return value is false when no lowering was
+// needed, in which case the caller keeps the original expression.
+func (p *parser) lowerNullishCoalescingAssignmentOperator(loc logger.Loc, e *js_ast.EBinary) (js_ast.Expr, bool) {
+	if target, privateLoc, private := p.extractPrivateIndex(e.Left); private != nil {
+		if p.options.unsupportedJSFeatures.Has(compat.NullishCoalescing) {
+			// "a.#b ??= c" => "(_a = __privateGet(a, #b)) != null ? _a : __privateSet(a, #b, c)"
+			targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
+			left := p.lowerPrivateGet(targetFunc(), privateLoc, private)
+			right := p.lowerPrivateSet(targetFunc(), privateLoc, private, e.Right)
+			return targetWrapFunc(p.lowerNullishCoalescing(loc, left, right)), true
+		}
+
+		// "a.#b ??= c" => "__privateGet(a, #b) ?? __privateSet(a, #b, c)"
+		targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
+		return targetWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+			Op:    js_ast.BinOpNullishCoalescing,
+			Left:  p.lowerPrivateGet(targetFunc(), privateLoc, private),
+			Right: p.lowerPrivateSet(targetFunc(), privateLoc, private, e.Right),
+		}}), true
+	}
+
+	if p.options.unsupportedJSFeatures.Has(compat.LogicalAssignment) {
+		return p.lowerAssignmentOperator(e.Left, func(a js_ast.Expr, b js_ast.Expr) js_ast.Expr {
+			if p.options.unsupportedJSFeatures.Has(compat.NullishCoalescing) {
+				// "a ??= b" => "(_a = a) != null ? _a : a = b"
+				return p.lowerNullishCoalescing(loc, a, js_ast.Assign(b, e.Right))
+			}
+
+			// "a ??= b" => "a ?? (a = b)"
+			return js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+				Op:    js_ast.BinOpNullishCoalescing,
+				Left:  a,
+				Right: js_ast.Assign(b, e.Right),
+			}}
+		}), true
+	}
+
+	// No lowering necessary
+	return js_ast.Expr{}, false
+}
+
+// lowerLogicalAssignmentOperator lowers "&&=" and "||=" (the operator is
+// passed in as "op") when the target environment lacks logical assignment
+// syntax, or when the left side is a private name member that must itself
+// be lowered. The second return value reports whether any lowering was done.
+func (p *parser) lowerLogicalAssignmentOperator(loc logger.Loc, e *js_ast.EBinary, op js_ast.OpCode) (js_ast.Expr, bool) {
+	if target, privateLoc, private := p.extractPrivateIndex(e.Left); private != nil {
+		// "a.#b &&= c" => "__privateGet(a, #b) && __privateSet(a, #b, c)"
+		// "a.#b ||= c" => "__privateGet(a, #b) || __privateSet(a, #b, c)"
+		targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, target, valueDefinitelyNotMutated)
+		binary := &js_ast.EBinary{
+			Op:    op,
+			Left:  p.lowerPrivateGet(targetFunc(), privateLoc, private),
+			Right: p.lowerPrivateSet(targetFunc(), privateLoc, private, e.Right),
+		}
+		return targetWrapFunc(js_ast.Expr{Loc: loc, Data: binary}), true
+	}
+
+	if !p.options.unsupportedJSFeatures.Has(compat.LogicalAssignment) {
+		// The syntax is natively supported; nothing to do
+		return js_ast.Expr{}, false
+	}
+
+	// "a &&= b" => "a && (a = b)"
+	// "a ||= b" => "a || (a = b)"
+	return p.lowerAssignmentOperator(e.Left, func(a js_ast.Expr, b js_ast.Expr) js_ast.Expr {
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+			Op:    op,
+			Left:  a,
+			Right: js_ast.Assign(b, e.Right),
+		}}
+	}), true
+}
+
+// lowerNullishCoalescing lowers "??" to a conditional expression:
+//
+//	"x ?? y"     => "x != null ? x : y"
+//	"x() ?? y()" => "_a = x(), _a != null ? _a : y"
+//
+// The left side is captured so its side effects only run once even though
+// it appears in both the test and the "then" branch.
+func (p *parser) lowerNullishCoalescing(loc logger.Loc, left js_ast.Expr, right js_ast.Expr) js_ast.Expr {
+	leftFunc, wrapFunc := p.captureValueWithPossibleSideEffects(loc, 2, left, valueDefinitelyNotMutated)
+	test := js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+		Op:    js_ast.BinOpLooseNe,
+		Left:  leftFunc(),
+		Right: js_ast.Expr{Loc: loc, Data: js_ast.ENullShared},
+	}}
+	return wrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.EIf{
+		Test: test,
+		Yes:  leftFunc(),
+		No:   right,
+	}})
+}
+
+// Lower object spread for environments that don't support them. Non-spread
+// properties are grouped into object literals and then passed to the
+// "__spreadValues" and "__spreadProps" functions like this:
+//
+//	"{a, b, ...c, d, e}" => "__spreadProps(__spreadValues({a, b}, c), {d, e})"
+//
+// If the object literal starts with a spread, then we pass an empty object
+// literal to "__spreadValues" to make sure we clone the object:
+//
+//	"{...a, b}" => "__spreadProps(__spreadValues({}, a), {b})"
+//
+// It's not immediately obvious why we don't compile everything to a single
+// call to a function that takes any number of arguments, since that would be
+// shorter. The reason is to preserve the order of side effects. Consider
+// this code:
+//
+//	let a = {
+//	  get x() {
+//	    b = {y: 2}
+//	    return 1
+//	  }
+//	}
+//	let b = {}
+//	let c = {...a, ...b}
+//
+// Converting the above code to "let c = __spreadFn({}, a, null, b)" means "c"
+// becomes "{x: 1}" which is incorrect. Converting the above code instead to
+// "let c = __spreadProps(__spreadProps({}, a), b)" means "c" becomes
+// "{x: 1, y: 2}" which is correct.
+func (p *parser) lowerObjectSpread(loc logger.Loc, e *js_ast.EObject) js_ast.Expr {
+	// Lowering is only needed if the feature is unsupported AND the literal
+	// actually contains at least one spread property
+	needsLowering := false
+
+	if p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		for _, property := range e.Properties {
+			if property.Kind == js_ast.PropertySpread {
+				needsLowering = true
+				break
+			}
+		}
+	}
+
+	if !needsLowering {
+		return js_ast.Expr{Loc: loc, Data: e}
+	}
+
+	// "result" accumulates the lowered expression; "properties" collects the
+	// current run of consecutive non-spread properties
+	var result js_ast.Expr
+	properties := []js_ast.Property{}
+
+	for _, property := range e.Properties {
+		if property.Kind != js_ast.PropertySpread {
+			properties = append(properties, property)
+			continue
+		}
+
+		// Flush the pending run of non-spread properties before this spread.
+		// The "result.Data == nil" case also covers a leading spread, which
+		// starts from an empty object literal so the source object is cloned.
+		if len(properties) > 0 || result.Data == nil {
+			if result.Data == nil {
+				// "{a, ...b}" => "__spreadValues({a}, b)"
+				result = js_ast.Expr{Loc: loc, Data: &js_ast.EObject{
+					Properties:   properties,
+					IsSingleLine: e.IsSingleLine,
+				}}
+			} else {
+				// "{...a, b, ...c}" => "__spreadValues(__spreadProps(__spreadValues({}, a), {b}), c)"
+				result = p.callRuntime(loc, "__spreadProps",
+					[]js_ast.Expr{result, {Loc: loc, Data: &js_ast.EObject{
+						Properties:   properties,
+						IsSingleLine: e.IsSingleLine,
+					}}})
+			}
+			properties = []js_ast.Property{}
+		}
+
+		// "{a, ...b}" => "__spreadValues({a}, b)"
+		result = p.callRuntime(loc, "__spreadValues", []js_ast.Expr{result, property.ValueOrNil})
+	}
+
+	// Flush any trailing run of non-spread properties
+	if len(properties) > 0 {
+		// "{...a, b}" => "__spreadProps(__spreadValues({}, a), {b})"
+		result = p.callRuntime(loc, "__spreadProps", []js_ast.Expr{result, {Loc: loc, Data: &js_ast.EObject{
+			Properties:    properties,
+			IsSingleLine:  e.IsSingleLine,
+			CloseBraceLoc: e.CloseBraceLoc,
+		}}})
+	}
+
+	return result
+}
+
+// maybeLowerAwait rewrites an "await" expression when async functions are
+// being lowered to generators:
+//
+//   - inside a lowered async generator: "await x" => "yield new __await(x)"
+//   - inside a lowered async function:  "await x" => "yield x"
+//
+// If no lowering is needed, the original EAwait is returned unchanged.
+func (p *parser) maybeLowerAwait(loc logger.Loc, e *js_ast.EAwait) js_ast.Expr {
+	// "await x" turns into "yield __await(x)" when lowering async generator functions
+	if p.fnOrArrowDataVisit.isGenerator && (p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) || p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator)) {
+		// The "__await" wrapper lets the generator driver distinguish awaited
+		// values from values yielded by the async generator itself
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EYield{
+			ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.ENew{
+				Target: p.importFromRuntime(loc, "__await"),
+				Args:   []js_ast.Expr{e.Value},
+			}},
+		}}
+	}
+
+	// "await x" turns into "yield x" when lowering async functions
+	if p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) {
+		return js_ast.Expr{Loc: loc, Data: &js_ast.EYield{
+			ValueOrNil: e.Value,
+		}}
+	}
+
+	return js_ast.Expr{Loc: loc, Data: e}
+}
+
+// lowerForAwaitLoop lowers a "for await" loop into a try/catch/finally plus
+// a plain "for" loop that drives the async iterator manually, appending the
+// result to "stmts". See the template in the comment below for the shape of
+// the generated code.
+func (p *parser) lowerForAwaitLoop(loc logger.Loc, loop *js_ast.SForOf, stmts []js_ast.Stmt) []js_ast.Stmt {
+	// This code:
+	//
+	//   for await (let x of y) z()
+	//
+	// is transformed into the following code:
+	//
+	//   try {
+	//     for (var iter = __forAwait(y), more, temp, error; more = !(temp = await iter.next()).done; more = false) {
+	//       let x = temp.value;
+	//       z();
+	//     }
+	//   } catch (temp) {
+	//     error = [temp]
+	//   } finally {
+	//     try {
+	//       more && (temp = iter.return) && (await temp.call(iter))
+	//     } finally {
+	//       if (error) throw error[0]
+	//     }
+	//   }
+	//
+	// except that "yield" is used instead of "await" if await is unsupported.
+	// This mostly follows TypeScript's implementation of the syntax transform.
+
+	iterRef := p.generateTempRef(tempRefNoDeclare, "iter")
+	moreRef := p.generateTempRef(tempRefNoDeclare, "more")
+	tempRef := p.generateTempRef(tempRefNoDeclare, "temp")
+	errorRef := p.generateTempRef(tempRefNoDeclare, "error")
+
+	// Hook the loop variable up to "temp.value": either as the declaration's
+	// initializer ("let x of ..." => "let x = temp.value") or, for an
+	// expression target, as an assignment ("x of ..." => "x = temp.value")
+	switch init := loop.Init.Data.(type) {
+	case *js_ast.SLocal:
+		if len(init.Decls) == 1 {
+			init.Decls[0].ValueOrNil = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+				Target:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+				NameLoc: loc,
+				Name:    "value",
+			}}
+		}
+	case *js_ast.SExpr:
+		init.Value.Data = &js_ast.EBinary{
+			Op:   js_ast.BinOpAssign,
+			Left: init.Value,
+			Right: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+				Target:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+				NameLoc: loc,
+				Name:    "value",
+			}},
+		}
+	}
+
+	// Build the new loop body: the rewritten initializer statement runs at
+	// the top of each iteration, followed by the original body statements
+	var body []js_ast.Stmt
+	var closeBraceLoc logger.Loc
+	body = append(body, loop.Init)
+
+	if block, ok := loop.Body.Data.(*js_ast.SBlock); ok {
+		body = append(body, block.Stmts...)
+		closeBraceLoc = block.CloseBraceLoc
+	} else {
+		body = append(body, loop.Body)
+	}
+
+	awaitIterNext := js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+		Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+			Target:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: iterRef}},
+			NameLoc: loc,
+			Name:    "next",
+		}},
+		Kind: js_ast.TargetWasOriginallyPropertyAccess,
+	}}
+	awaitTempCallIter := js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+		Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+			Target:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+			NameLoc: loc,
+			Name:    "call",
+		}},
+		Args: []js_ast.Expr{{Loc: loc, Data: &js_ast.EIdentifier{Ref: iterRef}}},
+		Kind: js_ast.TargetWasOriginallyPropertyAccess,
+	}}
+
+	// "await" expressions turn into "yield" expressions when lowering
+	awaitIterNext = p.maybeLowerAwait(awaitIterNext.Loc, &js_ast.EAwait{Value: awaitIterNext})
+	awaitTempCallIter = p.maybeLowerAwait(awaitTempCallIter.Loc, &js_ast.EAwait{Value: awaitTempCallIter})
+
+	// Assemble the try/catch/finally structure from the template above
+	return append(stmts, js_ast.Stmt{Loc: loc, Data: &js_ast.STry{
+		BlockLoc: loc,
+		Block: js_ast.SBlock{
+			Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SFor{
+				InitOrNil: js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{Kind: js_ast.LocalVar, Decls: []js_ast.Decl{
+					{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: iterRef}},
+						ValueOrNil: p.callRuntime(loc, "__forAwait", []js_ast.Expr{loop.Value})},
+					{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: moreRef}}},
+					{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: tempRef}}},
+					{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: errorRef}}},
+				}}},
+				// "more = !(temp = await iter.next()).done"
+				TestOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+					Op:   js_ast.BinOpAssign,
+					Left: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: moreRef}},
+					Right: js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
+						Op: js_ast.UnOpNot,
+						Value: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+							Target: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+								Op:    js_ast.BinOpAssign,
+								Left:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+								Right: awaitIterNext,
+							}},
+							NameLoc: loc,
+							Name:    "done",
+						}},
+					}},
+				}},
+				// "more = false"
+				UpdateOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+					Op:    js_ast.BinOpAssign,
+					Left:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: moreRef}},
+					Right: js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: false}},
+				}},
+				Body: js_ast.Stmt{Loc: loop.Body.Loc, Data: &js_ast.SBlock{
+					Stmts:         body,
+					CloseBraceLoc: closeBraceLoc,
+				}},
+			}}},
+		},
+
+		// "catch (temp) { error = [temp] }"
+		Catch: &js_ast.Catch{
+			Loc: loc,
+			BindingOrNil: js_ast.Binding{
+				Loc:  loc,
+				Data: &js_ast.BIdentifier{Ref: tempRef},
+			},
+			Block: js_ast.SBlock{
+				Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+					Op:   js_ast.BinOpAssign,
+					Left: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: errorRef}},
+					Right: js_ast.Expr{Loc: loc, Data: &js_ast.EArray{
+						Items:        []js_ast.Expr{{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}},
+						IsSingleLine: true,
+					}},
+				}}}}},
+			},
+		},
+
+		// Outer finally: close the iterator if the loop exited early, then
+		// re-throw any saved error
+		Finally: &js_ast.Finally{
+			Loc: loc,
+			Block: js_ast.SBlock{
+				Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.STry{
+					BlockLoc: loc,
+					// "more && (temp = iter.return) && (await temp.call(iter))"
+					Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SExpr{
+						Value: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+							Op: js_ast.BinOpLogicalAnd,
+							Left: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+								Op:   js_ast.BinOpLogicalAnd,
+								Left: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: moreRef}},
+								Right: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+									Op:   js_ast.BinOpAssign,
+									Left: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+									Right: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+										Target:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: iterRef}},
+										NameLoc: loc,
+										Name:    "return",
+									}},
+								}},
+							}},
+							Right: awaitTempCallIter,
+						}},
+					}}}},
+					// "finally { if (error) throw error[0] }"
+					Finally: &js_ast.Finally{
+						Loc: loc,
+						Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SIf{
+							Test: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: errorRef}},
+							Yes: js_ast.Stmt{Loc: loc, Data: &js_ast.SThrow{Value: js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+								Target: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: errorRef}},
+								Index:  js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: 0}},
+							}}}},
+						}}}},
+					},
+				}}},
+			},
+		},
+	}})
+}
+
+// bindingHasObjectRest reports whether the given binding pattern contains
+// an object rest pattern ("...x" inside an object destructuring) anywhere
+// within it, including inside nested array and object patterns.
+func bindingHasObjectRest(binding js_ast.Binding) bool {
+	switch data := binding.Data.(type) {
+	case *js_ast.BArray:
+		for _, entry := range data.Items {
+			if bindingHasObjectRest(entry.Binding) {
+				return true
+			}
+		}
+		return false
+
+	case *js_ast.BObject:
+		for _, prop := range data.Properties {
+			if prop.IsSpread || bindingHasObjectRest(prop.Value) {
+				return true
+			}
+		}
+		return false
+
+	default:
+		// Identifiers and missing bindings cannot contain a rest pattern
+		return false
+	}
+}
+
+// exprHasObjectRest reports whether the given expression, interpreted as a
+// destructuring assignment target, contains an object rest pattern
+// anywhere within it (including behind default values and nesting).
+func exprHasObjectRest(expr js_ast.Expr) bool {
+	switch data := expr.Data.(type) {
+	case *js_ast.EBinary:
+		// A default value ("[x = 1]"): only the target side can hold a pattern
+		return data.Op == js_ast.BinOpAssign && exprHasObjectRest(data.Left)
+
+	case *js_ast.EArray:
+		for _, element := range data.Items {
+			if exprHasObjectRest(element) {
+				return true
+			}
+		}
+
+	case *js_ast.EObject:
+		for _, prop := range data.Properties {
+			if prop.Kind == js_ast.PropertySpread || exprHasObjectRest(prop.ValueOrNil) {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// lowerObjectRestInDecls rewrites declarations whose binding pattern
+// contains an object rest pattern (e.g. "let {a, ...b} = c") into a series
+// of declarations that use the "__objRest" runtime helper instead. The
+// input slice is returned unchanged when no lowering is needed.
+func (p *parser) lowerObjectRestInDecls(decls []js_ast.Decl) []js_ast.Decl {
+	if !p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		return decls
+	}
+
+	// Don't do any allocations if there are no object rest patterns. We want as
+	// little overhead as possible in the common case.
+	for i, decl := range decls {
+		if decl.ValueOrNil.Data != nil && bindingHasObjectRest(decl.Binding) {
+			clone := append([]js_ast.Decl{}, decls[:i]...)
+			for _, decl := range decls[i:] {
+				if decl.ValueOrNil.Data != nil {
+					target := js_ast.ConvertBindingToExpr(decl.Binding, nil)
+					if result, ok := p.lowerObjectRestToDecls(target, decl.ValueOrNil, clone); ok {
+						clone = result
+						continue
+					}
+				}
+				// This declaration needed no lowering; keep it as-is
+				clone = append(clone, decl)
+			}
+
+			return clone
+		}
+	}
+
+	return decls
+}
+
+// lowerObjectRestInForLoopInit handles object rest patterns that appear in
+// the initializer of a for-in/for-of loop. The pattern in the loop head is
+// replaced with a temporary reference and the real destructuring is moved
+// into a statement prepended to the loop body.
+func (p *parser) lowerObjectRestInForLoopInit(init js_ast.Stmt, body *js_ast.Stmt) {
+	if !p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		return
+	}
+
+	var bodyPrefixStmt js_ast.Stmt
+
+	switch s := init.Data.(type) {
+	case *js_ast.SExpr:
+		// "for ({...x} in y) {}"
+		// "for ({...x} of y) {}"
+		if exprHasObjectRest(s.Value) {
+			ref := p.generateTempRef(tempRefNeedsDeclare, "")
+			if expr, ok := p.lowerAssign(s.Value, js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}}, objRestReturnValueIsUnused); ok {
+				p.recordUsage(ref)
+				s.Value.Data = &js_ast.EIdentifier{Ref: ref}
+				bodyPrefixStmt = js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}}
+			}
+		}
+
+	case *js_ast.SLocal:
+		// "for (let {...x} in y) {}"
+		// "for (let {...x} of y) {}"
+		if len(s.Decls) == 1 && bindingHasObjectRest(s.Decls[0].Binding) {
+			ref := p.generateTempRef(tempRefNoDeclare, "")
+			decl := js_ast.Decl{Binding: s.Decls[0].Binding, ValueOrNil: js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}}}
+			p.recordUsage(ref)
+			decls := p.lowerObjectRestInDecls([]js_ast.Decl{decl})
+			s.Decls[0].Binding.Data = &js_ast.BIdentifier{Ref: ref}
+			bodyPrefixStmt = js_ast.Stmt{Loc: init.Loc, Data: &js_ast.SLocal{Kind: s.Kind, Decls: decls}}
+		}
+	}
+
+	if bodyPrefixStmt.Data != nil {
+		if block, ok := body.Data.(*js_ast.SBlock); ok {
+			// If there's already a block, insert at the front
+			stmts := make([]js_ast.Stmt, 0, 1+len(block.Stmts))
+			block.Stmts = append(append(stmts, bodyPrefixStmt), block.Stmts...)
+		} else {
+			// Otherwise, make a block and insert at the front
+			body.Data = &js_ast.SBlock{Stmts: []js_ast.Stmt{bodyPrefixStmt, *body}}
+		}
+	}
+}
+
+// lowerObjectRestInCatchBinding handles object rest patterns in a catch
+// clause binding ("catch ({...x})"). The binding is replaced with a
+// temporary and a "let" declaration performing the real destructuring is
+// prepended to the catch block body.
+func (p *parser) lowerObjectRestInCatchBinding(catch *js_ast.Catch) {
+	if !p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		return
+	}
+
+	if catch.BindingOrNil.Data != nil && bindingHasObjectRest(catch.BindingOrNil) {
+		ref := p.generateTempRef(tempRefNoDeclare, "")
+		decl := js_ast.Decl{Binding: catch.BindingOrNil, ValueOrNil: js_ast.Expr{Loc: catch.BindingOrNil.Loc, Data: &js_ast.EIdentifier{Ref: ref}}}
+		p.recordUsage(ref)
+		decls := p.lowerObjectRestInDecls([]js_ast.Decl{decl})
+		catch.BindingOrNil.Data = &js_ast.BIdentifier{Ref: ref}
+		stmts := make([]js_ast.Stmt, 0, 1+len(catch.Block.Stmts))
+		stmts = append(stmts, js_ast.Stmt{Loc: catch.BindingOrNil.Loc, Data: &js_ast.SLocal{Kind: js_ast.LocalLet, Decls: decls}})
+		catch.Block.Stmts = append(stmts, catch.Block.Stmts...)
+	}
+}
+
+// objRestMode controls whether the lowered object rest assignment must
+// preserve the value of the original initializer expression.
+type objRestMode uint8
+
+const (
+	// The caller ignores the result of the assignment expression
+	objRestReturnValueIsUnused objRestMode = iota
+	// The caller uses the result, so the initializer value must be captured
+	// and returned (e.g. "console.log({...x} = x)")
+	objRestMustReturnInitExpr
+)
+
+// lowerAssign lowers the destructuring assignment "rootExpr = rootInit".
+// It returns the lowered expression and true when lowering was needed
+// (either for an object rest pattern or for a super-property/private-name
+// target), or a zero value and false when the assignment can stay as-is.
+func (p *parser) lowerAssign(rootExpr js_ast.Expr, rootInit js_ast.Expr, mode objRestMode) (js_ast.Expr, bool) {
+	rootExpr, didLower := p.lowerSuperPropertyOrPrivateInAssign(rootExpr)
+
+	var expr js_ast.Expr
+	assign := func(left js_ast.Expr, right js_ast.Expr) {
+		// Accumulate the generated assignments into one comma expression
+		expr = js_ast.JoinWithComma(expr, js_ast.Assign(left, right))
+	}
+
+	if initWrapFunc, ok := p.lowerObjectRestHelper(rootExpr, rootInit, assign, tempRefNeedsDeclare, mode); ok {
+		if initWrapFunc != nil {
+			expr = initWrapFunc(expr)
+		}
+		return expr, true
+	}
+
+	if didLower {
+		return js_ast.Assign(rootExpr, rootInit), true
+	}
+
+	return js_ast.Expr{}, false
+}
+
+// lowerObjectRestToDecls is like lowerAssign except that the generated
+// assignments are appended to "decls" as declarations instead of being
+// joined into an expression. Returns false when the pattern contains no
+// object rest bindings.
+func (p *parser) lowerObjectRestToDecls(rootExpr js_ast.Expr, rootInit js_ast.Expr, decls []js_ast.Decl) ([]js_ast.Decl, bool) {
+	assign := func(left js_ast.Expr, right js_ast.Expr) {
+		// The helper only ever generates valid binding targets here, so a
+		// conversion failure indicates a bug in the lowering code itself
+		binding, invalidLog := p.convertExprToBinding(left, invalidLog{})
+		if len(invalidLog.invalidTokens) > 0 {
+			panic("Internal error")
+		}
+		decls = append(decls, js_ast.Decl{Binding: binding, ValueOrNil: right})
+	}
+
+	if _, ok := p.lowerObjectRestHelper(rootExpr, rootInit, assign, tempRefNoDeclare, objRestReturnValueIsUnused); ok {
+		return decls, true
+	}
+
+	return nil, false
+}
+
+// lowerObjectRestHelper is the shared implementation behind the object rest
+// lowering entry points. It scans "rootExpr" for object rest patterns and,
+// when any are present, emits a series of assignments via the "assign"
+// callback that are equivalent to the original destructuring but use the
+// "__objRest" runtime helper. "declare" controls how temporaries are
+// declared and "mode" controls whether the initializer's value must be
+// preserved for the caller. Returns ok == false when no lowering was
+// necessary; wrapFunc (when non-nil) must be applied to the final joined
+// expression to return the initializer value.
+func (p *parser) lowerObjectRestHelper(
+	rootExpr js_ast.Expr,
+	rootInit js_ast.Expr,
+	assign func(js_ast.Expr, js_ast.Expr),
+	declare generateTempRefArg,
+	mode objRestMode,
+) (wrapFunc func(js_ast.Expr) js_ast.Expr, ok bool) {
+	if !p.options.unsupportedJSFeatures.Has(compat.ObjectRestSpread) {
+		return nil, false
+	}
+
+	// Check if this could possibly contain an object rest binding
+	switch rootExpr.Data.(type) {
+	case *js_ast.EArray, *js_ast.EObject:
+	default:
+		return nil, false
+	}
+
+	// Scan for object rest bindings and initialize rest binding containment
+	containsRestBinding := make(map[js_ast.E]bool)
+	var findRestBindings func(js_ast.Expr) bool
+	findRestBindings = func(expr js_ast.Expr) bool {
+		found := false
+		switch e := expr.Data.(type) {
+		case *js_ast.EBinary:
+			if e.Op == js_ast.BinOpAssign && findRestBindings(e.Left) {
+				found = true
+			}
+		case *js_ast.EArray:
+			for _, item := range e.Items {
+				if findRestBindings(item) {
+					found = true
+				}
+			}
+		case *js_ast.EObject:
+			for _, property := range e.Properties {
+				if property.Kind == js_ast.PropertySpread || findRestBindings(property.ValueOrNil) {
+					found = true
+				}
+			}
+		}
+		if found {
+			containsRestBinding[expr.Data] = true
+		}
+		return found
+	}
+	findRestBindings(rootExpr)
+	if len(containsRestBinding) == 0 {
+		return nil, false
+	}
+
+	// If there is at least one rest binding, lower the whole expression
+	var visit func(js_ast.Expr, js_ast.Expr, []func() js_ast.Expr)
+
+	// captureIntoRef stores "expr" in a fresh temporary and returns the
+	// temporary's ref so the value can be referenced multiple times
+	captureIntoRef := func(expr js_ast.Expr) ast.Ref {
+		ref := p.generateTempRef(declare, "")
+		assign(js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: ref}}, expr)
+		p.recordUsage(ref)
+		return ref
+	}
+
+	lowerObjectRestPattern := func(
+		before []js_ast.Property,
+		binding js_ast.Expr,
+		init js_ast.Expr,
+		capturedKeys []func() js_ast.Expr,
+		isSingleLine bool,
+	) {
+		// If there are properties before this one, store the initializer in a
+		// temporary so we can reference it multiple times, then create a new
+		// destructuring assignment for these properties
+		if len(before) > 0 {
+			// "let {a, ...b} = c"
+			ref := captureIntoRef(init)
+			assign(js_ast.Expr{Loc: before[0].Key.Loc, Data: &js_ast.EObject{Properties: before, IsSingleLine: isSingleLine}},
+				js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}})
+			init = js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+			// The temporary is used twice: in the assignment above and as the
+			// new "init" for the "__objRest" call below
+			p.recordUsage(ref)
+			p.recordUsage(ref)
+		}
+
+		// Call "__objRest" to clone the initializer without the keys for previous
+		// properties, then assign the result to the binding for the rest pattern
+		keysToExclude := make([]js_ast.Expr, len(capturedKeys))
+		for i, capturedKey := range capturedKeys {
+			keysToExclude[i] = capturedKey()
+		}
+		assign(binding, p.callRuntime(binding.Loc, "__objRest", []js_ast.Expr{init,
+			{Loc: binding.Loc, Data: &js_ast.EArray{Items: keysToExclude, IsSingleLine: isSingleLine}}}))
+	}
+
+	splitArrayPattern := func(
+		before []js_ast.Expr,
+		split js_ast.Expr,
+		after []js_ast.Expr,
+		init js_ast.Expr,
+		isSingleLine bool,
+	) {
+		// If this has a default value, skip the value to target the binding
+		binding := &split
+		if binary, ok := binding.Data.(*js_ast.EBinary); ok && binary.Op == js_ast.BinOpAssign {
+			binding = &binary.Left
+		}
+
+		// Swap the binding with a temporary
+		splitRef := p.generateTempRef(declare, "")
+		deferredBinding := *binding
+		binding.Data = &js_ast.EIdentifier{Ref: splitRef}
+		items := append(before, split)
+
+		// If there are any items left over, defer them until later too
+		var tailExpr js_ast.Expr
+		var tailInit js_ast.Expr
+		if len(after) > 0 {
+			tailRef := p.generateTempRef(declare, "")
+			loc := after[0].Loc
+			tailExpr = js_ast.Expr{Loc: loc, Data: &js_ast.EArray{Items: after, IsSingleLine: isSingleLine}}
+			tailInit = js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tailRef}}
+			items = append(items, js_ast.Expr{Loc: loc, Data: &js_ast.ESpread{Value: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tailRef}}}})
+			// The tail temporary is used twice: in the spread above and in
+			// "tailInit" below
+			p.recordUsage(tailRef)
+			p.recordUsage(tailRef)
+		}
+
+		// The original destructuring assignment must come first
+		assign(js_ast.Expr{Loc: split.Loc, Data: &js_ast.EArray{Items: items, IsSingleLine: isSingleLine}}, init)
+
+		// Then the deferred split is evaluated
+		visit(deferredBinding, js_ast.Expr{Loc: split.Loc, Data: &js_ast.EIdentifier{Ref: splitRef}}, nil)
+		p.recordUsage(splitRef)
+
+		// Then anything after the split
+		if len(after) > 0 {
+			visit(tailExpr, tailInit, nil)
+		}
+	}
+
+	splitObjectPattern := func(
+		upToSplit []js_ast.Property,
+		afterSplit []js_ast.Property,
+		init js_ast.Expr,
+		capturedKeys []func() js_ast.Expr,
+		isSingleLine bool,
+	) {
+		// If there are properties after the split, store the initializer in a
+		// temporary so we can reference it multiple times
+		var afterSplitInit js_ast.Expr
+		if len(afterSplit) > 0 {
+			ref := captureIntoRef(init)
+			init = js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+			afterSplitInit = js_ast.Expr{Loc: init.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+		}
+
+		split := &upToSplit[len(upToSplit)-1]
+		binding := &split.ValueOrNil
+
+		// Swap the binding with a temporary
+		splitRef := p.generateTempRef(declare, "")
+		deferredBinding := *binding
+		binding.Data = &js_ast.EIdentifier{Ref: splitRef}
+		p.recordUsage(splitRef)
+
+		// Use a destructuring assignment to unpack everything up to and including
+		// the split point
+		assign(js_ast.Expr{Loc: binding.Loc, Data: &js_ast.EObject{Properties: upToSplit, IsSingleLine: isSingleLine}}, init)
+
+		// Handle any nested rest binding patterns inside the split point
+		visit(deferredBinding, js_ast.Expr{Loc: binding.Loc, Data: &js_ast.EIdentifier{Ref: splitRef}}, nil)
+		p.recordUsage(splitRef)
+
+		// Then continue on to any properties after the split
+		if len(afterSplit) > 0 {
+			visit(js_ast.Expr{Loc: binding.Loc, Data: &js_ast.EObject{
+				Properties:   afterSplit,
+				IsSingleLine: isSingleLine,
+			}}, afterSplitInit, capturedKeys)
+		}
+	}
+
+	// This takes an expression representing a binding pattern as input and
+	// returns that binding pattern with any object rest patterns stripped out.
+	// The object rest patterns are lowered and appended to "exprChain" along
+	// with any child binding patterns that came after the binding pattern
+	// containing the object rest pattern.
+	//
+	// This transform must be very careful to preserve the exact evaluation
+	// order of all assignments, default values, and computed property keys.
+	//
+	// Unlike the Babel and TypeScript compilers, this transform does not
+	// lower binding patterns other than object rest patterns. For example,
+	// array spread patterns are preserved.
+	//
+	// Certain patterns such as "{a: {...a}, b: {...b}, ...c}" may need to be
+	// split multiple times. In this case the "capturedKeys" argument allows
+	// the visitor to pass on captured keys to the tail-recursive call that
+	// handles the properties after the split.
+	visit = func(expr js_ast.Expr, init js_ast.Expr, capturedKeys []func() js_ast.Expr) {
+		switch e := expr.Data.(type) {
+		case *js_ast.EArray:
+			// Split on the first binding with a nested rest binding pattern
+			for i, item := range e.Items {
+				// "let [a, {...b}, c] = d"
+				if containsRestBinding[item.Data] {
+					splitArrayPattern(e.Items[:i], item, append([]js_ast.Expr{}, e.Items[i+1:]...), init, e.IsSingleLine)
+					return
+				}
+			}
+
+		case *js_ast.EObject:
+			last := len(e.Properties) - 1
+			endsWithRestBinding := last >= 0 && e.Properties[last].Kind == js_ast.PropertySpread
+
+			// Split on the first binding with a nested rest binding pattern
+			for i := range e.Properties {
+				property := &e.Properties[i]
+
+				// "let {a, ...b} = c"
+				if property.Kind == js_ast.PropertySpread {
+					lowerObjectRestPattern(e.Properties[:i], property.ValueOrNil, init, capturedKeys, e.IsSingleLine)
+					return
+				}
+
+				// Save a copy of this key so the rest binding can exclude it
+				if endsWithRestBinding {
+					key, capturedKey := p.captureKeyForObjectRest(property.Key)
+					property.Key = key
+					capturedKeys = append(capturedKeys, capturedKey)
+				}
+
+				// "let {a: {...b}, c} = d"
+				if containsRestBinding[property.ValueOrNil.Data] {
+					splitObjectPattern(e.Properties[:i+1], e.Properties[i+1:], init, capturedKeys, e.IsSingleLine)
+					return
+				}
+			}
+		}
+
+		assign(expr, init)
+	}
+
+	// Capture and return the value of the initializer if this is an assignment
+	// expression and the return value is used:
+	//
+	//   // Input:
+	//   console.log({...x} = x);
+	//
+	//   // Output:
+	//   var _a;
+	//   console.log((x = __objRest(_a = x, []), _a));
+	//
+	// This isn't necessary if the return value is unused:
+	//
+	//   // Input:
+	//   ({...x} = x);
+	//
+	//   // Output:
+	//   x = __objRest(x, []);
+	//
+	if mode == objRestMustReturnInitExpr {
+		initFunc, initWrapFunc := p.captureValueWithPossibleSideEffects(rootInit.Loc, 2, rootInit, valueCouldBeMutated)
+		rootInit = initFunc()
+		wrapFunc = func(expr js_ast.Expr) js_ast.Expr {
+			return initWrapFunc(js_ast.JoinWithComma(expr, initFunc()))
+		}
+	}
+
+	visit(rootExpr, rootInit, nil)
+	return wrapFunc, true
+}
+
+// Save a copy of the key for the call to "__objRest" later on. Certain
+// expressions can be converted to keys more efficiently than others.
+// "finalKey" replaces the original key in the pattern (it may capture the
+// key into a temporary when evaluating it twice would be observable), and
+// "capturedKey" builds a fresh expression that re-reads the captured key.
+func (p *parser) captureKeyForObjectRest(originalKey js_ast.Expr) (finalKey js_ast.Expr, capturedKey func() js_ast.Expr) {
+	loc := originalKey.Loc
+	finalKey = originalKey
+
+	switch k := originalKey.Data.(type) {
+	case *js_ast.EString:
+		// String literals have no side effects and can simply be duplicated
+		capturedKey = func() js_ast.Expr { return js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: k.Value}} }
+
+	case *js_ast.ENumber:
+		// Emit it as the number plus a string (i.e. call toString() on it).
+		// It's important to do it this way instead of trying to print the
+		// float as a string because Go's floating-point printer doesn't
+		// behave exactly the same as JavaScript and if they are different,
+		// the generated code will be wrong.
+		capturedKey = func() js_ast.Expr {
+			return js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+				Op:    js_ast.BinOpAdd,
+				Left:  js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: k.Value}},
+				Right: js_ast.Expr{Loc: loc, Data: &js_ast.EString{}},
+			}}
+		}
+
+	case *js_ast.EIdentifier:
+		// Identifier reads are assumed repeatable; normalize via "__restKey"
+		capturedKey = func() js_ast.Expr {
+			p.recordUsage(k.Ref)
+			return p.callRuntime(loc, "__restKey", []js_ast.Expr{{Loc: loc, Data: &js_ast.EIdentifier{Ref: k.Ref}}})
+		}
+
+	default:
+		// If it's an arbitrary expression, it probably has a side effect.
+		// Stash it in a temporary reference so we don't evaluate it twice.
+		tempRef := p.generateTempRef(tempRefNeedsDeclare, "")
+		finalKey = js_ast.Assign(js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}, originalKey)
+		capturedKey = func() js_ast.Expr {
+			p.recordUsage(tempRef)
+			return p.callRuntime(loc, "__restKey", []js_ast.Expr{{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}}})
+		}
+	}
+
+	return
+}
+
+// lowerTemplateLiteral converts a template literal into code that works
+// without template literal support. Untagged templates become chained
+// ".concat()" calls on the head string; tagged templates become a call to
+// the tag function with a cached template object built by the "__template"
+// runtime helper. "tagThisFunc"/"tagWrapFunc" forward a lowered optional
+// chain's "this" value when the tag came from one.
+func (p *parser) lowerTemplateLiteral(loc logger.Loc, e *js_ast.ETemplate, tagThisFunc func() js_ast.Expr, tagWrapFunc func(js_ast.Expr) js_ast.Expr) js_ast.Expr {
+	// If there is no tag, turn this into normal string concatenation
+	if e.TagOrNil.Data == nil {
+		var value js_ast.Expr
+
+		// Handle the head
+		value = js_ast.Expr{Loc: loc, Data: &js_ast.EString{
+			Value:          e.HeadCooked,
+			LegacyOctalLoc: e.LegacyOctalLoc,
+		}}
+
+		// Handle the tail. Each one is handled with a separate call to ".concat()"
+		// to handle various corner cases in the specification including:
+		//
+		//   * For objects, "toString" must be called instead of "valueOf"
+		//   * Side effects must happen inline instead of at the end
+		//   * Passing a "Symbol" instance should throw
+		//
+		for _, part := range e.Parts {
+			var args []js_ast.Expr
+			if len(part.TailCooked) > 0 {
+				args = []js_ast.Expr{part.Value, {Loc: part.TailLoc, Data: &js_ast.EString{Value: part.TailCooked}}}
+			} else {
+				args = []js_ast.Expr{part.Value}
+			}
+			value = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+				Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+					Target:  value,
+					Name:    "concat",
+					NameLoc: part.Value.Loc,
+				}},
+				Args: args,
+				Kind: js_ast.TargetWasOriginallyPropertyAccess,
+			}}
+		}
+
+		return value
+	}
+
+	// Otherwise, call the tag with the template object
+	needsRaw := false
+	cooked := []js_ast.Expr{}
+	raw := []js_ast.Expr{}
+	args := make([]js_ast.Expr, 0, 1+len(e.Parts))
+	args = append(args, js_ast.Expr{})
+
+	// Handle the head. A nil cooked value means the raw text contained an
+	// invalid escape sequence, in which case the cooked entry is undefined
+	// and the raw array must be passed along.
+	if e.HeadCooked == nil {
+		cooked = append(cooked, js_ast.Expr{Loc: e.HeadLoc, Data: js_ast.EUndefinedShared})
+		needsRaw = true
+	} else {
+		cooked = append(cooked, js_ast.Expr{Loc: e.HeadLoc, Data: &js_ast.EString{Value: e.HeadCooked}})
+		if !helpers.UTF16EqualsString(e.HeadCooked, e.HeadRaw) {
+			needsRaw = true
+		}
+	}
+	raw = append(raw, js_ast.Expr{Loc: e.HeadLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.HeadRaw)}})
+
+	// Handle the tail
+	for _, part := range e.Parts {
+		args = append(args, part.Value)
+		if part.TailCooked == nil {
+			cooked = append(cooked, js_ast.Expr{Loc: part.TailLoc, Data: js_ast.EUndefinedShared})
+			needsRaw = true
+		} else {
+			cooked = append(cooked, js_ast.Expr{Loc: part.TailLoc, Data: &js_ast.EString{Value: part.TailCooked}})
+			if !helpers.UTF16EqualsString(part.TailCooked, part.TailRaw) {
+				needsRaw = true
+			}
+		}
+		raw = append(raw, js_ast.Expr{Loc: part.TailLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(part.TailRaw)}})
+	}
+
+	// Construct the template object
+	cookedArray := js_ast.Expr{Loc: e.HeadLoc, Data: &js_ast.EArray{Items: cooked, IsSingleLine: true}}
+	var arrays []js_ast.Expr
+	if needsRaw {
+		arrays = []js_ast.Expr{cookedArray, {Loc: e.HeadLoc, Data: &js_ast.EArray{Items: raw, IsSingleLine: true}}}
+	} else {
+		arrays = []js_ast.Expr{cookedArray}
+	}
+	templateObj := p.callRuntime(e.HeadLoc, "__template", arrays)
+
+	// Cache it in a temporary object (required by the specification)
+	tempRef := p.generateTopLevelTempRef()
+	p.recordUsage(tempRef)
+	p.recordUsage(tempRef)
+	args[0] = js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+		Op:   js_ast.BinOpLogicalOr,
+		Left: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+		Right: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+			Op:    js_ast.BinOpAssign,
+			Left:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+			Right: templateObj,
+		}},
+	}}
+
+	// If this optional chain was used as a template tag, then also forward the value for "this"
+	if tagThisFunc != nil {
+		return tagWrapFunc(js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+			Target: js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+				Target:  e.TagOrNil,
+				Name:    "call",
+				NameLoc: e.HeadLoc,
+			}},
+			Args: append([]js_ast.Expr{tagThisFunc()}, args...),
+			Kind: js_ast.TargetWasOriginallyPropertyAccess,
+		}})
+	}
+
+	// Call the tag function
+	kind := js_ast.NormalCall
+	if e.TagWasOriginallyPropertyAccess {
+		kind = js_ast.TargetWasOriginallyPropertyAccess
+	}
+	return js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+		Target: e.TagOrNil,
+		Args:   args,
+		Kind:   kind,
+	}}
+}
+
+// couldPotentiallyThrow reports whether evaluating this expression could
+// throw. Primitive literals and function/arrow expressions are the only
+// forms guaranteed not to throw; everything else is treated conservatively.
+func couldPotentiallyThrow(data js_ast.E) bool {
+	switch data.(type) {
+	case *js_ast.ENull, *js_ast.EUndefined, *js_ast.EBoolean, *js_ast.ENumber,
+		*js_ast.EBigInt, *js_ast.EString, *js_ast.EFunction, *js_ast.EArrow:
+		return false
+	default:
+		// Identifiers, property accesses, calls, etc. may all throw
+		return true
+	}
+}
+
+// maybeLowerSetBinOp lowers compound assignments whose target is a private
+// name index ("a.#b += c") or a "super" property ("super.a += c"). It
+// returns a zero-value expression when neither case applies and the
+// assignment should be left alone.
+func (p *parser) maybeLowerSetBinOp(left js_ast.Expr, op js_ast.OpCode, right js_ast.Expr) js_ast.Expr {
+	if target, loc, private := p.extractPrivateIndex(left); private != nil {
+		return p.lowerPrivateSetBinOp(target, loc, private, op, right)
+	}
+	if property := p.extractSuperProperty(left); property.Data != nil {
+		return p.lowerSuperPropertySetBinOp(left.Loc, property, op, right)
+	}
+	return js_ast.Expr{}
+}
+
+// shouldLowerUsingDeclarations reports whether any statement in the list is
+// a "using" (or "await using") declaration that must be lowered given the
+// configured unsupported feature set. An "await using" also needs lowering
+// when "await" itself (or async generators, inside a generator) must be
+// lowered.
+func (p *parser) shouldLowerUsingDeclarations(stmts []js_ast.Stmt) bool {
+	for _, stmt := range stmts {
+		if local, ok := stmt.Data.(*js_ast.SLocal); ok &&
+			((local.Kind == js_ast.LocalUsing && p.options.unsupportedJSFeatures.Has(compat.Using)) ||
+				(local.Kind == js_ast.LocalAwaitUsing && (p.options.unsupportedJSFeatures.Has(compat.Using) ||
+					p.options.unsupportedJSFeatures.Has(compat.AsyncAwait) ||
+					(p.options.unsupportedJSFeatures.Has(compat.AsyncGenerator) && p.fnOrArrowDataVisit.isGenerator)))) {
+			return true
+		}
+	}
+	return false
+}
+
+// lowerUsingDeclarationContext tracks the state needed to lower the "using"
+// declarations within a single scope.
+type lowerUsingDeclarationContext struct {
+	firstUsingLoc logger.Loc // location of the first "using" declaration seen
+	stackRef      ast.Ref    // the "_stack" array that collects disposable resources
+	hasAwaitUsing bool       // true if any "await using" declaration was seen
+}
+
+// lowerUsingDeclarationContext creates a fresh lowering context with a new
+// symbol for the resource stack array.
+func (p *parser) lowerUsingDeclarationContext() lowerUsingDeclarationContext {
+	return lowerUsingDeclarationContext{
+		stackRef: p.newSymbol(ast.SymbolOther, "_stack"),
+	}
+}
+
+// scanStmts wraps each "using" declaration's initializer in a call to the
+// "__using" runtime helper and records where the first one appears. It also
+// notes whether any "await using" declarations were seen so that "finalize"
+// can await the disposal later. The declarations themselves are downgraded
+// to plain "const"/"var" declarations.
+func (ctx *lowerUsingDeclarationContext) scanStmts(p *parser, stmts []js_ast.Stmt) {
+	for _, stmt := range stmts {
+		if local, ok := stmt.Data.(*js_ast.SLocal); ok && local.Kind.IsUsing() {
+			// Wrap each "using" initializer in a call to the "__using" helper function
+			if ctx.firstUsingLoc.Start == 0 {
+				ctx.firstUsingLoc = stmt.Loc
+			}
+			if local.Kind == js_ast.LocalAwaitUsing {
+				ctx.hasAwaitUsing = true
+			}
+			for i, decl := range local.Decls {
+				if decl.ValueOrNil.Data != nil {
+					valueLoc := decl.ValueOrNil.Loc
+					p.recordUsage(ctx.stackRef)
+					args := []js_ast.Expr{
+						{Loc: valueLoc, Data: &js_ast.EIdentifier{Ref: ctx.stackRef}},
+						decl.ValueOrNil,
+					}
+					// The third argument tells "__using" this is an async disposal
+					if local.Kind == js_ast.LocalAwaitUsing {
+						args = append(args, js_ast.Expr{Loc: valueLoc, Data: &js_ast.EBoolean{Value: true}})
+					}
+					local.Decls[i].ValueOrNil = p.callRuntime(valueLoc, "__using", args)
+				}
+			}
+			if p.willWrapModuleInTryCatchForUsing && p.currentScope.Parent == nil {
+				local.Kind = js_ast.LocalVar
+			} else {
+				local.Kind = p.selectLocalKind(js_ast.LocalConst)
+			}
+		}
+	}
+}
+
+// finalize wraps "stmts" in a try/catch/finally block that disposes the
+// collected "using" resources via the "__callDispose" runtime helper.
+// Statements that can't live inside a try block (directives, imports,
+// export clauses, and optionally hoisted function declarations) are moved
+// out of it first, and exported locals are converted to "var" plus an
+// export clause appended after the try block.
+func (ctx *lowerUsingDeclarationContext) finalize(p *parser, stmts []js_ast.Stmt, shouldHoistFunctions bool) []js_ast.Stmt {
+	var result []js_ast.Stmt
+	var exports []js_ast.ClauseItem
+	end := 0
+
+	// Filter out statements that can't go in a try/catch block
+	for _, stmt := range stmts {
+		switch s := stmt.Data.(type) {
+		// Note: We don't need to handle class declarations here because they
+		// should have been already converted into local "var" declarations
+		// before this point. It's done in "lowerClass" instead of here because
+		// "lowerClass" already does this sometimes for other reasons, and it's
+		// more straightforward to do it in one place because it's complicated.
+
+		case *js_ast.SDirective, *js_ast.SImport, *js_ast.SExportFrom, *js_ast.SExportStar:
+			// These can't go in a try/catch block
+			result = append(result, stmt)
+			continue
+
+		case *js_ast.SExportClause:
+			// Merge export clauses together
+			exports = append(exports, s.Items...)
+			continue
+
+		case *js_ast.SFunction:
+			if shouldHoistFunctions {
+				// Hoist function declarations for cross-file ESM references
+				result = append(result, stmt)
+				continue
+			}
+
+		case *js_ast.SExportDefault:
+			if _, ok := s.Value.Data.(*js_ast.SFunction); ok && shouldHoistFunctions {
+				// Hoist function declarations for cross-file ESM references
+				result = append(result, stmt)
+				continue
+			}
+
+		case *js_ast.SLocal:
+			// If any of these are exported, turn it into a "var" and add export clauses
+			if s.IsExport {
+				js_ast.ForEachIdentifierBindingInDecls(s.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
+					exports = append(exports, js_ast.ClauseItem{
+						Alias:    p.symbols[b.Ref.InnerIndex].OriginalName,
+						AliasLoc: loc,
+						Name:     ast.LocRef{Loc: loc, Ref: b.Ref},
+					})
+					s.Kind = js_ast.LocalVar
+				})
+				s.IsExport = false
+			}
+		}
+
+		// Keep this statement; it will go inside the try block
+		stmts[end] = stmt
+		end++
+	}
+	stmts = stmts[:end]
+
+	// Generate the variables we'll need
+	caughtRef := p.newSymbol(ast.SymbolOther, "_")
+	errorRef := p.newSymbol(ast.SymbolOther, "_error")
+	hasErrorRef := p.newSymbol(ast.SymbolOther, "_hasError")
+
+	// Generated variables are declared with "var", so hoist them up
+	scope := p.currentScope
+	for !scope.Kind.StopsHoisting() {
+		scope = scope.Parent
+	}
+	isTopLevel := scope == p.moduleScope
+	scope.Generated = append(scope.Generated, ctx.stackRef, caughtRef, errorRef, hasErrorRef)
+	p.declaredSymbols = append(p.declaredSymbols,
+		js_ast.DeclaredSymbol{IsTopLevel: isTopLevel, Ref: ctx.stackRef},
+		js_ast.DeclaredSymbol{IsTopLevel: isTopLevel, Ref: caughtRef},
+		js_ast.DeclaredSymbol{IsTopLevel: isTopLevel, Ref: errorRef},
+		js_ast.DeclaredSymbol{IsTopLevel: isTopLevel, Ref: hasErrorRef},
+	)
+
+	// Call the "__callDispose" helper function at the end of the scope
+	loc := ctx.firstUsingLoc
+	p.recordUsage(ctx.stackRef)
+	p.recordUsage(errorRef)
+	p.recordUsage(hasErrorRef)
+	callDispose := p.callRuntime(loc, "__callDispose", []js_ast.Expr{
+		{Loc: loc, Data: &js_ast.EIdentifier{Ref: ctx.stackRef}},
+		{Loc: loc, Data: &js_ast.EIdentifier{Ref: errorRef}},
+		{Loc: loc, Data: &js_ast.EIdentifier{Ref: hasErrorRef}},
+	})
+
+	// If there was an "await using", optionally await the returned promise
+	var finallyStmts []js_ast.Stmt
+	if ctx.hasAwaitUsing {
+		promiseRef := p.generateTempRef(tempRefNoDeclare, "_promise")
+		scope.Generated = append(scope.Generated, promiseRef)
+		p.declaredSymbols = append(p.declaredSymbols, js_ast.DeclaredSymbol{IsTopLevel: isTopLevel, Ref: promiseRef})
+
+		// "await" expressions turn into "yield" expressions when lowering
+		p.recordUsage(promiseRef)
+		awaitExpr := p.maybeLowerAwait(loc, &js_ast.EAwait{Value: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: promiseRef}}})
+
+		p.recordUsage(promiseRef)
+		finallyStmts = []js_ast.Stmt{
+			{Loc: loc, Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
+				Binding:    js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: promiseRef}},
+				ValueOrNil: callDispose,
+			}}}},
+
+			// The "await" must not happen if an error was thrown before the
+			// "await using", so we conditionally await here:
+			//
+			//   var promise = __callDispose(stack, error, hasError);
+			//   promise && await promise;
+			//
+			{Loc: loc, Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: loc, Data: &js_ast.EBinary{
+				Op:    js_ast.BinOpLogicalAnd,
+				Left:  js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: promiseRef}},
+				Right: awaitExpr,
+			}}}},
+		}
+	} else {
+		finallyStmts = []js_ast.Stmt{{Loc: loc, Data: &js_ast.SExpr{Value: callDispose}}}
+	}
+
+	// Wrap everything in a try/catch/finally block
+	p.recordUsage(caughtRef)
+	result = append(result,
+		js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+			Decls: []js_ast.Decl{{
+				Binding:    js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: ctx.stackRef}},
+				ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EArray{}},
+			}},
+		}},
+		js_ast.Stmt{Loc: loc, Data: &js_ast.STry{
+			Block: js_ast.SBlock{
+				Stmts: stmts,
+			},
+			BlockLoc: loc,
+			Catch: &js_ast.Catch{
+				Loc:          loc,
+				BindingOrNil: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: caughtRef}},
+				Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: loc, Data: &js_ast.SLocal{
+					Decls: []js_ast.Decl{{
+						Binding:    js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: errorRef}},
+						ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: caughtRef}},
+					}, {
+						Binding:    js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: hasErrorRef}},
+						ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}},
+					}},
+				}}}},
+				BlockLoc: loc,
+			},
+			Finally: &js_ast.Finally{
+				Loc:   loc,
+				Block: js_ast.SBlock{Stmts: finallyStmts},
+			},
+		}},
+	)
+	if len(exports) > 0 {
+		result = append(result, js_ast.Stmt{Loc: loc, Data: &js_ast.SExportClause{Items: exports}})
+	}
+	return result
+}
+
+// lowerUsingDeclarationInForOf lowers "for (using x of y)" by renaming the
+// loop variable to a temporary and re-declaring the original binding as a
+// lowered "using" declaration at the top of the loop body, which is then
+// wrapped in the disposal try/catch/finally by "finalize".
+func (p *parser) lowerUsingDeclarationInForOf(loc logger.Loc, init *js_ast.SLocal, body *js_ast.Stmt) {
+	binding := init.Decls[0].Binding
+	id := binding.Data.(*js_ast.BIdentifier)
+	tempRef := p.generateTempRef(tempRefNoDeclare, "_"+p.symbols[id.Ref.InnerIndex].OriginalName)
+	block, ok := body.Data.(*js_ast.SBlock)
+	if !ok {
+		// Wrap a non-block body in a block (dropping an empty statement)
+		block = &js_ast.SBlock{}
+		if _, ok := body.Data.(*js_ast.SEmpty); !ok {
+			block.Stmts = append(block.Stmts, *body)
+		}
+		body.Data = block
+	}
+	blockStmts := make([]js_ast.Stmt, 0, 1+len(block.Stmts))
+	blockStmts = append(blockStmts, js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+		Kind: init.Kind,
+		Decls: []js_ast.Decl{{
+			Binding:    js_ast.Binding{Loc: binding.Loc, Data: &js_ast.BIdentifier{Ref: id.Ref}},
+			ValueOrNil: js_ast.Expr{Loc: binding.Loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+		}},
+	}})
+	blockStmts = append(blockStmts, block.Stmts...)
+	ctx := p.lowerUsingDeclarationContext()
+	ctx.scanStmts(p, blockStmts)
+	block.Stmts = ctx.finalize(p, blockStmts, p.willWrapModuleInTryCatchForUsing && p.currentScope.Parent == nil)
+	init.Kind = js_ast.LocalVar
+	id.Ref = tempRef
+}
+
+// maybeLowerUsingDeclarationsInSwitch lowers "using" declarations that
+// appear inside switch cases. All cases share a single disposal scope, so
+// the entire switch statement is wrapped in one try/catch/finally. Returns
+// nil when no case requires lowering.
+func (p *parser) maybeLowerUsingDeclarationsInSwitch(loc logger.Loc, s *js_ast.SSwitch) []js_ast.Stmt {
+	// Check for a "using" declaration in any case
+	shouldLower := false
+	for _, c := range s.Cases {
+		if p.shouldLowerUsingDeclarations(c.Body) {
+			shouldLower = true
+			break
+		}
+	}
+	if !shouldLower {
+		return nil
+	}
+
+	// If we find one, lower all cases together
+	ctx := p.lowerUsingDeclarationContext()
+	for _, c := range s.Cases {
+		ctx.scanStmts(p, c.Body)
+	}
+	return ctx.finalize(p, []js_ast.Stmt{{Loc: loc, Data: s}}, false)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go
new file mode 100644
index 0000000..0145836
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/js_parser_lower_class.go
@@ -0,0 +1,2573 @@
+package js_parser
+
+import (
+	"fmt"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// privateSymbolNeedsToBeLowered reports whether this private name must be
+// lowered, either because the target environment doesn't support this kind
+// of private symbol or because the symbol was explicitly flagged for
+// lowering (see the PrivateSymbolMustBeLowered flag).
+func (p *parser) privateSymbolNeedsToBeLowered(private *js_ast.EPrivateIdentifier) bool {
+	symbol := &p.symbols[private.Ref.InnerIndex]
+	return p.options.unsupportedJSFeatures.Has(compat.SymbolFeature(symbol.Kind)) || symbol.Flags.Has(ast.PrivateSymbolMustBeLowered)
+}
+
+// lowerPrivateBrandCheck lowers an "in" brand check on a private name into a
+// call to the "__privateIn" runtime helper.
+func (p *parser) lowerPrivateBrandCheck(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier) js_ast.Expr {
+	// "#field in this" => "__privateIn(#field, this)"
+	return p.callRuntime(loc, "__privateIn", []js_ast.Expr{
+		{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+		target,
+	})
+}
+
+// lowerPrivateGet lowers a read of "target.#member" into the appropriate
+// runtime helper call. Methods and accessors pass along the lowered function
+// reference so the helper can dispatch to it; plain fields use the two-argument
+// "__privateGet" form.
+func (p *parser) lowerPrivateGet(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier) js_ast.Expr {
+	switch p.symbols[private.Ref.InnerIndex].Kind {
+	case ast.SymbolPrivateMethod, ast.SymbolPrivateStaticMethod:
+		// "this.#method" => "__privateMethod(this, #method, method_fn)"
+		fnRef := p.privateGetters[private.Ref]
+		p.recordUsage(fnRef)
+		return p.callRuntime(target.Loc, "__privateMethod", []js_ast.Expr{
+			target,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
+		})
+
+	case ast.SymbolPrivateGet, ast.SymbolPrivateStaticGet,
+		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
+		// "this.#getter" => "__privateGet(this, #getter, getter_get)"
+		fnRef := p.privateGetters[private.Ref]
+		p.recordUsage(fnRef)
+		return p.callRuntime(target.Loc, "__privateGet", []js_ast.Expr{
+			target,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
+		})
+
+	default:
+		// "this.#field" => "__privateGet(this, #field)"
+		return p.callRuntime(target.Loc, "__privateGet", []js_ast.Expr{
+			target,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+		})
+	}
+}
+
+// lowerPrivateSet lowers a write of "target.#member = value" into a call to
+// the "__privateSet" runtime helper. Accessors additionally pass the lowered
+// setter function so the helper can invoke it.
+func (p *parser) lowerPrivateSet(
+	target js_ast.Expr,
+	loc logger.Loc,
+	private *js_ast.EPrivateIdentifier,
+	value js_ast.Expr,
+) js_ast.Expr {
+	switch p.symbols[private.Ref.InnerIndex].Kind {
+	case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
+		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
+		// "this.#setter = 123" => "__privateSet(this, #setter, 123, setter_set)"
+		fnRef := p.privateSetters[private.Ref]
+		p.recordUsage(fnRef)
+		return p.callRuntime(target.Loc, "__privateSet", []js_ast.Expr{
+			target,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+			value,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
+		})
+
+	default:
+		// "this.#field = 123" => "__privateSet(this, #field, 123)"
+		return p.callRuntime(target.Loc, "__privateSet", []js_ast.Expr{
+			target,
+			{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+			value,
+		})
+	}
+}
+
+// lowerPrivateSetUnOp lowers a unary update of a private member (e.g. "++" or
+// "--") using the "__privateWrapper" runtime helper, which exposes the member
+// as a "_" property so the original operator semantics are preserved.
+func (p *parser) lowerPrivateSetUnOp(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier, op js_ast.OpCode) js_ast.Expr {
+	kind := p.symbols[private.Ref.InnerIndex].Kind
+
+	// Determine the setter, if any
+	var setter js_ast.Expr
+	switch kind {
+	case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
+		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
+		ref := p.privateSetters[private.Ref]
+		p.recordUsage(ref)
+		setter = js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	}
+
+	// Determine the getter, if any
+	var getter js_ast.Expr
+	switch kind {
+	case ast.SymbolPrivateGet, ast.SymbolPrivateStaticGet,
+		ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
+		ref := p.privateGetters[private.Ref]
+		p.recordUsage(ref)
+		getter = js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	}
+
+	// Only include necessary arguments
+	args := []js_ast.Expr{
+		target,
+		{Loc: loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+	}
+	if setter.Data != nil {
+		args = append(args, setter)
+	}
+	if getter.Data != nil {
+		if setter.Data == nil {
+			// Pad with "null" so the getter lands in the correct argument slot
+			args = append(args, js_ast.Expr{Loc: loc, Data: js_ast.ENullShared})
+		}
+		args = append(args, getter)
+	}
+
+	// "target.#private++" => "__privateWrapper(target, #private, private_set, private_get)._++"
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EUnary{
+		Op: op,
+		Value: js_ast.Expr{Loc: target.Loc, Data: &js_ast.EDot{
+			Target:  p.callRuntime(target.Loc, "__privateWrapper", args),
+			NameLoc: target.Loc,
+			Name:    "_",
+		}},
+	}}
+}
+
+// lowerPrivateSetBinOp lowers a compound assignment to a private member by
+// expanding it into a get followed by a set. The target expression is
+// captured first so any side effects it has happen exactly once.
+func (p *parser) lowerPrivateSetBinOp(target js_ast.Expr, loc logger.Loc, private *js_ast.EPrivateIdentifier, op js_ast.OpCode, value js_ast.Expr) js_ast.Expr {
+	// "target.#private += 123" => "__privateSet(target, #private, __privateGet(target, #private) + 123)"
+	targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(target.Loc, 2, target, valueDefinitelyNotMutated)
+	return targetWrapFunc(p.lowerPrivateSet(targetFunc(), loc, private, js_ast.Expr{Loc: value.Loc, Data: &js_ast.EBinary{
+		Op:    op,
+		Left:  p.lowerPrivateGet(targetFunc(), loc, private),
+		Right: value,
+	}}))
+}
+
+// Returns valid data if target is an expression of the form "foo.#bar" and if
+// the language target is such that private members must be lowered
+func (p *parser) extractPrivateIndex(target js_ast.Expr) (js_ast.Expr, logger.Loc, *js_ast.EPrivateIdentifier) {
+	if index, ok := target.Data.(*js_ast.EIndex); ok {
+		if private, ok := index.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
+			return index.Target, index.Index.Loc, private
+		}
+	}
+	// Not a private member access that needs lowering: all zero values
+	return js_ast.Expr{}, logger.Loc{}, nil
+}
+
+// Returns a valid property if target is an expression of the form "super.bar"
+// or "super[bar]" and if the situation is such that it must be lowered
+func (p *parser) extractSuperProperty(target js_ast.Expr) js_ast.Expr {
+	switch e := target.Data.(type) {
+	case *js_ast.EDot:
+		if p.shouldLowerSuperPropertyAccess(e.Target) {
+			// "super.bar" lowers with a string key for the property name
+			return js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
+		}
+	case *js_ast.EIndex:
+		if p.shouldLowerSuperPropertyAccess(e.Target) {
+			// "super[bar]" lowers with the original index expression as the key
+			return e.Index
+		}
+	}
+	// An empty expression means no lowering is needed here
+	return js_ast.Expr{}
+}
+
+// lowerSuperPropertyOrPrivateInAssign rewrites assignment targets inside a
+// destructuring pattern so that "super.x", "super[x]", and lowered private
+// member accesses become writable "..._" wrapper expressions. It recurses
+// through spread elements, array patterns, and object patterns, and reports
+// whether anything was actually lowered.
+func (p *parser) lowerSuperPropertyOrPrivateInAssign(expr js_ast.Expr) (js_ast.Expr, bool) {
+	didLower := false
+
+	switch e := expr.Data.(type) {
+	case *js_ast.ESpread:
+		if value, ok := p.lowerSuperPropertyOrPrivateInAssign(e.Value); ok {
+			e.Value = value
+			didLower = true
+		}
+
+	case *js_ast.EDot:
+		// "[super.foo] = [bar]" => "[__superWrapper(this, 'foo')._] = [bar]"
+		if p.shouldLowerSuperPropertyAccess(e.Target) {
+			key := js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
+			expr = p.callSuperPropertyWrapper(expr.Loc, key)
+			didLower = true
+		}
+
+	case *js_ast.EIndex:
+		// "[super[foo]] = [bar]" => "[__superWrapper(this, foo)._] = [bar]"
+		if p.shouldLowerSuperPropertyAccess(e.Target) {
+			expr = p.callSuperPropertyWrapper(expr.Loc, e.Index)
+			didLower = true
+			break
+		}
+
+		// "[a.#b] = [c]" => "[__privateWrapper(a, #b)._] = [c]"
+		if private, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok && p.privateSymbolNeedsToBeLowered(private) {
+			var target js_ast.Expr
+
+			switch p.symbols[private.Ref.InnerIndex].Kind {
+			case ast.SymbolPrivateSet, ast.SymbolPrivateStaticSet,
+				ast.SymbolPrivateGetSetPair, ast.SymbolPrivateStaticGetSetPair:
+				// "this.#setter" => "__privateWrapper(this, #setter, setter_set)"
+				fnRef := p.privateSetters[private.Ref]
+				p.recordUsage(fnRef)
+				target = p.callRuntime(expr.Loc, "__privateWrapper", []js_ast.Expr{
+					e.Target,
+					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: fnRef}},
+				})
+
+			default:
+				// "this.#field" => "__privateWrapper(this, #field)"
+				target = p.callRuntime(expr.Loc, "__privateWrapper", []js_ast.Expr{
+					e.Target,
+					{Loc: expr.Loc, Data: &js_ast.EIdentifier{Ref: private.Ref}},
+				})
+			}
+
+			// "__privateWrapper(this, #field)" => "__privateWrapper(this, #field)._"
+			expr.Data = &js_ast.EDot{Target: target, Name: "_", NameLoc: expr.Loc}
+			didLower = true
+		}
+
+	case *js_ast.EArray:
+		// Recurse into each element of an array destructuring pattern
+		for i, item := range e.Items {
+			if item, ok := p.lowerSuperPropertyOrPrivateInAssign(item); ok {
+				e.Items[i] = item
+				didLower = true
+			}
+		}
+
+	case *js_ast.EObject:
+		// Recurse into each property value of an object destructuring pattern
+		for i, property := range e.Properties {
+			if property.ValueOrNil.Data != nil {
+				if value, ok := p.lowerSuperPropertyOrPrivateInAssign(property.ValueOrNil); ok {
+					e.Properties[i].ValueOrNil = value
+					didLower = true
+				}
+			}
+		}
+	}
+
+	return expr, didLower
+}
+
+// shouldLowerSuperPropertyAccess reports whether a property access on this
+// target is a "super" access inside a context (tracked per function/arrow)
+// where super property accesses must be lowered.
+func (p *parser) shouldLowerSuperPropertyAccess(expr js_ast.Expr) bool {
+	if p.fnOrArrowDataVisit.shouldLowerSuperPropertyAccess {
+		_, isSuper := expr.Data.(*js_ast.ESuper)
+		return isSuper
+	}
+	return false
+}
+
+// callSuperPropertyWrapper wraps a lowered "super" property reference in the
+// "__superWrapper" runtime helper and returns its "._" member, which serves
+// as a writable assignment target.
+func (p *parser) callSuperPropertyWrapper(loc logger.Loc, key js_ast.Expr) js_ast.Expr {
+	ref := *p.fnOnlyDataVisit.innerClassNameRef
+	p.recordUsage(ref)
+	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+
+	// Handle "this" in lowered static class field initializers
+	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
+		p.recordUsage(ref)
+		this.Data = &js_ast.EIdentifier{Ref: ref}
+	}
+
+	if !p.fnOnlyDataVisit.isInStaticClassContext {
+		// "super.foo" => "__superWrapper(Class.prototype, this, 'foo')._"
+		// "super[foo]" => "__superWrapper(Class.prototype, this, foo)._"
+		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
+	}
+
+	return js_ast.Expr{Loc: loc, Data: &js_ast.EDot{Target: p.callRuntime(loc, "__superWrapper", []js_ast.Expr{
+		class,
+		this,
+		key,
+	}), Name: "_", NameLoc: loc}}
+}
+
+// lowerSuperPropertyGet lowers a read of "super.foo"/"super[foo]" into a call
+// to the "__superGet" runtime helper, targeting the class itself in static
+// contexts and "Class.prototype" otherwise.
+func (p *parser) lowerSuperPropertyGet(loc logger.Loc, key js_ast.Expr) js_ast.Expr {
+	ref := *p.fnOnlyDataVisit.innerClassNameRef
+	p.recordUsage(ref)
+	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+
+	// Handle "this" in lowered static class field initializers
+	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
+		p.recordUsage(ref)
+		this.Data = &js_ast.EIdentifier{Ref: ref}
+	}
+
+	if !p.fnOnlyDataVisit.isInStaticClassContext {
+		// "super.foo" => "__superGet(Class.prototype, this, 'foo')"
+		// "super[foo]" => "__superGet(Class.prototype, this, foo)"
+		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
+	}
+
+	return p.callRuntime(loc, "__superGet", []js_ast.Expr{
+		class,
+		this,
+		key,
+	})
+}
+
+// lowerSuperPropertySet lowers a write of "super.foo = bar"/"super[foo] = bar"
+// into a call to the "__superSet" runtime helper, targeting the class itself
+// in static contexts and "Class.prototype" otherwise.
+func (p *parser) lowerSuperPropertySet(loc logger.Loc, key js_ast.Expr, value js_ast.Expr) js_ast.Expr {
+	// "super.foo = bar" => "__superSet(Class, this, 'foo', bar)"
+	// "super[foo] = bar" => "__superSet(Class, this, foo, bar)"
+	ref := *p.fnOnlyDataVisit.innerClassNameRef
+	p.recordUsage(ref)
+	class := js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+	this := js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+
+	// Handle "this" in lowered static class field initializers
+	if p.fnOnlyDataVisit.shouldReplaceThisWithInnerClassNameRef {
+		p.recordUsage(ref)
+		this.Data = &js_ast.EIdentifier{Ref: ref}
+	}
+
+	if !p.fnOnlyDataVisit.isInStaticClassContext {
+		// "super.foo = bar" => "__superSet(Class.prototype, this, 'foo', bar)"
+		// "super[foo] = bar" => "__superSet(Class.prototype, this, foo, bar)"
+		class.Data = &js_ast.EDot{Target: class, NameLoc: loc, Name: "prototype"}
+	}
+
+	return p.callRuntime(loc, "__superSet", []js_ast.Expr{
+		class,
+		this,
+		key,
+		value,
+	})
+}
+
+// lowerSuperPropertySetBinOp lowers a compound assignment to a "super"
+// property by expanding it into a get followed by a set. The property key is
+// captured first so any side effects it has happen exactly once.
+func (p *parser) lowerSuperPropertySetBinOp(loc logger.Loc, property js_ast.Expr, op js_ast.OpCode, value js_ast.Expr) js_ast.Expr {
+	// "super.foo += bar" => "__superSet(Class, this, 'foo', __superGet(Class, this, 'foo') + bar)"
+	// "super[foo] += bar" => "__superSet(Class, this, foo, __superGet(Class, this, foo) + bar)"
+	// "super[foo()] += bar" => "__superSet(Class, this, _a = foo(), __superGet(Class, this, _a) + bar)"
+	targetFunc, targetWrapFunc := p.captureValueWithPossibleSideEffects(property.Loc, 2, property, valueDefinitelyNotMutated)
+	return targetWrapFunc(p.lowerSuperPropertySet(loc, targetFunc(), js_ast.Expr{Loc: value.Loc, Data: &js_ast.EBinary{
+		Op:    op,
+		Left:  p.lowerSuperPropertyGet(loc, targetFunc()),
+		Right: value,
+	}}))
+}
+
+// maybeLowerSuperPropertyGetInsideCall rewrites a call whose target is a
+// lowered "super" property access so that the method is invoked via ".call"
+// with "this" prepended to the argument list, preserving the receiver. Calls
+// whose targets don't need lowering are left untouched.
+func (p *parser) maybeLowerSuperPropertyGetInsideCall(call *js_ast.ECall) {
+	var key js_ast.Expr
+
+	switch e := call.Target.Data.(type) {
+	case *js_ast.EDot:
+		// Lower "super.prop" if necessary
+		if !p.shouldLowerSuperPropertyAccess(e.Target) {
+			return
+		}
+		key = js_ast.Expr{Loc: e.NameLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(e.Name)}}
+
+	case *js_ast.EIndex:
+		// Lower "super[prop]" if necessary
+		if !p.shouldLowerSuperPropertyAccess(e.Target) {
+			return
+		}
+		key = e.Index
+
+	default:
+		return
+	}
+
+	// "super.foo(a, b)" => "__superGet(Class, this, 'foo').call(this, a, b)"
+	call.Target.Data = &js_ast.EDot{
+		Target:  p.lowerSuperPropertyGet(call.Target.Loc, key),
+		NameLoc: key.Loc,
+		Name:    "call",
+	}
+	thisExpr := js_ast.Expr{Loc: call.Target.Loc, Data: js_ast.EThisShared}
+	call.Args = append([]js_ast.Expr{thisExpr}, call.Args...)
+}
+
+// classLoweringInfo records which groups of class members must be lowered and
+// whether "super()" calls need to be shimmed. It is computed up front by
+// computeClassLoweringInfo before any lowering takes place.
+type classLoweringInfo struct {
+	lowerAllInstanceFields bool // Move all instance field initializers into the constructor
+	lowerAllStaticFields   bool // Move all static field initializers out of the class body
+	shimSuperCtorCalls     bool // "super()" must be wrapped because lowered code uses "this"
+}
+
+// computeClassLoweringInfo decides, before any transformation happens, which
+// groups of members (instance fields, static fields) of this class must be
+// lowered and whether "super()" needs to be shimmed. The decision is based on
+// the unsupported-feature set, TypeScript settings, decorators, and which
+// private members appear in the class. Lowering is deliberately conservative
+// per group to preserve initializer evaluation order (see comments below).
+func (p *parser) computeClassLoweringInfo(class *js_ast.Class) (result classLoweringInfo) {
+	// Name keeping for classes is implemented with a static block. So we need to
+	// lower all static fields if static blocks are unsupported so that the name
+	// keeping comes first before other static initializers.
+	if p.options.keepNames && p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
+		result.lowerAllStaticFields = true
+	}
+
+	// TypeScript's "experimentalDecorators" feature replaces all references of
+	// the class name with the decorated class after class decorators have run.
+	// This cannot be done by only reassigning to the class symbol in JavaScript
+	// because it's shadowed by the class name within the class body. Instead,
+	// we need to hoist all code in static contexts out of the class body so
+	// that it's no longer shadowed:
+	//
+	//   const decorate = x => ({ x })
+	//   @decorate
+	//   class Foo {
+	//     static oldFoo = Foo
+	//     static newFoo = () => Foo
+	//   }
+	//   console.log('This must be false:', Foo.x.oldFoo === Foo.x.newFoo())
+	//
+	if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True && len(class.Decorators) > 0 {
+		result.lowerAllStaticFields = true
+	}
+
+	// If something has decorators, just lower everything for now. It's possible
+	// that we could avoid lowering in certain cases, but doing so is very tricky
+	// due to the complexity of the decorator specification. The specification is
+	// also still evolving so trying to optimize it now is also potentially
+	// premature.
+	if class.ShouldLowerStandardDecorators {
+		for _, prop := range class.Properties {
+			if len(prop.Decorators) > 0 {
+				for _, prop := range class.Properties {
+					if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+						p.symbols[private.Ref.InnerIndex].Flags |= ast.PrivateSymbolMustBeLowered
+					}
+				}
+				result.lowerAllStaticFields = true
+				result.lowerAllInstanceFields = true
+				break
+			}
+		}
+	}
+
+	// Conservatively lower fields of a given type (instance or static) when any
+	// member of that type needs to be lowered. This must be done to preserve
+	// evaluation order. For example:
+	//
+	//   class Foo {
+	//     #foo = 123
+	//     bar = this.#foo
+	//   }
+	//
+	// It would be bad if we transformed that into something like this:
+	//
+	//   var _foo;
+	//   class Foo {
+	//     constructor() {
+	//       _foo.set(this, 123);
+	//     }
+	//     bar = __privateGet(this, _foo);
+	//   }
+	//   _foo = new WeakMap();
+	//
+	// That evaluates "bar" then "foo" instead of "foo" then "bar" like the
+	// original code. We need to do this instead:
+	//
+	//   var _foo;
+	//   class Foo {
+	//     constructor() {
+	//       _foo.set(this, 123);
+	//       __publicField(this, "bar", __privateGet(this, _foo));
+	//     }
+	//   }
+	//   _foo = new WeakMap();
+	//
+	for _, prop := range class.Properties {
+		if prop.Kind == js_ast.PropertyClassStaticBlock {
+			if p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
+				result.lowerAllStaticFields = true
+			}
+			continue
+		}
+
+		if private, ok := prop.Key.Data.(*js_ast.EPrivateIdentifier); ok {
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				if p.privateSymbolNeedsToBeLowered(private) {
+					result.lowerAllStaticFields = true
+				}
+			} else {
+				if p.privateSymbolNeedsToBeLowered(private) {
+					result.lowerAllInstanceFields = true
+
+					// We can't transform this:
+					//
+					//   class Foo {
+					//     #foo = 123
+					//     static bar = new Foo().#foo
+					//   }
+					//
+					// into this:
+					//
+					//   var _foo;
+					//   const _Foo = class {
+					//     constructor() {
+					//       _foo.set(this, 123);
+					//     }
+					//     static bar = __privateGet(new _Foo(), _foo);
+					//   };
+					//   let Foo = _Foo;
+					//   _foo = new WeakMap();
+					//
+					// because "_Foo" won't be initialized in the initializer for "bar".
+					// So we currently lower all static fields in this case too. This
+					// isn't great and it would be good to find a way to avoid this.
+					// The inner class name symbol substitution mechanism should probably
+					// be rethought.
+					result.lowerAllStaticFields = true
+				}
+			}
+			continue
+		}
+
+		if prop.Kind == js_ast.PropertyAutoAccessor {
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				if p.options.unsupportedJSFeatures.Has(compat.ClassPrivateStaticField) {
+					result.lowerAllStaticFields = true
+				}
+			} else {
+				if p.options.unsupportedJSFeatures.Has(compat.ClassPrivateField) {
+					result.lowerAllInstanceFields = true
+					result.lowerAllStaticFields = true
+				}
+			}
+			continue
+		}
+
+		// This doesn't come before the private member check above because
+		// unsupported private methods must also trigger field lowering:
+		//
+		//   class Foo {
+		//     bar = this.#foo()
+		//     #foo() {}
+		//   }
+		//
+		// It would be bad if we transformed that to something like this:
+		//
+		//   var _foo, foo_fn;
+		//   class Foo {
+		//     constructor() {
+		//       _foo.add(this);
+		//     }
+		//     bar = __privateMethod(this, _foo, foo_fn).call(this);
+		//   }
+		//   _foo = new WeakSet();
+		//   foo_fn = function() {
+		//   };
+		//
+		// In that case the initializer of "bar" would fail to call "#foo" because
+		// it's only added to the instance in the body of the constructor.
+		if prop.Kind.IsMethodDefinition() {
+			// We need to shim "super()" inside the constructor if this is a derived
+			// class and the constructor has any parameter properties, since those
+			// use "this" and we can only access "this" after "super()" is called
+			if class.ExtendsOrNil.Data != nil {
+				if key, ok := prop.Key.Data.(*js_ast.EString); ok && helpers.UTF16EqualsString(key.Value, "constructor") {
+					if fn, ok := prop.ValueOrNil.Data.(*js_ast.EFunction); ok {
+						for _, arg := range fn.Fn.Args {
+							if arg.IsTypeScriptCtorField {
+								result.shimSuperCtorCalls = true
+								break
+							}
+						}
+					}
+				}
+			}
+			continue
+		}
+
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			// Static fields must be lowered if the target doesn't support them
+			if p.options.unsupportedJSFeatures.Has(compat.ClassStaticField) {
+				result.lowerAllStaticFields = true
+			}
+
+			// Convert static fields to assignment statements if the TypeScript
+			// setting for this is enabled. I don't think this matters for private
+			// fields because there's no way for this to call a setter in the base
+			// class, so this isn't done for private fields.
+			//
+			// If class static blocks are supported, then we can do this inline
+			// without needing to move the initializers outside of the class body.
+			// Otherwise, we need to lower all static class fields.
+			if p.options.ts.Parse && !class.UseDefineForClassFields && p.options.unsupportedJSFeatures.Has(compat.ClassStaticBlocks) {
+				result.lowerAllStaticFields = true
+			}
+		} else {
+			if p.options.ts.Parse && !class.UseDefineForClassFields {
+				// Convert instance fields to assignment statements if the TypeScript
+				// setting for this is enabled. I don't think this matters for private
+				// fields because there's no way for this to call a setter in the base
+				// class, so this isn't done for private fields.
+				if prop.InitializerOrNil.Data != nil {
+					// We can skip lowering all instance fields if all instance fields
+					// disappear completely when lowered. This happens when
+					// "useDefineForClassFields" is false and there is no initializer.
+					result.lowerAllInstanceFields = true
+				}
+			} else if p.options.unsupportedJSFeatures.Has(compat.ClassField) {
+				// Instance fields must be lowered if the target doesn't support them
+				result.lowerAllInstanceFields = true
+			}
+		}
+	}
+
+	// We need to shim "super()" inside the constructor if this is a derived
+	// class and there are any instance fields that need to be lowered, since
+	// those use "this" and we can only access "this" after "super()" is called
+	if result.lowerAllInstanceFields && class.ExtendsOrNil.Data != nil {
+		result.shimSuperCtorCalls = true
+	}
+
+	return
+}
+
+// classKind distinguishes the syntactic position of the class being lowered,
+// which determines how the generated replacement code must be emitted.
+type classKind uint8
+
+const (
+	classKindExpr              classKind = iota // A class expression
+	classKindStmt                               // A plain class statement
+	classKindExportStmt                         // An exported class statement
+	classKindExportDefaultStmt                  // A class inside "export default"
+)
+
+// lowerClassContext carries all of the state accumulated while lowering one
+// class: the class itself, where it appeared syntactically, and the generated
+// statements/expressions that must be emitted inside the constructor or after
+// the class body. It is created and driven by "lowerClass".
+type lowerClassContext struct {
+	nameToKeep  string
+	kind        classKind
+	class       *js_ast.Class
+	classLoc    logger.Loc
+	classExpr   js_ast.Expr // Only for "kind == classKindExpr", may be replaced by "nameFunc()"
+	defaultName ast.LocRef
+
+	ctor                   *js_ast.EFunction
+	extendsRef             ast.Ref
+	parameterFields        []js_ast.Stmt
+	instanceMembers        []js_ast.Stmt
+	instancePrivateMethods []js_ast.Stmt
+	autoAccessorCount      int
+
+	// These expressions are generated after the class body, in this order
+	computedPropertyChain js_ast.Expr
+	privateMembers        []js_ast.Expr
+	staticMembers         []js_ast.Expr
+	staticPrivateMethods  []js_ast.Expr
+
+	// These contain calls to "__decorateClass" for TypeScript experimental decorators
+	instanceExperimentalDecorators []js_ast.Expr
+	staticExperimentalDecorators   []js_ast.Expr
+
+	// These are used for implementing JavaScript decorators
+	decoratorContextRef                          ast.Ref
+	decoratorClassDecorators                     js_ast.Expr
+	decoratorPropertyToInitializerMap            map[int]int
+	decoratorCallInstanceMethodExtraInitializers bool
+	decoratorCallStaticMethodExtraInitializers   bool
+	decoratorStaticNonFieldElements              []js_ast.Expr
+	decoratorInstanceNonFieldElements            []js_ast.Expr
+	decoratorStaticFieldElements                 []js_ast.Expr
+	decoratorInstanceFieldElements               []js_ast.Expr
+
+	// These are used by "lowerMethod"
+	privateInstanceMethodRef ast.Ref
+	privateStaticMethodRef   ast.Ref
+
+	// These are only for class expressions that need to be captured
+	nameFunc            func() js_ast.Expr
+	wrapFunc            func(js_ast.Expr) js_ast.Expr
+	didCaptureClassExpr bool
+}
+
+// Apply all relevant transforms to a class object (either a statement or an
+// expression) including:
+//
+//   - Transforming class fields for older environments
+//   - Transforming static blocks for older environments
+//   - Transforming TypeScript experimental decorators into JavaScript
+//   - Transforming TypeScript class fields into assignments for "useDefineForClassFields"
+//
+// Note that this doesn't transform any nested AST subtrees inside the class
+// body (e.g. the contents of initializers, methods, and static blocks). Those
+// have already been transformed by "visitClass" by this point. It's done that
+// way for performance so that we don't need to do another AST pass.
+//
+// Exactly one of "stmt" or "expr" is expected to hold the class; the returned
+// values mirror that (replacement statements, or a replacement expression).
+func (p *parser) lowerClass(stmt js_ast.Stmt, expr js_ast.Expr, result visitClassResult, nameToKeep string) ([]js_ast.Stmt, js_ast.Expr) {
+	ctx := lowerClassContext{
+		nameToKeep:               nameToKeep,
+		extendsRef:               ast.InvalidRef,
+		decoratorContextRef:      ast.InvalidRef,
+		privateInstanceMethodRef: ast.InvalidRef,
+		privateStaticMethodRef:   ast.InvalidRef,
+	}
+
+	// Unpack the class from the statement or expression
+	if stmt.Data == nil {
+		e, _ := expr.Data.(*js_ast.EClass)
+		ctx.class = &e.Class
+		ctx.classExpr = expr
+		ctx.kind = classKindExpr
+		if ctx.class.Name != nil {
+			symbol := &p.symbols[ctx.class.Name.Ref.InnerIndex]
+			ctx.nameToKeep = symbol.OriginalName
+
+			// The inner class name inside the class expression should be the same as
+			// the class expression name itself
+			if result.innerClassNameRef != ast.InvalidRef {
+				p.mergeSymbols(result.innerClassNameRef, ctx.class.Name.Ref)
+			}
+
+			// Remove unused class names when minifying. Check this after we merge in
+			// the inner class name above since that will adjust the use count.
+			if p.options.minifySyntax && symbol.UseCountEstimate == 0 {
+				ctx.class.Name = nil
+			}
+		}
+	} else if s, ok := stmt.Data.(*js_ast.SClass); ok {
+		ctx.class = &s.Class
+		if ctx.class.Name != nil {
+			ctx.nameToKeep = p.symbols[ctx.class.Name.Ref.InnerIndex].OriginalName
+		}
+		if s.IsExport {
+			ctx.kind = classKindExportStmt
+		} else {
+			ctx.kind = classKindStmt
+		}
+	} else {
+		// The remaining statement form is "export default class"
+		s, _ := stmt.Data.(*js_ast.SExportDefault)
+		s2, _ := s.Value.Data.(*js_ast.SClass)
+		ctx.class = &s2.Class
+		if ctx.class.Name != nil {
+			ctx.nameToKeep = p.symbols[ctx.class.Name.Ref.InnerIndex].OriginalName
+		}
+		ctx.defaultName = s.DefaultName
+		ctx.kind = classKindExportDefaultStmt
+	}
+	// Use the statement's location when present, otherwise the expression's
+	if stmt.Data == nil {
+		ctx.classLoc = expr.Loc
+	} else {
+		ctx.classLoc = stmt.Loc
+	}
+
+	// Run the lowering pipeline over this class
+	classLoweringInfo := p.computeClassLoweringInfo(ctx.class)
+	ctx.enableNameCapture(p, result)
+	ctx.processProperties(p, classLoweringInfo, result)
+	ctx.insertInitializersIntoConstructor(p, classLoweringInfo, result)
+	return ctx.finishAndGenerateCode(p, result)
+}
+
+// enableNameCapture installs ctx.nameFunc, a callback that returns an
+// expression referring to the class by name. For class expressions this
+// captures the expression into a temporary on first use; for class statements
+// it uses the inner class name if one was referenced, or the (possibly
+// freshly-generated) outer class name otherwise.
+func (ctx *lowerClassContext) enableNameCapture(p *parser, result visitClassResult) {
+	// Class statements can be missing a name if they are in an
+	// "export default" statement:
+	//
+	//   export default class {
+	//     static foo = 123
+	//   }
+	//
+	ctx.nameFunc = func() js_ast.Expr {
+		if ctx.kind == classKindExpr {
+			// If this is a class expression, capture and store it. We have to
+			// do this even if it has a name since the name isn't exposed
+			// outside the class body.
+			classExpr := &js_ast.EClass{Class: *ctx.class}
+			ctx.class = &classExpr.Class
+			ctx.nameFunc, ctx.wrapFunc = p.captureValueWithPossibleSideEffects(ctx.classLoc, 2, js_ast.Expr{Loc: ctx.classLoc, Data: classExpr}, valueDefinitelyNotMutated)
+			ctx.classExpr = ctx.nameFunc()
+			ctx.didCaptureClassExpr = true
+			name := ctx.nameFunc()
+
+			// If we're storing the class expression in a variable, remove the class
+			// name and rewrite all references to the class name with references to
+			// the temporary variable holding the class expression. This ensures that
+			// references to the class expression by name in any expressions that end
+			// up being pulled outside of the class body still work. For example:
+			//
+			//   let Bar = class Foo {
+			//     static foo = 123
+			//     static bar = Foo.foo
+			//   }
+			//
+			// This might be converted into the following:
+			//
+			//   var _a;
+			//   let Bar = (_a = class {
+			//   }, _a.foo = 123, _a.bar = _a.foo, _a);
+			//
+			if ctx.class.Name != nil {
+				p.mergeSymbols(ctx.class.Name.Ref, name.Data.(*js_ast.EIdentifier).Ref)
+				ctx.class.Name = nil
+			}
+
+			return name
+		} else {
+			// If anything referenced the inner class name, then we should use that
+			// name for any automatically-generated initialization code, since it
+			// will come before the outer class name is initialized.
+			if result.innerClassNameRef != ast.InvalidRef {
+				p.recordUsage(result.innerClassNameRef)
+				return js_ast.Expr{Loc: ctx.class.Name.Loc, Data: &js_ast.EIdentifier{Ref: result.innerClassNameRef}}
+			}
+
+			// Otherwise we should just use the outer class name
+			if ctx.class.Name == nil {
+				if ctx.kind == classKindExportDefaultStmt {
+					ctx.class.Name = &ctx.defaultName
+				} else {
+					ctx.class.Name = &ast.LocRef{Loc: ctx.classLoc, Ref: p.generateTempRef(tempRefNoDeclare, "")}
+				}
+			}
+			p.recordUsage(ctx.class.Name.Ref)
+			return js_ast.Expr{Loc: ctx.class.Name.Loc, Data: &js_ast.EIdentifier{Ref: ctx.class.Name.Ref}}
+		}
+	}
+}
+
+// Handle lowering of instance and static fields. Move their initializers
+// from the class body to either the constructor (instance fields) or after
+// the class (static fields).
+//
+// If this returns true, the return property should be added to the class
+// body. Otherwise the property should be omitted from the class body.
+func (ctx *lowerClassContext) lowerField(
+	p *parser,
+	prop js_ast.Property,
+	private *js_ast.EPrivateIdentifier,
+	shouldOmitFieldInitializer bool,
+	staticFieldToBlockAssign bool,
+	initializerIndex int,
+) (js_ast.Property, ast.Ref, bool) {
+	mustLowerPrivate := private != nil && p.privateSymbolNeedsToBeLowered(private)
+	ref := ast.InvalidRef
+
+	// The TypeScript compiler doesn't follow the JavaScript spec for
+	// uninitialized fields. They are supposed to be set to undefined but the
+	// TypeScript compiler just omits them entirely.
+	if !shouldOmitFieldInitializer {
+		loc := prop.Loc
+
+		// Determine where to store the field
+		var target js_ast.Expr
+		if prop.Flags.Has(js_ast.PropertyIsStatic) && !staticFieldToBlockAssign {
+			target = ctx.nameFunc()
+		} else {
+			target = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+		}
+
+		// Generate the assignment initializer
+		var init js_ast.Expr
+		if prop.InitializerOrNil.Data != nil {
+			init = prop.InitializerOrNil
+		} else {
+			init = js_ast.Expr{Loc: loc, Data: js_ast.EUndefinedShared}
+		}
+
+		// Optionally call registered decorator initializers
+		if initializerIndex != -1 {
+			var value js_ast.Expr
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				value = ctx.nameFunc()
+			} else {
+				value = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+			}
+			args := []js_ast.Expr{
+				{Loc: loc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+				{Loc: loc, Data: &js_ast.ENumber{Value: float64((4 + 2*initializerIndex) << 1)}},
+				value,
+			}
+			if _, ok := init.Data.(*js_ast.EUndefined); !ok {
+				args = append(args, init)
+			}
+			init = p.callRuntime(init.Loc, "__runInitializers", args)
+			p.recordUsage(ctx.decoratorContextRef)
+		}
+
+		// Generate the assignment target
+		var memberExpr js_ast.Expr
+		if mustLowerPrivate {
+			// Generate a new symbol for this private field
+			ref = p.generateTempRef(tempRefNeedsDeclare, "_"+p.symbols[private.Ref.InnerIndex].OriginalName[1:])
+			p.symbols[private.Ref.InnerIndex].Link = ref
+
+			// Initialize the private field to a new WeakMap
+			if p.weakMapRef == ast.InvalidRef {
+				p.weakMapRef = p.newSymbol(ast.SymbolUnbound, "WeakMap")
+				p.moduleScope.Generated = append(p.moduleScope.Generated, p.weakMapRef)
+			}
+			ctx.privateMembers = append(ctx.privateMembers, js_ast.Assign(
+				js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}},
+				js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.ENew{Target: js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: p.weakMapRef}}}},
+			))
+			p.recordUsage(ref)
+
+			// Add every newly-constructed instance into this map
+			key := js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+			args := []js_ast.Expr{target, key}
+			if _, ok := init.Data.(*js_ast.EUndefined); !ok {
+				args = append(args, init)
+			}
+			memberExpr = p.callRuntime(loc, "__privateAdd", args)
+			p.recordUsage(ref)
+		} else if private == nil && ctx.class.UseDefineForClassFields {
+			args := []js_ast.Expr{target, prop.Key}
+			if _, ok := init.Data.(*js_ast.EUndefined); !ok {
+				args = append(args, init)
+			}
+			memberExpr = js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+				Target: p.importFromRuntime(loc, "__publicField"),
+				Args:   args,
+			}}
+		} else {
+			if key, ok := prop.Key.Data.(*js_ast.EString); ok && !prop.Flags.Has(js_ast.PropertyIsComputed) && !prop.Flags.Has(js_ast.PropertyPreferQuotedKey) {
+				target = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{
+					Target:  target,
+					Name:    helpers.UTF16ToString(key.Value),
+					NameLoc: prop.Key.Loc,
+				}}
+			} else {
+				target = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+					Target: target,
+					Index:  prop.Key,
+				}}
+			}
+
+			memberExpr = js_ast.Assign(target, init)
+		}
+
+		// Run extra initializers
+		if initializerIndex != -1 {
+			var value js_ast.Expr
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				value = ctx.nameFunc()
+			} else {
+				value = js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}
+			}
+			memberExpr = js_ast.JoinWithComma(memberExpr, p.callRuntime(loc, "__runInitializers", []js_ast.Expr{
+				{Loc: loc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+				{Loc: loc, Data: &js_ast.ENumber{Value: float64(((5 + 2*initializerIndex) << 1) | 1)}},
+				value,
+			}))
+			p.recordUsage(ctx.decoratorContextRef)
+		}
+
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			// Move this property to an assignment after the class ends
+			if staticFieldToBlockAssign {
+				// Use inline assignment in a static block instead of lowering
+				return js_ast.Property{
+					Loc:  loc,
+					Kind: js_ast.PropertyClassStaticBlock,
+					ClassStaticBlock: &js_ast.ClassStaticBlock{
+						Loc: loc,
+						Block: js_ast.SBlock{Stmts: []js_ast.Stmt{
+							{Loc: loc, Data: &js_ast.SExpr{Value: memberExpr}}},
+						},
+					},
+				}, ref, true
+			} else {
+				// Move this property to an assignment after the class ends
+				ctx.staticMembers = append(ctx.staticMembers, memberExpr)
+			}
+		} else {
+			// Move this property to an assignment inside the class constructor
+			ctx.instanceMembers = append(ctx.instanceMembers, js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: memberExpr}})
+		}
+	}
+
+	if private == nil || mustLowerPrivate {
+		// Remove the field from the class body
+		return js_ast.Property{}, ref, false
+	}
+
+	// Keep the private field but remove the initializer
+	prop.InitializerOrNil = js_ast.Expr{}
+	return prop, ref, true
+}
+
+// lowerPrivateMethod wires a lowered private method (or accessor) up to a
+// shared WeakSet used for brand checks. All lowered private methods on the
+// same class share one WeakSet per section (one for static, one for
+// instance); the first lowered method in a section creates the WeakSet and
+// arranges for the class object (static) or each new instance (non-static)
+// to be added to it via "__privateAdd". Afterwards the private name's symbol
+// is linked to the WeakSet ref so all references resolve to it.
+func (ctx *lowerClassContext) lowerPrivateMethod(p *parser, prop js_ast.Property, private *js_ast.EPrivateIdentifier) {
+	// All private methods can share the same WeakSet
+	var ref *ast.Ref
+	if prop.Flags.Has(js_ast.PropertyIsStatic) {
+		ref = &ctx.privateStaticMethodRef
+	} else {
+		ref = &ctx.privateInstanceMethodRef
+	}
+	if *ref == ast.InvalidRef {
+		// Generate a new symbol to store the WeakSet
+		var name string
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			name = "_static"
+		} else {
+			name = "_instances"
+		}
+		if ctx.nameToKeep != "" {
+			name = fmt.Sprintf("_%s%s", ctx.nameToKeep, name)
+		}
+		*ref = p.generateTempRef(tempRefNeedsDeclare, name)
+
+		// Generate the initializer (a reference to the global "WeakSet"
+		// constructor, created lazily and reused across the module)
+		if p.weakSetRef == ast.InvalidRef {
+			p.weakSetRef = p.newSymbol(ast.SymbolUnbound, "WeakSet")
+			p.moduleScope.Generated = append(p.moduleScope.Generated, p.weakSetRef)
+		}
+		ctx.privateMembers = append(ctx.privateMembers, js_ast.Assign(
+			js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: *ref}},
+			js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.ENew{Target: js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: p.weakSetRef}}}},
+		))
+		p.recordUsage(*ref)
+		p.recordUsage(p.weakSetRef)
+
+		// Determine what to store in the WeakSet
+		var target js_ast.Expr
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			target = ctx.nameFunc()
+		} else {
+			target = js_ast.Expr{Loc: ctx.classLoc, Data: js_ast.EThisShared}
+		}
+
+		// Add every newly-constructed instance into this set
+		methodExpr := p.callRuntime(ctx.classLoc, "__privateAdd", []js_ast.Expr{
+			target,
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: *ref}},
+		})
+		p.recordUsage(*ref)
+
+		// Make sure that adding to the map happens before any field
+		// initializers to handle cases like this:
+		//
+		//   class A {
+		//     pub = this.#priv;
+		//     #priv() {}
+		//   }
+		//
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			// Move this property to an assignment after the class ends
+			ctx.staticPrivateMethods = append(ctx.staticPrivateMethods, methodExpr)
+		} else {
+			// Move this property to an assignment inside the class constructor
+			ctx.instancePrivateMethods = append(ctx.instancePrivateMethods, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SExpr{Value: methodExpr}})
+		}
+	}
+	// Every reference to this private name now resolves to the shared WeakSet
+	p.symbols[private.Ref.InnerIndex].Link = *ref
+}
+
+// lowerMethod handles a single method definition during class lowering. It
+// returns true when the property should be dropped from the class body
+// because it has already been accounted for elsewhere (i.e. a private method
+// that was lowered and moved outside the class). As a side effect it also
+// records the constructor and any TypeScript constructor parameter fields.
+func (ctx *lowerClassContext) lowerMethod(p *parser, prop js_ast.Property, private *js_ast.EPrivateIdentifier) bool {
+	if private != nil && p.privateSymbolNeedsToBeLowered(private) {
+		ctx.lowerPrivateMethod(p, prop, private)
+
+		// Relocate the method definition outside of the class body entirely
+		tempRef := p.generateTempRef(tempRefNeedsDeclare, "_")
+		link := p.privateGetters[private.Ref]
+		if prop.Kind == js_ast.PropertySetter {
+			link = p.privateSetters[private.Ref]
+		}
+		p.symbols[tempRef.InnerIndex].Link = link
+		p.recordUsage(tempRef)
+		ctx.privateMembers = append(ctx.privateMembers, js_ast.Assign(
+			js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: tempRef}},
+			prop.ValueOrNil,
+		))
+		return true
+	}
+
+	// Anything that isn't the constructor is kept as-is
+	str, isString := prop.Key.Data.(*js_ast.EString)
+	if !isString || !helpers.UTF16EqualsString(str.Value, "constructor") {
+		return false
+	}
+	fn, isFn := prop.ValueOrNil.Data.(*js_ast.EFunction)
+	if !isFn {
+		return false
+	}
+
+	// Remember where the constructor is for later
+	ctx.ctor = fn
+
+	// Initialize TypeScript constructor parameter fields
+	if p.options.ts.Parse {
+		for _, arg := range ctx.ctor.Fn.Args {
+			if !arg.IsTypeScriptCtorField {
+				continue
+			}
+			id, isIdentifier := arg.Binding.Data.(*js_ast.BIdentifier)
+			if !isIdentifier {
+				continue
+			}
+			ctx.parameterFields = append(ctx.parameterFields, js_ast.AssignStmt(
+				js_ast.Expr{Loc: arg.Binding.Loc, Data: p.dotOrMangledPropVisit(
+					js_ast.Expr{Loc: arg.Binding.Loc, Data: js_ast.EThisShared},
+					p.symbols[id.Ref.InnerIndex].OriginalName,
+					arg.Binding.Loc,
+				)},
+				js_ast.Expr{Loc: arg.Binding.Loc, Data: &js_ast.EIdentifier{Ref: id.Ref}},
+			))
+		}
+	}
+
+	return false
+}
+
+// propertyAnalysis is the result of analyzeProperty: a summary of how a
+// single class property needs to be transformed during class lowering.
+type propertyAnalysis struct {
+	// The key as a private identifier ("#name"), or nil if the key isn't one
+	private *js_ast.EPrivateIdentifier
+	// Decorators lowered via TypeScript's "experimentalDecorators" scheme
+	propExperimentalDecorators []js_ast.Decorator
+	// Decorators lowered via the standard JavaScript decorators scheme
+	propDecorators []js_ast.Decorator
+	// True if this field declaration must be moved out of the class body
+	mustLowerField bool
+	// False when the key is only evaluated for its side effects and its
+	// value is never referenced, so no temporary variable is needed for it
+	needsValueOfKey bool
+	// True if an auto-accessor should be rewritten using the get/set
+	// transform (when decorators are unsupported or the field is lowered)
+	rewriteAutoAccessorToGetSet bool
+	// True if the field's initializer assignment is dropped entirely (e.g.
+	// TypeScript fields without initializers, or "declare"/"abstract" fields)
+	shouldOmitFieldInitializer bool
+	// True if a non-lowered static field using assign semantics becomes an
+	// assignment inside an inline static block instead of being lowered
+	staticFieldToBlockAssign bool
+	// True if a computed key is copied or moved, so its side effects must be
+	// captured in place to preserve evaluation order
+	isComputedPropertyCopiedOrMoved bool
+}
+
+// analyzeProperty decides how a single class property must be transformed:
+// whether its field must be lowered out of the class body, whether the field
+// (or just its initializer) can be omitted entirely, which decorator lowering
+// scheme (TypeScript experimental vs. standard JavaScript) applies, and
+// whether a computed key needs to be captured in a temporary variable to
+// preserve the order of its side effects.
+func (ctx *lowerClassContext) analyzeProperty(p *parser, prop js_ast.Property, classLoweringInfo classLoweringInfo) (analysis propertyAnalysis) {
+	// The TypeScript class field transform requires removing fields without
+	// initializers. If the field is removed, then we only need the key for
+	// its side effects and we don't need a temporary reference for the key.
+	// However, the TypeScript compiler doesn't remove the field when doing
+	// strict class field initialization, so we shouldn't either.
+	analysis.private, _ = prop.Key.Data.(*js_ast.EPrivateIdentifier)
+	mustLowerPrivate := analysis.private != nil && p.privateSymbolNeedsToBeLowered(analysis.private)
+	analysis.shouldOmitFieldInitializer = p.options.ts.Parse && !prop.Kind.IsMethodDefinition() && prop.InitializerOrNil.Data == nil &&
+		!ctx.class.UseDefineForClassFields && !mustLowerPrivate && !ctx.class.ShouldLowerStandardDecorators
+
+	// Class fields must be lowered if the environment doesn't support them
+	if !prop.Kind.IsMethodDefinition() {
+		if prop.Flags.Has(js_ast.PropertyIsStatic) {
+			analysis.mustLowerField = classLoweringInfo.lowerAllStaticFields
+		} else if prop.Kind == js_ast.PropertyField && p.options.ts.Parse && !ctx.class.UseDefineForClassFields && analysis.private == nil {
+			// Lower non-private instance fields (not accessors) if TypeScript's
+			// "useDefineForClassFields" setting is disabled. When all such fields
+			// have no initializers, we avoid setting the "lowerAllInstanceFields"
+			// flag as an optimization because we can just remove all class field
+			// declarations in that case without messing with the constructor. But
+			// we must set the "mustLowerField" flag here to cause this class field
+			// declaration to still be removed.
+			analysis.mustLowerField = true
+		} else {
+			analysis.mustLowerField = classLoweringInfo.lowerAllInstanceFields
+		}
+	}
+
+	// If the field uses the TypeScript "declare" or "abstract" keyword, just
+	// omit it entirely. However, we must still keep any side-effects in the
+	// computed value and/or in the decorators.
+	if prop.Kind == js_ast.PropertyDeclareOrAbstract && prop.ValueOrNil.Data == nil {
+		analysis.mustLowerField = true
+		analysis.shouldOmitFieldInitializer = true
+	}
+
+	// For convenience, split decorators off into separate fields based on how
+	// they will end up being lowered (if they are even being lowered at all)
+	if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True {
+		analysis.propExperimentalDecorators = prop.Decorators
+	} else if ctx.class.ShouldLowerStandardDecorators {
+		analysis.propDecorators = prop.Decorators
+	}
+
+	// Note: Auto-accessors use a different transform when they are decorated.
+	// This transform trades off worse run-time performance for better code size.
+	analysis.rewriteAutoAccessorToGetSet = len(analysis.propDecorators) == 0 && prop.Kind == js_ast.PropertyAutoAccessor &&
+		(p.options.unsupportedJSFeatures.Has(compat.Decorators) || analysis.mustLowerField)
+
+	// Transform non-lowered static fields that use assign semantics into an
+	// assignment in an inline static block instead of lowering them. This lets
+	// us avoid having to unnecessarily lower static private fields when
+	// "useDefineForClassFields" is disabled.
+	analysis.staticFieldToBlockAssign = prop.Kind == js_ast.PropertyField && !analysis.mustLowerField && !ctx.class.UseDefineForClassFields &&
+		prop.Flags.Has(js_ast.PropertyIsStatic) && analysis.private == nil
+
+	// Computed properties can't be copied or moved because they have side effects
+	// and we don't want to evaluate their side effects twice or change their
+	// evaluation order. We'll need to store them in temporary variables to keep
+	// their side effects in place when we reference them elsewhere.
+	//
+	// Assume the key's value is needed until proven otherwise below.
+	analysis.needsValueOfKey = true
+	if prop.Flags.Has(js_ast.PropertyIsComputed) &&
+		(len(analysis.propExperimentalDecorators) > 0 ||
+			len(analysis.propDecorators) > 0 ||
+			analysis.mustLowerField ||
+			analysis.staticFieldToBlockAssign ||
+			analysis.rewriteAutoAccessorToGetSet) {
+		analysis.isComputedPropertyCopiedOrMoved = true
+
+		// Determine if we don't actually need the value of the key (only the side
+		// effects). In that case we don't need a temporary variable.
+		if len(analysis.propExperimentalDecorators) == 0 &&
+			len(analysis.propDecorators) == 0 &&
+			!analysis.rewriteAutoAccessorToGetSet &&
+			analysis.shouldOmitFieldInitializer {
+			analysis.needsValueOfKey = false
+		}
+	}
+	return
+}
+
+func (p *parser) propertyNameHint(key js_ast.Expr) string {
+	switch k := key.Data.(type) {
+	case *js_ast.EString:
+		return helpers.UTF16ToString(k.Value)
+	case *js_ast.EIdentifier:
+		return p.symbols[k.Ref.InnerIndex].OriginalName
+	case *js_ast.EPrivateIdentifier:
+		return p.symbols[k.Ref.InnerIndex].OriginalName[1:]
+	default:
+		return ""
+	}
+}
+
+// hoistComputedProperties ensures that the side effects of computed property
+// keys (and of lowered decorator expressions) are still evaluated in their
+// original source order even when class elements are moved around or removed
+// during lowering. Keys that will be referenced again later are captured in
+// temporary variables. It returns two maps keyed by property index: the refs
+// of the temporaries holding evaluated keys, and the refs of the temporaries
+// holding evaluated decorator arrays.
+func (ctx *lowerClassContext) hoistComputedProperties(p *parser, classLoweringInfo classLoweringInfo) (
+	propertyKeyTempRefs map[int]ast.Ref, decoratorTempRefs map[int]ast.Ref) {
+	// Points at the key of the closest computed property that follows the
+	// current one in source order (we iterate in reverse), which is where
+	// hoisted side effects can be inlined without reordering them
+	var nextComputedPropertyKey *js_ast.Expr
+
+	// Computed property keys must be evaluated in a specific order for their
+	// side effects. This order must be preserved even when we have to move a
+	// class element around. For example, this can happen when using class fields
+	// with computed property keys and targeting environments without class field
+	// support. For example:
+	//
+	//   class Foo {
+	//     [a()]() {}
+	//     static [b()] = null;
+	//     [c()]() {}
+	//   }
+	//
+	// If we need to lower the static field because static fields aren't supported,
+	// we still need to ensure that "b()" is called before "a()" and after "c()".
+	// That looks something like this:
+	//
+	//   var _a;
+	//   class Foo {
+	//     [a()]() {}
+	//     [(_a = b(), c())]() {}
+	//   }
+	//   __publicField(Foo, _a, null);
+	//
+	// Iterate in reverse so that any initializers are "pushed up" before the
+	// class body if there's nowhere else to put them. They can't be "pushed
+	// down" into a static block in the class body (the logical place to put
+	// them that's next in the evaluation order) because these expressions
+	// may contain "await" and static blocks do not allow "await".
+	for propIndex := len(ctx.class.Properties) - 1; propIndex >= 0; propIndex-- {
+		prop := &ctx.class.Properties[propIndex]
+		analysis := ctx.analyzeProperty(p, *prop, classLoweringInfo)
+
+		// Evaluate the decorator expressions inline before computed property keys
+		var decorators js_ast.Expr
+		if len(analysis.propDecorators) > 0 {
+			name := p.propertyNameHint(prop.Key)
+			if name != "" {
+				name = "_" + name
+			}
+			name += "_dec"
+			ref := p.generateTempRef(tempRefNeedsDeclare, name)
+			values := make([]js_ast.Expr, len(analysis.propDecorators))
+			for i, decorator := range analysis.propDecorators {
+				values[i] = decorator.Value
+			}
+			atLoc := analysis.propDecorators[0].AtLoc
+			decorators = js_ast.Assign(
+				js_ast.Expr{Loc: atLoc, Data: &js_ast.EIdentifier{Ref: ref}},
+				js_ast.Expr{Loc: atLoc, Data: &js_ast.EArray{Items: values, IsSingleLine: true}})
+			p.recordUsage(ref)
+			if decoratorTempRefs == nil {
+				decoratorTempRefs = make(map[int]ast.Ref)
+			}
+			decoratorTempRefs[propIndex] = ref
+		}
+
+		// Skip property keys that we know are side-effect free
+		switch prop.Key.Data.(type) {
+		case *js_ast.EString, *js_ast.ENameOfSymbol, *js_ast.ENumber, *js_ast.EPrivateIdentifier:
+			// Figure out where to stick the decorator side effects to preserve their order
+			if nextComputedPropertyKey != nil {
+				// Insert it before everything that comes after it
+				*nextComputedPropertyKey = js_ast.JoinWithComma(decorators, *nextComputedPropertyKey)
+			} else {
+				// Insert it after the first thing that comes before it
+				ctx.computedPropertyChain = js_ast.JoinWithComma(decorators, ctx.computedPropertyChain)
+			}
+			continue
+
+		default:
+			// Otherwise, evaluate the decorators right before the property key
+			if decorators.Data != nil {
+				prop.Key = js_ast.JoinWithComma(decorators, prop.Key)
+				prop.Flags |= js_ast.PropertyIsComputed
+			}
+		}
+
+		// If this key is referenced elsewhere, make sure to still preserve
+		// its side effects in the property's original location
+		if analysis.isComputedPropertyCopiedOrMoved {
+			// If this property is being duplicated instead of moved or removed, then
+			// we still need the assignment to the temporary so that we can reference
+			// it in multiple places, but we don't have to hoist the assignment to an
+			// earlier property (since this property is still there). In that case
+			// we can reduce generated code size by avoiding the hoist. One example
+			// of this case is a decorator on a class element with a computed
+			// property key:
+			//
+			//   class Foo {
+			//     @dec [a()]() {}
+			//   }
+			//
+			// We want to do this:
+			//
+			//   var _a;
+			//   class Foo {
+			//     [_a = a()]() {}
+			//   }
+			//   __decorateClass([dec], Foo.prototype, _a, 1);
+			//
+			// instead of this:
+			//
+			//   var _a;
+			//   _a = a();
+			//   class Foo {
+			//     [_a]() {}
+			//   }
+			//   __decorateClass([dec], Foo.prototype, _a, 1);
+			//
+			// So only do the hoist if this property is being moved or removed.
+			if !analysis.rewriteAutoAccessorToGetSet && (analysis.mustLowerField || analysis.staticFieldToBlockAssign) {
+				inlineKey := prop.Key
+
+				if !analysis.needsValueOfKey {
+					// In certain cases, we only need to evaluate a property key for its
+					// side effects but we don't actually need the value of the key itself.
+					// For example, a TypeScript class field without an initializer is
+					// omitted when TypeScript's "useDefineForClassFields" setting is false.
+				} else {
+					// Store the key in a temporary so we can refer to it later
+					ref := p.generateTempRef(tempRefNeedsDeclare, "")
+					inlineKey = js_ast.Assign(js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}, prop.Key)
+					p.recordUsage(ref)
+
+					// Replace this property key with a reference to the temporary. We
+					// don't need to store the temporary in the "propertyKeyTempRefs"
+					// map because all references will refer to the temporary, not just
+					// some of them.
+					prop.Key = js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+					p.recordUsage(ref)
+				}
+
+				// Figure out where to stick this property's side effect to preserve its order
+				if nextComputedPropertyKey != nil {
+					// Insert it before everything that comes after it
+					*nextComputedPropertyKey = js_ast.JoinWithComma(inlineKey, *nextComputedPropertyKey)
+				} else {
+					// Insert it after the first thing that comes before it
+					ctx.computedPropertyChain = js_ast.JoinWithComma(inlineKey, ctx.computedPropertyChain)
+				}
+				continue
+			}
+
+			// Otherwise, we keep the side effects in place (as described above) but
+			// just store the key in a temporary so we can refer to it later.
+			ref := p.generateTempRef(tempRefNeedsDeclare, "")
+			prop.Key = js_ast.Assign(js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}, prop.Key)
+			p.recordUsage(ref)
+
+			// Use this temporary when creating duplicate references to this key
+			if propertyKeyTempRefs == nil {
+				propertyKeyTempRefs = make(map[int]ast.Ref)
+			}
+			propertyKeyTempRefs[propIndex] = ref
+
+			// Deliberately continue to fall through to the "computed" case below:
+		}
+
+		// Otherwise, this computed property could be a good location to evaluate
+		// something that comes before it. Remember this location for later.
+		if prop.Flags.Has(js_ast.PropertyIsComputed) {
+			// If any side effects after this were hoisted here, then inline them now.
+			// We don't want to reorder any side effects.
+			if ctx.computedPropertyChain.Data != nil {
+				ref, ok := propertyKeyTempRefs[propIndex]
+				if !ok {
+					ref = p.generateTempRef(tempRefNeedsDeclare, "")
+					prop.Key = js_ast.Assign(js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}, prop.Key)
+					p.recordUsage(ref)
+				}
+				prop.Key = js_ast.JoinWithComma(
+					js_ast.JoinWithComma(prop.Key, ctx.computedPropertyChain),
+					js_ast.Expr{Loc: prop.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}})
+				p.recordUsage(ref)
+				ctx.computedPropertyChain = js_ast.Expr{}
+			}
+
+			// Remember this location for later
+			nextComputedPropertyKey = &prop.Key
+		}
+	}
+
+	// If any side effects in the class body were hoisted up to the "extends"
+	// clause, then inline them before the "extends" clause is evaluated. We
+	// don't want to reorder any side effects. For example:
+	//
+	//   class Foo extends a() {
+	//     static [b()]
+	//   }
+	//
+	// We want to do this:
+	//
+	//   var _a, _b;
+	//   class Foo extends (_b = a(), _a = b(), _b) {
+	//   }
+	//   __publicField(Foo, _a);
+	//
+	// instead of this:
+	//
+	//   var _a;
+	//   _a = b();
+	//   class Foo extends a() {
+	//   }
+	//   __publicField(Foo, _a);
+	//
+	if ctx.computedPropertyChain.Data != nil && ctx.class.ExtendsOrNil.Data != nil {
+		ctx.extendsRef = p.generateTempRef(tempRefNeedsDeclare, "")
+		ctx.class.ExtendsOrNil = js_ast.JoinWithComma(js_ast.JoinWithComma(
+			js_ast.Assign(js_ast.Expr{Loc: ctx.class.ExtendsOrNil.Loc, Data: &js_ast.EIdentifier{Ref: ctx.extendsRef}}, ctx.class.ExtendsOrNil),
+			ctx.computedPropertyChain),
+			js_ast.Expr{Loc: ctx.class.ExtendsOrNil.Loc, Data: &js_ast.EIdentifier{Ref: ctx.extendsRef}})
+		p.recordUsage(ctx.extendsRef)
+		p.recordUsage(ctx.extendsRef)
+		ctx.computedPropertyChain = js_ast.Expr{}
+	}
+	return
+}
+
+// fieldOrAccessorOrder maps a field or auto-accessor class element to its
+// decorator-initializer section index (0-3) and returns true, or returns
+// false for any other kind of element. The section order corresponds to the
+// initialization order in the specification:
+//
+//  27. For each element e of staticElements, do
+//     a. If e is a ClassElementDefinition Record and e.[[Kind]] is not field, then
+//
+//  28. For each element e of instanceElements, do
+//     a. If e.[[Kind]] is not field, then
+//
+//  29. For each element e of staticElements, do
+//     a. If e.[[Kind]] is field, then
+//
+//  30. For each element e of instanceElements, do
+//     a. If e.[[Kind]] is field, then
+func fieldOrAccessorOrder(kind js_ast.PropertyKind, flags js_ast.PropertyFlags) (int, bool) {
+	isStatic := flags.Has(js_ast.PropertyIsStatic)
+	switch kind {
+	case js_ast.PropertyAutoAccessor:
+		// Sections 0 (static) and 1 (instance) are for auto-accessors
+		if isStatic {
+			return 0, true
+		}
+		return 1, true
+	case js_ast.PropertyField:
+		// Sections 2 (static) and 3 (instance) are for fields
+		if isStatic {
+			return 2, true
+		}
+		return 3, true
+	}
+	return 0, false
+}
+
+func (ctx *lowerClassContext) processProperties(p *parser, classLoweringInfo classLoweringInfo, result visitClassResult) {
+	properties := make([]js_ast.Property, 0, len(ctx.class.Properties))
+	propertyKeyTempRefs, decoratorTempRefs := ctx.hoistComputedProperties(p, classLoweringInfo)
+
+	// Save the initializer index for each field and accessor element
+	if ctx.class.ShouldLowerStandardDecorators {
+		var counts [4]int
+
+		// Count how many initializers there are in each section
+		for _, prop := range ctx.class.Properties {
+			if len(prop.Decorators) > 0 {
+				if i, ok := fieldOrAccessorOrder(prop.Kind, prop.Flags); ok {
+					counts[i]++
+				} else if prop.Flags.Has(js_ast.PropertyIsStatic) {
+					ctx.decoratorCallStaticMethodExtraInitializers = true
+				} else {
+					ctx.decoratorCallInstanceMethodExtraInitializers = true
+				}
+			}
+		}
+
+		// Give each on an index for the order it will be initialized in
+		if counts[0] > 0 || counts[1] > 0 || counts[2] > 0 || counts[3] > 0 {
+			indices := [4]int{0, counts[0], counts[0] + counts[1], counts[0] + counts[1] + counts[2]}
+			ctx.decoratorPropertyToInitializerMap = make(map[int]int)
+
+			for propIndex, prop := range ctx.class.Properties {
+				if len(prop.Decorators) > 0 {
+					if i, ok := fieldOrAccessorOrder(prop.Kind, prop.Flags); ok {
+						ctx.decoratorPropertyToInitializerMap[propIndex] = indices[i]
+						indices[i]++
+					}
+				}
+			}
+		}
+	}
+
+	// Evaluate the decorator expressions inline
+	if ctx.class.ShouldLowerStandardDecorators && len(ctx.class.Decorators) > 0 {
+		name := ctx.nameToKeep
+		if name == "" {
+			name = "class"
+		}
+		decoratorsRef := p.generateTempRef(tempRefNeedsDeclare, fmt.Sprintf("_%s_decorators", name))
+		values := make([]js_ast.Expr, len(ctx.class.Decorators))
+		for i, decorator := range ctx.class.Decorators {
+			values[i] = decorator.Value
+		}
+		atLoc := ctx.class.Decorators[0].AtLoc
+		ctx.computedPropertyChain = js_ast.JoinWithComma(js_ast.Assign(
+			js_ast.Expr{Loc: atLoc, Data: &js_ast.EIdentifier{Ref: decoratorsRef}},
+			js_ast.Expr{Loc: atLoc, Data: &js_ast.EArray{Items: values, IsSingleLine: true}},
+		), ctx.computedPropertyChain)
+		p.recordUsage(decoratorsRef)
+		ctx.decoratorClassDecorators = js_ast.Expr{Loc: atLoc, Data: &js_ast.EIdentifier{Ref: decoratorsRef}}
+		p.recordUsage(decoratorsRef)
+		ctx.class.Decorators = nil
+	}
+
+	for propIndex, prop := range ctx.class.Properties {
+		if prop.Kind == js_ast.PropertyClassStaticBlock {
+			// Drop empty class blocks when minifying
+			if p.options.minifySyntax && len(prop.ClassStaticBlock.Block.Stmts) == 0 {
+				continue
+			}
+
+			// Lower this block if needed
+			if classLoweringInfo.lowerAllStaticFields {
+				ctx.lowerStaticBlock(p, prop.Loc, *prop.ClassStaticBlock)
+				continue
+			}
+
+			// Otherwise, keep this property
+			properties = append(properties, prop)
+			continue
+		}
+
+		// Merge parameter decorators with method decorators
+		if p.options.ts.Parse && prop.Kind.IsMethodDefinition() {
+			if fn, ok := prop.ValueOrNil.Data.(*js_ast.EFunction); ok {
+				isConstructor := false
+				if key, ok := prop.Key.Data.(*js_ast.EString); ok {
+					isConstructor = helpers.UTF16EqualsString(key.Value, "constructor")
+				}
+				args := fn.Fn.Args
+				for i, arg := range args {
+					for _, decorator := range arg.Decorators {
+						// Generate a call to "__decorateParam()" for this parameter decorator
+						var decorators *[]js_ast.Decorator = &prop.Decorators
+						if isConstructor {
+							decorators = &ctx.class.Decorators
+						}
+						*decorators = append(*decorators, js_ast.Decorator{
+							Value: p.callRuntime(decorator.Value.Loc, "__decorateParam", []js_ast.Expr{
+								{Loc: decorator.Value.Loc, Data: &js_ast.ENumber{Value: float64(i)}},
+								decorator.Value,
+							}),
+							AtLoc: decorator.AtLoc,
+						})
+						args[i].Decorators = nil
+					}
+				}
+			}
+		}
+
+		analysis := ctx.analyzeProperty(p, prop, classLoweringInfo)
+
+		// When the property key needs to be referenced multiple times, subsequent
+		// references may need to reference a temporary variable instead of copying
+		// the whole property key expression (since we only want to evaluate side
+		// effects once).
+		keyExprNoSideEffects := prop.Key
+		if ref, ok := propertyKeyTempRefs[propIndex]; ok {
+			keyExprNoSideEffects.Data = &js_ast.EIdentifier{Ref: ref}
+		}
+
+		// Handle TypeScript experimental decorators
+		if len(analysis.propExperimentalDecorators) > 0 {
+			prop.Decorators = nil
+
+			// Generate a single call to "__decorateClass()" for this property
+			loc := prop.Key.Loc
+
+			// This code tells "__decorateClass()" if the descriptor should be undefined
+			descriptorKind := float64(1)
+			if prop.Kind == js_ast.PropertyField || prop.Kind == js_ast.PropertyDeclareOrAbstract {
+				descriptorKind = 2
+			}
+
+			// Instance properties use the prototype, static properties use the class
+			var target js_ast.Expr
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				target = ctx.nameFunc()
+			} else {
+				target = js_ast.Expr{Loc: loc, Data: &js_ast.EDot{Target: ctx.nameFunc(), Name: "prototype", NameLoc: loc}}
+			}
+
+			values := make([]js_ast.Expr, len(analysis.propExperimentalDecorators))
+			for i, decorator := range analysis.propExperimentalDecorators {
+				values[i] = decorator.Value
+			}
+			decorator := p.callRuntime(loc, "__decorateClass", []js_ast.Expr{
+				{Loc: loc, Data: &js_ast.EArray{Items: values}},
+				target,
+				cloneKeyForLowerClass(keyExprNoSideEffects),
+				{Loc: loc, Data: &js_ast.ENumber{Value: descriptorKind}},
+			})
+
+			// Static decorators are grouped after instance decorators
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				ctx.staticExperimentalDecorators = append(ctx.staticExperimentalDecorators, decorator)
+			} else {
+				ctx.instanceExperimentalDecorators = append(ctx.instanceExperimentalDecorators, decorator)
+			}
+		}
+
+		// Handle JavaScript decorators
+		initializerIndex := -1
+		if len(analysis.propDecorators) > 0 {
+			prop.Decorators = nil
+			loc := prop.Loc
+			keyLoc := prop.Key.Loc
+			atLoc := analysis.propDecorators[0].AtLoc
+
+			// Encode information about this property using bit flags
+			var flags int
+			switch prop.Kind {
+			case js_ast.PropertyMethod:
+				flags = 1
+			case js_ast.PropertyGetter:
+				flags = 2
+			case js_ast.PropertySetter:
+				flags = 3
+			case js_ast.PropertyAutoAccessor:
+				flags = 4
+			case js_ast.PropertyField:
+				flags = 5
+			}
+			if flags >= 4 {
+				initializerIndex = ctx.decoratorPropertyToInitializerMap[propIndex]
+			}
+			if prop.Flags.Has(js_ast.PropertyIsStatic) {
+				flags |= 8
+			}
+			if analysis.private != nil {
+				flags |= 16
+			}
+
+			// Start the arguments for the call to "__decorateElement"
+			var key js_ast.Expr
+			decoratorsRef := decoratorTempRefs[propIndex]
+			if ctx.decoratorContextRef == ast.InvalidRef {
+				ctx.decoratorContextRef = p.generateTempRef(tempRefNeedsDeclare, "_init")
+			}
+			if analysis.private != nil {
+				key = js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(p.symbols[analysis.private.Ref.InnerIndex].OriginalName)}}
+			} else {
+				key = cloneKeyForLowerClass(keyExprNoSideEffects)
+			}
+			args := []js_ast.Expr{
+				{Loc: loc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+				{Loc: loc, Data: &js_ast.ENumber{Value: float64(flags)}},
+				key,
+				{Loc: atLoc, Data: &js_ast.EIdentifier{Ref: decoratorsRef}},
+			}
+			p.recordUsage(ctx.decoratorContextRef)
+			p.recordUsage(decoratorsRef)
+
+			// Append any optional additional arguments
+			privateFnRef := ast.InvalidRef
+			if analysis.private != nil {
+				// Add the "target" argument (the weak set)
+				args = append(args, js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: analysis.private.Ref}})
+				p.recordUsage(analysis.private.Ref)
+
+				// Add the "extra" argument (the function)
+				switch prop.Kind {
+				case js_ast.PropertyMethod:
+					privateFnRef = p.privateGetters[analysis.private.Ref]
+				case js_ast.PropertyGetter:
+					privateFnRef = p.privateGetters[analysis.private.Ref]
+				case js_ast.PropertySetter:
+					privateFnRef = p.privateSetters[analysis.private.Ref]
+				}
+				if privateFnRef != ast.InvalidRef {
+					args = append(args, js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: privateFnRef}})
+					p.recordUsage(privateFnRef)
+				}
+			} else {
+				// Add the "target" argument (the class object)
+				args = append(args, ctx.nameFunc())
+			}
+
+			// Auto-accessors will generate a private field for storage. Lower this
+			// field, which will generate a WeakMap instance, and then pass the
+			// WeakMap instance into the decorator helper so the lowered getter and
+			// setter can use it.
+			if prop.Kind == js_ast.PropertyAutoAccessor {
+				var kind ast.SymbolKind
+				if prop.Flags.Has(js_ast.PropertyIsStatic) {
+					kind = ast.SymbolPrivateStaticField
+				} else {
+					kind = ast.SymbolPrivateField
+				}
+				ref := p.newSymbol(kind, "#"+p.propertyNameHint(prop.Key))
+				p.symbols[ref.InnerIndex].Flags |= ast.PrivateSymbolMustBeLowered
+				_, autoAccessorWeakMapRef, _ := ctx.lowerField(p, prop, &js_ast.EPrivateIdentifier{Ref: ref}, false, false, initializerIndex)
+				args = append(args, js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: autoAccessorWeakMapRef}})
+				p.recordUsage(autoAccessorWeakMapRef)
+			}
+
+			// Assign the result
+			element := p.callRuntime(loc, "__decorateElement", args)
+			if privateFnRef != ast.InvalidRef {
+				element = js_ast.Assign(js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: privateFnRef}}, element)
+				p.recordUsage(privateFnRef)
+			} else if prop.Kind == js_ast.PropertyAutoAccessor && analysis.private != nil {
+				ref := p.generateTempRef(tempRefNeedsDeclare, "")
+				privateGetFnRef := p.generateTempRef(tempRefNeedsDeclare, "_")
+				privateSetFnRef := p.generateTempRef(tempRefNeedsDeclare, "_")
+				p.symbols[privateGetFnRef.InnerIndex].Link = p.privateGetters[analysis.private.Ref]
+				p.symbols[privateSetFnRef.InnerIndex].Link = p.privateSetters[analysis.private.Ref]
+
+				// Unpack the "get" and "set" properties from the returned property descriptor
+				element = js_ast.JoinWithComma(js_ast.JoinWithComma(
+					js_ast.Assign(
+						js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}},
+						element),
+					js_ast.Assign(
+						js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: privateGetFnRef}},
+						js_ast.Expr{Loc: loc, Data: &js_ast.EDot{Target: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}, Name: "get", NameLoc: loc}})),
+					js_ast.Assign(
+						js_ast.Expr{Loc: keyLoc, Data: &js_ast.EIdentifier{Ref: privateSetFnRef}},
+						js_ast.Expr{Loc: loc, Data: &js_ast.EDot{Target: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}, Name: "set", NameLoc: loc}}))
+				p.recordUsage(ref)
+				p.recordUsage(privateGetFnRef)
+				p.recordUsage(ref)
+				p.recordUsage(privateSetFnRef)
+				p.recordUsage(ref)
+			}
+
+			// Put the call to the decorators in the right place
+			if prop.Kind == js_ast.PropertyField {
+				// Field
+				if prop.Flags.Has(js_ast.PropertyIsStatic) {
+					ctx.decoratorStaticFieldElements = append(ctx.decoratorStaticFieldElements, element)
+				} else {
+					ctx.decoratorInstanceFieldElements = append(ctx.decoratorInstanceFieldElements, element)
+				}
+			} else {
+				// Non-field
+				if prop.Flags.Has(js_ast.PropertyIsStatic) {
+					ctx.decoratorStaticNonFieldElements = append(ctx.decoratorStaticNonFieldElements, element)
+				} else {
+					ctx.decoratorInstanceNonFieldElements = append(ctx.decoratorInstanceNonFieldElements, element)
+				}
+			}
+
+			// Omit decorated auto-accessors as they will be now generated at run-time instead
+			if prop.Kind == js_ast.PropertyAutoAccessor {
+				if analysis.private != nil {
+					ctx.lowerPrivateMethod(p, prop, analysis.private)
+				}
+				continue
+			}
+		}
+
+		// Generate get/set methods for auto-accessors
+		if analysis.rewriteAutoAccessorToGetSet {
+			properties = ctx.rewriteAutoAccessorToGetSet(p, prop, properties, keyExprNoSideEffects, analysis.mustLowerField, analysis.private, result)
+			continue
+		}
+
+		// Lower fields
+		if (!prop.Kind.IsMethodDefinition() && analysis.mustLowerField) || analysis.staticFieldToBlockAssign {
+			var keep bool
+			prop, _, keep = ctx.lowerField(p, prop, analysis.private, analysis.shouldOmitFieldInitializer, analysis.staticFieldToBlockAssign, initializerIndex)
+			if !keep {
+				continue
+			}
+		}
+
+		// Lower methods
+		if prop.Kind.IsMethodDefinition() && ctx.lowerMethod(p, prop, analysis.private) {
+			continue
+		}
+
+		// Keep this property
+		properties = append(properties, prop)
+	}
+
+	// Finish the filtering operation
+	ctx.class.Properties = properties
+}
+
+// lowerStaticBlock turns a class "static { ... }" block into expressions that
+// run during class initialization, appending them to ctx.staticMembers. A
+// block consisting solely of expression statements is inlined directly, while
+// anything else is wrapped in an immediately-invoked arrow function.
+func (ctx *lowerClassContext) lowerStaticBlock(p *parser, loc logger.Loc, block js_ast.ClassStaticBlock) {
+	// Gather the values of the expression statements, noting whether the
+	// block contains anything other than expression statements
+	onlyExprs := true
+	var exprs []js_ast.Expr
+	for _, stmt := range block.Block.Stmts {
+		if s, ok := stmt.Data.(*js_ast.SExpr); ok {
+			exprs = append(exprs, s.Value)
+			continue
+		}
+		if _, ok := stmt.Data.(*js_ast.SEmpty); ok {
+			// Stray semicolons are omitted completely
+			continue
+		}
+		onlyExprs = false
+		break
+	}
+
+	if onlyExprs {
+		// Inlining the static block IIFE should be safe here since all uses
+		// of "this" should have already been replaced by now
+		ctx.staticMembers = append(ctx.staticMembers, exprs...)
+		return
+	}
+
+	// A non-expression statement forces the fallback to an IIFE since we may
+	// be in an expression context and can't use a block
+	ctx.staticMembers = append(ctx.staticMembers, js_ast.Expr{Loc: loc, Data: &js_ast.ECall{
+		Target: js_ast.Expr{Loc: loc, Data: &js_ast.EArrow{Body: js_ast.FnBody{
+			Loc:   block.Loc,
+			Block: block.Block,
+		}}},
+		CanBeUnwrappedIfUnused: p.astHelpers.StmtsCanBeRemovedIfUnused(block.Block.Stmts, 0),
+	}})
+}
+
+// rewriteAutoAccessorToGetSet replaces an "accessor" (auto-accessor) class
+// property with three generated properties: a private field that serves as
+// backing storage, plus a getter/setter pair that read and write that field.
+// It returns the updated property list. The storage field inherits the
+// accessor's lowering status, and the generated getter/setter are themselves
+// lowered via lowerMethod when required.
+func (ctx *lowerClassContext) rewriteAutoAccessorToGetSet(
+	p *parser,
+	prop js_ast.Property,
+	properties []js_ast.Property,
+	keyExprNoSideEffects js_ast.Expr,
+	mustLowerField bool,
+	private *js_ast.EPrivateIdentifier,
+	result visitClassResult,
+) []js_ast.Property {
+	// Static accessors get static private storage; instance accessors get
+	// per-instance private storage
+	var storageKind ast.SymbolKind
+	if prop.Flags.Has(js_ast.PropertyIsStatic) {
+		storageKind = ast.SymbolPrivateStaticField
+	} else {
+		storageKind = ast.SymbolPrivateField
+	}
+
+	// Generate the name of the private field to use for storage
+	var storageName string
+	switch k := keyExprNoSideEffects.Data.(type) {
+	case *js_ast.EString:
+		storageName = "#" + helpers.UTF16ToString(k.Value)
+	case *js_ast.EPrivateIdentifier:
+		// Derive "#_foo" from an existing private name "#foo"
+		storageName = "#_" + p.symbols[k.Ref.InnerIndex].OriginalName[1:]
+	default:
+		// Computed keys get a synthetic short name with a per-class counter
+		storageName = "#" + ast.DefaultNameMinifierJS.NumberToMinifiedName(ctx.autoAccessorCount)
+		ctx.autoAccessorCount++
+	}
+
+	// Generate the symbols we need ("argRef" is the setter's one parameter),
+	// and record them in the class body scope so later passes see them
+	storageRef := p.newSymbol(storageKind, storageName)
+	argRef := p.newSymbol(ast.SymbolOther, "_")
+	result.bodyScope.Generated = append(result.bodyScope.Generated, storageRef)
+	result.bodyScope.Children = append(result.bodyScope.Children, &js_ast.Scope{Kind: js_ast.ScopeFunctionBody, Generated: []ast.Ref{argRef}})
+
+	// Replace this accessor with other properties
+	loc := keyExprNoSideEffects.Loc
+	storagePrivate := &js_ast.EPrivateIdentifier{Ref: storageRef}
+	if mustLowerField {
+		// Forward the accessor's lowering status on to the storage field. If we
+		// don't do this, then we risk having the underlying private symbol
+		// behaving differently than if it were authored manually (e.g. being
+		// placed outside of the class body, which is a syntax error).
+		p.symbols[storageRef.InnerIndex].Flags |= ast.PrivateSymbolMustBeLowered
+	}
+	storageNeedsToBeLowered := p.privateSymbolNeedsToBeLowered(storagePrivate)
+	// The storage field keeps the accessor's initializer and static-ness
+	storageProp := js_ast.Property{
+		Loc:              prop.Loc,
+		Kind:             js_ast.PropertyField,
+		Flags:            prop.Flags & js_ast.PropertyIsStatic,
+		Key:              js_ast.Expr{Loc: loc, Data: storagePrivate},
+		InitializerOrNil: prop.InitializerOrNil,
+	}
+	if !mustLowerField {
+		properties = append(properties, storageProp)
+	} else if prop, _, ok := ctx.lowerField(p, storageProp, storagePrivate, false, false, -1); ok {
+		properties = append(properties, prop)
+	}
+
+	// Getter: "get key() { return this.#storage }" (or the lowered equivalent)
+	var getExpr js_ast.Expr
+	if storageNeedsToBeLowered {
+		getExpr = p.lowerPrivateGet(js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}, loc, storagePrivate)
+	} else {
+		p.recordUsage(storageRef)
+		getExpr = js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+			Target: js_ast.Expr{Loc: loc, Data: js_ast.EThisShared},
+			Index:  js_ast.Expr{Loc: loc, Data: &js_ast.EPrivateIdentifier{Ref: storageRef}},
+		}}
+	}
+	getterProp := js_ast.Property{
+		Loc:   prop.Loc,
+		Kind:  js_ast.PropertyGetter,
+		Flags: prop.Flags,
+		Key:   prop.Key,
+		ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EFunction{
+			Fn: js_ast.Fn{
+				Body: js_ast.FnBody{
+					Loc: loc,
+					Block: js_ast.SBlock{
+						Stmts: []js_ast.Stmt{
+							{Loc: loc, Data: &js_ast.SReturn{ValueOrNil: getExpr}},
+						},
+					},
+				},
+			},
+		}},
+	}
+	if !ctx.lowerMethod(p, getterProp, private) {
+		properties = append(properties, getterProp)
+	}
+
+	// Setter: "set key(_) { this.#storage = _ }" (or the lowered equivalent)
+	var setExpr js_ast.Expr
+	if storageNeedsToBeLowered {
+		setExpr = p.lowerPrivateSet(js_ast.Expr{Loc: loc, Data: js_ast.EThisShared}, loc, storagePrivate,
+			js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: argRef}})
+	} else {
+		p.recordUsage(storageRef)
+		p.recordUsage(argRef)
+		setExpr = js_ast.Assign(
+			js_ast.Expr{Loc: loc, Data: &js_ast.EIndex{
+				Target: js_ast.Expr{Loc: loc, Data: js_ast.EThisShared},
+				Index:  js_ast.Expr{Loc: loc, Data: &js_ast.EPrivateIdentifier{Ref: storageRef}},
+			}},
+			js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: argRef}},
+		)
+	}
+	setterProp := js_ast.Property{
+		Loc:   prop.Loc,
+		Kind:  js_ast.PropertySetter,
+		Flags: prop.Flags,
+		// The getter consumed "prop.Key", so the setter needs its own copy
+		Key:   cloneKeyForLowerClass(keyExprNoSideEffects),
+		ValueOrNil: js_ast.Expr{Loc: loc, Data: &js_ast.EFunction{
+			Fn: js_ast.Fn{
+				Args: []js_ast.Arg{
+					{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: argRef}}},
+				},
+				Body: js_ast.FnBody{
+					Loc: loc,
+					Block: js_ast.SBlock{
+						Stmts: []js_ast.Stmt{
+							{Loc: loc, Data: &js_ast.SExpr{Value: setExpr}},
+						},
+					},
+				},
+			},
+		}},
+	}
+	if !ctx.lowerMethod(p, setterProp, private) {
+		properties = append(properties, setterProp)
+	}
+	return properties
+}
+
+// insertInitializersIntoConstructor places generated per-instance
+// initialization statements (TypeScript parameter properties, lowered private
+// methods, instance field initializers, and decorator extra initializers)
+// into the class constructor, synthesizing a constructor if the class lacks
+// one. The statements are inserted after any "super()" call since they need
+// access to "this", and the constructor property is moved to the front of the
+// property list to match the TypeScript compiler's output.
+func (ctx *lowerClassContext) insertInitializersIntoConstructor(p *parser, classLoweringInfo classLoweringInfo, result visitClassResult) {
+	if len(ctx.parameterFields) == 0 &&
+		!ctx.decoratorCallInstanceMethodExtraInitializers &&
+		len(ctx.instancePrivateMethods) == 0 &&
+		len(ctx.instanceMembers) == 0 &&
+		(ctx.ctor == nil || result.superCtorRef == ast.InvalidRef) {
+		// No need to generate a constructor
+		return
+	}
+
+	// Create a constructor if one doesn't already exist
+	if ctx.ctor == nil {
+		ctx.ctor = &js_ast.EFunction{Fn: js_ast.Fn{Body: js_ast.FnBody{Loc: ctx.classLoc}}}
+
+		// Append it to the list to reuse existing allocation space
+		ctx.class.Properties = append(ctx.class.Properties, js_ast.Property{
+			Kind:       js_ast.PropertyMethod,
+			Loc:        ctx.classLoc,
+			Key:        js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16("constructor")}},
+			ValueOrNil: js_ast.Expr{Loc: ctx.classLoc, Data: ctx.ctor},
+		})
+
+		// Make sure the constructor has a super() call if needed. The generated
+		// body is "super(...arguments)" so all constructor arguments are
+		// forwarded to the base class.
+		if ctx.class.ExtendsOrNil.Data != nil {
+			target := js_ast.Expr{Loc: ctx.classLoc, Data: js_ast.ESuperShared}
+			if classLoweringInfo.shimSuperCtorCalls {
+				p.recordUsage(result.superCtorRef)
+				target.Data = &js_ast.EIdentifier{Ref: result.superCtorRef}
+			}
+			argumentsRef := p.newSymbol(ast.SymbolUnbound, "arguments")
+			p.currentScope.Generated = append(p.currentScope.Generated, argumentsRef)
+			ctx.ctor.Fn.Body.Block.Stmts = append(ctx.ctor.Fn.Body.Block.Stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.ECall{
+				Target: target,
+				Args:   []js_ast.Expr{{Loc: ctx.classLoc, Data: &js_ast.ESpread{Value: js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: argumentsRef}}}}},
+			}}}})
+		}
+	}
+
+	// Run instanceMethodExtraInitializers if needed
+	var decoratorInstanceMethodExtraInitializers js_ast.Expr
+	if ctx.decoratorCallInstanceMethodExtraInitializers {
+		// NOTE(review): the numeric flag "(2 << 1) | 1" selects which
+		// initializer list "__runInitializers" runs — confirm against the
+		// runtime helper's encoding
+		decoratorInstanceMethodExtraInitializers = p.callRuntime(ctx.classLoc, "__runInitializers", []js_ast.Expr{
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			{Loc: ctx.classLoc, Data: &js_ast.ENumber{Value: (2 << 1) | 1}},
+			{Loc: ctx.classLoc, Data: js_ast.EThisShared},
+		})
+		p.recordUsage(ctx.decoratorContextRef)
+	}
+
+	// Make sure the instance field initializers come after "super()" since
+	// they need "this" to be available
+	generatedStmts := make([]js_ast.Stmt, 0,
+		len(ctx.parameterFields)+
+			len(ctx.instancePrivateMethods)+
+			len(ctx.instanceMembers))
+	generatedStmts = append(generatedStmts, ctx.parameterFields...)
+	if decoratorInstanceMethodExtraInitializers.Data != nil {
+		generatedStmts = append(generatedStmts, js_ast.Stmt{Loc: decoratorInstanceMethodExtraInitializers.Loc, Data: &js_ast.SExpr{Value: decoratorInstanceMethodExtraInitializers}})
+	}
+	generatedStmts = append(generatedStmts, ctx.instancePrivateMethods...)
+	generatedStmts = append(generatedStmts, ctx.instanceMembers...)
+	p.insertStmtsAfterSuperCall(&ctx.ctor.Fn.Body, generatedStmts, result.superCtorRef)
+
+	// Sort the constructor first to match the TypeScript compiler's output.
+	// This shifts every property before the constructor down by one slot.
+	for i := 0; i < len(ctx.class.Properties); i++ {
+		if ctx.class.Properties[i].ValueOrNil.Data == ctx.ctor {
+			ctorProp := ctx.class.Properties[i]
+			for j := i; j > 0; j-- {
+				ctx.class.Properties[j] = ctx.class.Properties[j-1]
+			}
+			ctx.class.Properties[0] = ctorProp
+			break
+		}
+	}
+}
+
+// finishAndGenerateCode is the final step of class lowering. It assembles the
+// lowered class plus all generated companion expressions (hoisted computed
+// keys, WeakSet/WeakMap declarations, decorator evaluation, static member
+// initializers) in the correct evaluation order. If the class was originally
+// an expression, a single joined expression is returned; otherwise a slice of
+// statements is returned. Exactly one of the two return values is used.
+func (ctx *lowerClassContext) finishAndGenerateCode(p *parser, result visitClassResult) ([]js_ast.Stmt, js_ast.Expr) {
+	// When bundling is enabled, we convert top-level class statements to
+	// expressions:
+	//
+	//   // Before
+	//   class Foo {
+	//     static foo = () => Foo
+	//   }
+	//   Foo = wrap(Foo)
+	//
+	//   // After
+	//   var _Foo = class _Foo {
+	//     static foo = () => _Foo;
+	//   };
+	//   var Foo = _Foo;
+	//   Foo = wrap(Foo);
+	//
+	// One reason to do this is that esbuild's bundler sometimes needs to lazily-
+	// evaluate a module. For example, a module may end up being both the target
+	// of a dynamic "import()" call and a static "import" statement. Lazy module
+	// evaluation is done by wrapping the top-level module code in a closure. To
+	// avoid a performance hit for static "import" statements, esbuild stores
+	// top-level exported symbols outside of the closure and references them
+	// directly instead of indirectly.
+	//
+	// Another reason to do this is that multiple JavaScript VMs have had and
+	// continue to have performance issues with TDZ (i.e. "temporal dead zone")
+	// checks. These checks validate that a let, or const, or class symbol isn't
+	// used before it's initialized. Here are two issues with well-known VMs:
+	//
+	//   * V8: https://bugs.chromium.org/p/v8/issues/detail?id=13723 (10% slowdown)
+	//   * JavaScriptCore: https://bugs.webkit.org/show_bug.cgi?id=199866 (1,000% slowdown!)
+	//
+	// JavaScriptCore had a severe performance issue as their TDZ implementation
+	// had time complexity that was quadratic in the number of variables needing
+	// TDZ checks in the same scope (with the top-level scope typically being the
+	// worst offender). V8 has ongoing issues with TDZ checks being present
+	// throughout the code their JIT generates even when they have already been
+	// checked earlier in the same function or when the function in question has
+	// already been run (so the checks have already happened).
+	//
+	// Due to esbuild's parallel architecture, we both a) need to transform class
+	// statements to variables during parsing and b) don't yet know whether this
+	// module will need to be lazily-evaluated or not in the parser. So we always
+	// do this just in case it's needed.
+	mustConvertStmtToExpr := ctx.kind != classKindExpr && p.currentScope.Parent == nil && (p.options.mode == config.ModeBundle || p.willWrapModuleInTryCatchForUsing)
+
+	// Check to see if we have lowered decorators on the class itself
+	var classDecorators js_ast.Expr
+	var classExperimentalDecorators []js_ast.Decorator
+	if p.options.ts.Parse && p.options.ts.Config.ExperimentalDecorators == config.True {
+		classExperimentalDecorators = ctx.class.Decorators
+		ctx.class.Decorators = nil
+	} else if ctx.class.ShouldLowerStandardDecorators {
+		classDecorators = ctx.decoratorClassDecorators
+	}
+
+	// Build the expression that applies class-level JavaScript decorators (or,
+	// when there are only element decorators, registers decorator metadata).
+	// It is evaluated among the suffix expressions below.
+	var decorateClassExpr js_ast.Expr
+	if classDecorators.Data != nil {
+		// Handle JavaScript decorators on the class itself
+		if ctx.decoratorContextRef == ast.InvalidRef {
+			ctx.decoratorContextRef = p.generateTempRef(tempRefNeedsDeclare, "_init")
+		}
+		decorateClassExpr = p.callRuntime(ctx.classLoc, "__decorateElement", []js_ast.Expr{
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			{Loc: ctx.classLoc, Data: &js_ast.ENumber{Value: 0}},
+			{Loc: ctx.classLoc, Data: &js_ast.EString{Value: helpers.StringToUTF16(ctx.nameToKeep)}},
+			classDecorators,
+			ctx.nameFunc(),
+		})
+		p.recordUsage(ctx.decoratorContextRef)
+		decorateClassExpr = js_ast.Assign(ctx.nameFunc(), decorateClassExpr)
+	} else if ctx.decoratorContextRef != ast.InvalidRef {
+		// Decorator metadata is present if there are any decorators on the class at all
+		decorateClassExpr = p.callRuntime(ctx.classLoc, "__decoratorMetadata", []js_ast.Expr{
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			ctx.nameFunc(),
+		})
+	}
+
+	// If this is true, we have removed some code from the class body that could
+	// potentially contain an expression that captures the inner class name.
+	// In this case we must explicitly store the class to a separate inner class
+	// name binding to avoid incorrect behavior if the class is later re-assigned,
+	// since the removed code will no longer be in the class body scope.
+	hasPotentialInnerClassNameEscape := result.innerClassNameRef != ast.InvalidRef &&
+		(ctx.computedPropertyChain.Data != nil ||
+			len(ctx.privateMembers) > 0 ||
+			len(ctx.staticPrivateMethods) > 0 ||
+			len(ctx.staticMembers) > 0 ||
+
+			// TypeScript experimental decorators
+			len(ctx.instanceExperimentalDecorators) > 0 ||
+			len(ctx.staticExperimentalDecorators) > 0 ||
+			len(classExperimentalDecorators) > 0 ||
+
+			// JavaScript decorators
+			ctx.decoratorContextRef != ast.InvalidRef)
+
+	// If we need to represent the class as an expression (even if it's a
+	// statement), then generate another symbol to use as the class name
+	nameForClassDecorators := ast.LocRef{Ref: ast.InvalidRef}
+	if len(classExperimentalDecorators) > 0 || hasPotentialInnerClassNameEscape || mustConvertStmtToExpr {
+		if ctx.kind == classKindExpr {
+			// For expressions, the inner and outer class names are the same
+			name := ctx.nameFunc()
+			nameForClassDecorators = ast.LocRef{Loc: name.Loc, Ref: name.Data.(*js_ast.EIdentifier).Ref}
+		} else {
+			// For statements we need to use the outer class name, not the inner one
+			if ctx.class.Name != nil {
+				nameForClassDecorators = *ctx.class.Name
+			} else if ctx.kind == classKindExportDefaultStmt {
+				nameForClassDecorators = ctx.defaultName
+			} else {
+				nameForClassDecorators = ast.LocRef{Loc: ctx.classLoc, Ref: p.generateTempRef(tempRefNoDeclare, "")}
+			}
+			p.recordUsage(nameForClassDecorators.Ref)
+		}
+	}
+
+	// Expressions evaluated before ("prefix") and after ("suffix") the class
+	// itself, accumulated in initialization order
+	var prefixExprs []js_ast.Expr
+	var suffixExprs []js_ast.Expr
+
+	// If there are JavaScript decorators, start by allocating a context object
+	if ctx.decoratorContextRef != ast.InvalidRef {
+		base := js_ast.Expr{Loc: ctx.classLoc, Data: js_ast.ENullShared}
+		if ctx.class.ExtendsOrNil.Data != nil {
+			if ctx.extendsRef == ast.InvalidRef {
+				ctx.extendsRef = p.generateTempRef(tempRefNeedsDeclare, "")
+				ctx.class.ExtendsOrNil = js_ast.Assign(js_ast.Expr{Loc: ctx.class.ExtendsOrNil.Loc, Data: &js_ast.EIdentifier{Ref: ctx.extendsRef}}, ctx.class.ExtendsOrNil)
+				p.recordUsage(ctx.extendsRef)
+			}
+			base.Data = &js_ast.EIdentifier{Ref: ctx.extendsRef}
+		}
+		suffixExprs = append(suffixExprs, js_ast.Assign(
+			js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			p.callRuntime(ctx.classLoc, "__decoratorStart", []js_ast.Expr{base}),
+		))
+		p.recordUsage(ctx.decoratorContextRef)
+	}
+
+	// Any of the computed property chain that we hoisted out of the class
+	// body needs to come before the class expression.
+	if ctx.computedPropertyChain.Data != nil {
+		prefixExprs = append(prefixExprs, ctx.computedPropertyChain)
+	}
+
+	// WeakSets and WeakMaps
+	suffixExprs = append(suffixExprs, ctx.privateMembers...)
+
+	// Evaluate JavaScript decorators here
+	suffixExprs = append(suffixExprs, ctx.decoratorStaticNonFieldElements...)
+	suffixExprs = append(suffixExprs, ctx.decoratorInstanceNonFieldElements...)
+	suffixExprs = append(suffixExprs, ctx.decoratorStaticFieldElements...)
+	suffixExprs = append(suffixExprs, ctx.decoratorInstanceFieldElements...)
+
+	// Lowered initializers for static methods (including getters and setters)
+	suffixExprs = append(suffixExprs, ctx.staticPrivateMethods...)
+
+	// Run JavaScript class decorators at the end of class initialization
+	if decorateClassExpr.Data != nil {
+		suffixExprs = append(suffixExprs, decorateClassExpr)
+	}
+
+	// For each element initializer of staticMethodExtraInitializers
+	if ctx.decoratorCallStaticMethodExtraInitializers {
+		suffixExprs = append(suffixExprs, p.callRuntime(ctx.classLoc, "__runInitializers", []js_ast.Expr{
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			{Loc: ctx.classLoc, Data: &js_ast.ENumber{Value: (1 << 1) | 1}},
+			ctx.nameFunc(),
+		}))
+		p.recordUsage(ctx.decoratorContextRef)
+	}
+
+	// Lowered initializers for static fields, static accessors, and static blocks
+	suffixExprs = append(suffixExprs, ctx.staticMembers...)
+
+	// The official TypeScript compiler adds generated code after the class body
+	// in this exact order. Matching this order is important for correctness.
+	suffixExprs = append(suffixExprs, ctx.instanceExperimentalDecorators...)
+	suffixExprs = append(suffixExprs, ctx.staticExperimentalDecorators...)
+
+	// For each element initializer of classExtraInitializers
+	if classDecorators.Data != nil {
+		suffixExprs = append(suffixExprs, p.callRuntime(ctx.classLoc, "__runInitializers", []js_ast.Expr{
+			{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: ctx.decoratorContextRef}},
+			{Loc: ctx.classLoc, Data: &js_ast.ENumber{Value: (0 << 1) | 1}},
+			ctx.nameFunc(),
+		}))
+		p.recordUsage(ctx.decoratorContextRef)
+	}
+
+	// Run TypeScript experimental class decorators at the end of class initialization
+	if len(classExperimentalDecorators) > 0 {
+		values := make([]js_ast.Expr, len(classExperimentalDecorators))
+		for i, decorator := range classExperimentalDecorators {
+			values[i] = decorator.Value
+		}
+		// "Foo = __decorateClass([dec1, dec2], Foo)"
+		suffixExprs = append(suffixExprs, js_ast.Assign(
+			js_ast.Expr{Loc: nameForClassDecorators.Loc, Data: &js_ast.EIdentifier{Ref: nameForClassDecorators.Ref}},
+			p.callRuntime(ctx.classLoc, "__decorateClass", []js_ast.Expr{
+				{Loc: ctx.classLoc, Data: &js_ast.EArray{Items: values}},
+				{Loc: nameForClassDecorators.Loc, Data: &js_ast.EIdentifier{Ref: nameForClassDecorators.Ref}},
+			}),
+		))
+		p.recordUsage(nameForClassDecorators.Ref)
+		p.recordUsage(nameForClassDecorators.Ref)
+	}
+
+	// Our caller expects us to return the same form that was originally given to
+	// us. If the class was originally an expression, then return an expression.
+	if ctx.kind == classKindExpr {
+		// Calling "nameFunc" will replace "classExpr", so make sure to do that first
+		// before joining "classExpr" with any other expressions
+		var nameToJoin js_ast.Expr
+		if ctx.didCaptureClassExpr || len(suffixExprs) > 0 {
+			nameToJoin = ctx.nameFunc()
+		}
+
+		// Insert expressions on either side of the class as appropriate
+		ctx.classExpr = js_ast.JoinWithComma(js_ast.JoinAllWithComma(prefixExprs), ctx.classExpr)
+		ctx.classExpr = js_ast.JoinWithComma(ctx.classExpr, js_ast.JoinAllWithComma(suffixExprs))
+
+		// Finally join "classExpr" with the variable that holds the class object
+		ctx.classExpr = js_ast.JoinWithComma(ctx.classExpr, nameToJoin)
+		if ctx.wrapFunc != nil {
+			ctx.classExpr = ctx.wrapFunc(ctx.classExpr)
+		}
+		return nil, ctx.classExpr
+	}
+
+	// Otherwise, the class was originally a statement. Return an array of
+	// statements instead.
+	var stmts []js_ast.Stmt
+	var outerClassNameDecl js_ast.Stmt
+
+	// Insert expressions before the class as appropriate
+	for _, expr := range prefixExprs {
+		stmts = append(stmts, js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}})
+	}
+
+	// Handle converting a class statement to a class expression
+	if nameForClassDecorators.Ref != ast.InvalidRef {
+		classExpr := js_ast.EClass{Class: *ctx.class}
+		ctx.class = &classExpr.Class
+		init := js_ast.Expr{Loc: ctx.classLoc, Data: &classExpr}
+
+		// If the inner class name was referenced, then set the name of the class
+		// that we will end up printing to the inner class name. Otherwise if the
+		// inner class name was unused, we can just leave it blank.
+		if result.innerClassNameRef != ast.InvalidRef {
+			// "class Foo { x = Foo }" => "const Foo = class _Foo { x = _Foo }"
+			ctx.class.Name.Ref = result.innerClassNameRef
+		} else {
+			// "class Foo {}" => "const Foo = class {}"
+			ctx.class.Name = nil
+		}
+
+		// Generate the class initialization statement
+		if len(classExperimentalDecorators) > 0 {
+			// If there are class decorators, then we actually need to mutate the
+			// immutable "const" binding that shadows everything in the class body.
+			// The official TypeScript compiler does this by rewriting all class name
+			// references in the class body to another temporary variable. This is
+			// basically what we're doing here.
+			p.recordUsage(nameForClassDecorators.Ref)
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SLocal{
+				Kind:     p.selectLocalKind(js_ast.LocalLet),
+				IsExport: ctx.kind == classKindExportStmt,
+				Decls: []js_ast.Decl{{
+					Binding:    js_ast.Binding{Loc: nameForClassDecorators.Loc, Data: &js_ast.BIdentifier{Ref: nameForClassDecorators.Ref}},
+					ValueOrNil: init,
+				}},
+			}})
+			if ctx.class.Name != nil {
+				p.mergeSymbols(ctx.class.Name.Ref, nameForClassDecorators.Ref)
+				ctx.class.Name = nil
+			}
+		} else if hasPotentialInnerClassNameEscape {
+			// If the inner class name was used, then we explicitly generate a binding
+			// for it. That means the mutable outer class name is separate, and is
+			// initialized after all static member initializers have finished.
+			captureRef := p.newSymbol(ast.SymbolOther, p.symbols[result.innerClassNameRef.InnerIndex].OriginalName)
+			p.currentScope.Generated = append(p.currentScope.Generated, captureRef)
+			p.recordDeclaredSymbol(captureRef)
+			p.mergeSymbols(result.innerClassNameRef, captureRef)
+			kind := js_ast.LocalConst
+			if classDecorators.Data != nil {
+				// Class decorators need to be able to potentially mutate this binding
+				kind = js_ast.LocalLet
+			}
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SLocal{
+				Kind: p.selectLocalKind(kind),
+				Decls: []js_ast.Decl{{
+					Binding:    js_ast.Binding{Loc: nameForClassDecorators.Loc, Data: &js_ast.BIdentifier{Ref: captureRef}},
+					ValueOrNil: init,
+				}},
+			}})
+			p.recordUsage(nameForClassDecorators.Ref)
+			p.recordUsage(captureRef)
+			// The "var Foo = _Foo" declaration is emitted later, after the
+			// suffix expressions (see "outerClassNameDecl" below)
+			outerClassNameDecl = js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SLocal{
+				Kind:     p.selectLocalKind(js_ast.LocalLet),
+				IsExport: ctx.kind == classKindExportStmt,
+				Decls: []js_ast.Decl{{
+					Binding:    js_ast.Binding{Loc: nameForClassDecorators.Loc, Data: &js_ast.BIdentifier{Ref: nameForClassDecorators.Ref}},
+					ValueOrNil: js_ast.Expr{Loc: ctx.classLoc, Data: &js_ast.EIdentifier{Ref: captureRef}},
+				}},
+			}}
+		} else {
+			// Otherwise, the inner class name isn't needed and we can just
+			// use a single variable declaration for the outer class name.
+			p.recordUsage(nameForClassDecorators.Ref)
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SLocal{
+				Kind:     p.selectLocalKind(js_ast.LocalLet),
+				IsExport: ctx.kind == classKindExportStmt,
+				Decls: []js_ast.Decl{{
+					Binding:    js_ast.Binding{Loc: nameForClassDecorators.Loc, Data: &js_ast.BIdentifier{Ref: nameForClassDecorators.Ref}},
+					ValueOrNil: init,
+				}},
+			}})
+		}
+	} else {
+		// Generate the specific kind of class statement that was passed in to us
+		switch ctx.kind {
+		case classKindStmt:
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SClass{Class: *ctx.class}})
+		case classKindExportStmt:
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SClass{Class: *ctx.class, IsExport: true}})
+		case classKindExportDefaultStmt:
+			stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SExportDefault{
+				DefaultName: ctx.defaultName,
+				Value:       js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SClass{Class: *ctx.class}},
+			}})
+		}
+
+		// The inner class name inside the class statement should be the same as
+		// the class statement name itself
+		if ctx.class.Name != nil && result.innerClassNameRef != ast.InvalidRef {
+			// If the class body contains a direct eval call, then the inner class
+			// name will be marked as "MustNotBeRenamed" (because we have already
+			// popped the class body scope) but the outer class name won't be marked
+			// as "MustNotBeRenamed" yet (because we haven't yet popped the containing
+			// scope). Propagate this flag now before we merge these symbols so we
+			// don't end up accidentally renaming the outer class name to the inner
+			// class name.
+			if p.currentScope.ContainsDirectEval {
+				p.symbols[ctx.class.Name.Ref.InnerIndex].Flags |= (p.symbols[result.innerClassNameRef.InnerIndex].Flags & ast.MustNotBeRenamed)
+			}
+			p.mergeSymbols(result.innerClassNameRef, ctx.class.Name.Ref)
+		}
+	}
+
+	// Insert expressions after the class as appropriate
+	for _, expr := range suffixExprs {
+		stmts = append(stmts, js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}})
+	}
+
+	// This must come after the class body initializers have finished
+	if outerClassNameDecl.Data != nil {
+		stmts = append(stmts, outerClassNameDecl)
+	}
+
+	if nameForClassDecorators.Ref != ast.InvalidRef && ctx.kind == classKindExportDefaultStmt {
+		// "export default class x {}" => "class x {} export {x as default}"
+		stmts = append(stmts, js_ast.Stmt{Loc: ctx.classLoc, Data: &js_ast.SExportClause{
+			Items: []js_ast.ClauseItem{{Alias: "default", Name: ctx.defaultName}},
+		}})
+	}
+	return stmts, js_ast.Expr{}
+}
+
+// cloneKeyForLowerClass returns a copy of the given property key expression
+// whose data payload has been shallow-cloned, so the returned key can be
+// reused in a second generated property without aliasing the original.
+// Panics on key kinds that should never reach class lowering.
+func cloneKeyForLowerClass(key js_ast.Expr) js_ast.Expr {
+	var cloned js_ast.E
+	switch k := key.Data.(type) {
+	case *js_ast.ENumber:
+		c := *k
+		cloned = &c
+	case *js_ast.EString:
+		c := *k
+		cloned = &c
+	case *js_ast.EIdentifier:
+		c := *k
+		cloned = &c
+	case *js_ast.ENameOfSymbol:
+		c := *k
+		cloned = &c
+	case *js_ast.EPrivateIdentifier:
+		c := *k
+		cloned = &c
+	default:
+		panic("Internal error")
+	}
+	key.Data = cloned
+	return key
+}
+
// Replace "super()" calls with our shim so that we can guarantee
// that instance field initialization doesn't happen before "super()"
// is called, since at that point "this" isn't available.
func (p *parser) insertStmtsAfterSuperCall(body *js_ast.FnBody, stmtsToInsert []js_ast.Stmt, superCtorRef ast.Ref) {
	// If this class has no base class, then there's no "super()" call to handle
	if superCtorRef == ast.InvalidRef || p.symbols[superCtorRef.InnerIndex].UseCountEstimate == 0 {
		body.Block.Stmts = append(stmtsToInsert, body.Block.Stmts...)
		return
	}

	// It's likely that there's only one "super()" call, and that it's a
	// top-level expression in the constructor function body. If so, we
	// can generate tighter code for this common case.
	if p.symbols[superCtorRef.InnerIndex].UseCountEstimate == 1 {
		for i, stmt := range body.Block.Stmts {
			// "before" holds any expression evaluated before the call,
			// "callLoc"/"callData" identify the "super()" call itself, and
			// "after" is a statement to re-emit after the inserted statements
			var before js_ast.Expr
			var callLoc logger.Loc
			var callData *js_ast.ECall
			var after js_ast.Stmt

			switch s := stmt.Data.(type) {
			case *js_ast.SExpr:
				if b, loc, c, a := findFirstTopLevelSuperCall(s.Value, superCtorRef); c != nil {
					before, callLoc, callData = b, loc, c
					if a.Data != nil {
						// Keep evaluating the rest of the expression after the call
						s.Value = a
						after = js_ast.Stmt{Loc: a.Loc, Data: s}
					}
				}

			case *js_ast.SReturn:
				// Only handled when there is a remainder expression ("a.Data != nil"),
				// since a bare "return super()" can't be split this way
				if s.ValueOrNil.Data != nil {
					if b, loc, c, a := findFirstTopLevelSuperCall(s.ValueOrNil, superCtorRef); c != nil && a.Data != nil {
						before, callLoc, callData = b, loc, c
						s.ValueOrNil = a
						after = js_ast.Stmt{Loc: a.Loc, Data: s}
					}
				}

			case *js_ast.SThrow:
				if b, loc, c, a := findFirstTopLevelSuperCall(s.Value, superCtorRef); c != nil && a.Data != nil {
					before, callLoc, callData = b, loc, c
					s.Value = a
					after = js_ast.Stmt{Loc: a.Loc, Data: s}
				}

			case *js_ast.SIf:
				if b, loc, c, a := findFirstTopLevelSuperCall(s.Test, superCtorRef); c != nil && a.Data != nil {
					before, callLoc, callData = b, loc, c
					s.Test = a
					after = js_ast.Stmt{Loc: a.Loc, Data: s}
				}

			case *js_ast.SSwitch:
				if b, loc, c, a := findFirstTopLevelSuperCall(s.Test, superCtorRef); c != nil && a.Data != nil {
					before, callLoc, callData = b, loc, c
					s.Test = a
					after = js_ast.Stmt{Loc: a.Loc, Data: s}
				}

			case *js_ast.SFor:
				if expr, ok := s.InitOrNil.Data.(*js_ast.SExpr); ok {
					if b, loc, c, a := findFirstTopLevelSuperCall(expr.Value, superCtorRef); c != nil {
						before, callLoc, callData = b, loc, c
						if a.Data != nil {
							expr.Value = a
						} else {
							// The entire "for" initializer was the call itself
							s.InitOrNil.Data = nil
						}
						// Re-emit the whole "for" statement after the inserted statements
						after = js_ast.Stmt{Loc: a.Loc, Data: s}
					}
				}
			}

			if callData != nil {
				// Revert "__super()" back to "super()"
				callData.Target.Data = js_ast.ESuperShared
				p.ignoreUsage(superCtorRef)

				// Inject "stmtsToInsert" after "super()"
				stmtsBefore := body.Block.Stmts[:i]
				stmtsAfter := body.Block.Stmts[i+1:]
				stmts := append([]js_ast.Stmt{}, stmtsBefore...)
				if before.Data != nil {
					stmts = append(stmts, js_ast.Stmt{Loc: before.Loc, Data: &js_ast.SExpr{Value: before}})
				}
				stmts = append(stmts, js_ast.Stmt{Loc: callLoc, Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: callLoc, Data: callData}}})
				stmts = append(stmts, stmtsToInsert...)
				if after.Data != nil {
					stmts = append(stmts, after)
				}
				stmts = append(stmts, stmtsAfter...)
				body.Block.Stmts = stmts
				return
			}
		}
	}

	// Otherwise, inject a generated "__super" helper function at the top of the
	// constructor that looks like this:
	//
	//   var __super = (...args) => {
	//     super(...args);
	//     ...stmtsToInsert...
	//     return this;
	//   };
	//
	argsRef := p.newSymbol(ast.SymbolOther, "args")
	p.currentScope.Generated = append(p.currentScope.Generated, argsRef)
	p.recordUsage(argsRef)
	superCall := js_ast.Expr{Loc: body.Loc, Data: &js_ast.ECall{
		Target: js_ast.Expr{Loc: body.Loc, Data: js_ast.ESuperShared},
		Args:   []js_ast.Expr{{Loc: body.Loc, Data: &js_ast.ESpread{Value: js_ast.Expr{Loc: body.Loc, Data: &js_ast.EIdentifier{Ref: argsRef}}}}},
	}}
	// Build the helper body: "super(...args); <stmtsToInsert>; return this;"
	stmtsToInsert = append(append(
		[]js_ast.Stmt{{Loc: body.Loc, Data: &js_ast.SExpr{Value: superCall}}},
		stmtsToInsert...),
		js_ast.Stmt{Loc: body.Loc, Data: &js_ast.SReturn{ValueOrNil: js_ast.Expr{Loc: body.Loc, Data: js_ast.EThisShared}}},
	)
	if p.options.minifySyntax {
		stmtsToInsert = p.mangleStmts(stmtsToInsert, stmtsFnBody)
	}
	// Prepend "var __super = (...args) => { ... }" to the constructor body,
	// binding the helper to the same ref that the "super()" calls now use
	body.Block.Stmts = append([]js_ast.Stmt{{Loc: body.Loc, Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
		Binding: js_ast.Binding{Loc: body.Loc, Data: &js_ast.BIdentifier{Ref: superCtorRef}}, ValueOrNil: js_ast.Expr{Loc: body.Loc, Data: &js_ast.EArrow{
			HasRestArg: true,
			PreferExpr: true,
			Args:       []js_ast.Arg{{Binding: js_ast.Binding{Loc: body.Loc, Data: &js_ast.BIdentifier{Ref: argsRef}}}},
			Body:       js_ast.FnBody{Loc: body.Loc, Block: js_ast.SBlock{Stmts: stmtsToInsert}},
		}},
	}}}}}, body.Block.Stmts...)
}
+
// findFirstTopLevelSuperCall searches "expr" for the first call to the
// identifier "superCtorRef" that is evaluated unconditionally: either the
// expression itself or an operand reachable through a chain of top-level
// comma operators. On a match it returns (expression evaluated before the
// call, call location, the call node with its target already rewritten to
// "super", expression evaluated after the call). The call result is nil
// when no such call exists; "before"/"after" may have nil Data.
func findFirstTopLevelSuperCall(expr js_ast.Expr, superCtorRef ast.Ref) (js_ast.Expr, logger.Loc, *js_ast.ECall, js_ast.Expr) {
	if call, ok := expr.Data.(*js_ast.ECall); ok {
		if target, ok := call.Target.Data.(*js_ast.EIdentifier); ok && target.Ref == superCtorRef {
			// Mutates the call in place: "__super()" becomes "super()"
			call.Target.Data = js_ast.ESuperShared
			return js_ast.Expr{}, expr.Loc, call, js_ast.Expr{}
		}
	}

	// Also search down comma operator chains for a super call
	if comma, ok := expr.Data.(*js_ast.EBinary); ok && comma.Op == js_ast.BinOpComma {
		if before, loc, call, after := findFirstTopLevelSuperCall(comma.Left, superCtorRef); call != nil {
			// Everything to the right of the call stays in the "after" part
			return before, loc, call, js_ast.JoinWithComma(after, comma.Right)
		}

		if before, loc, call, after := findFirstTopLevelSuperCall(comma.Right, superCtorRef); call != nil {
			// The left operand runs before the call, so join it into "before"
			return js_ast.JoinWithComma(comma.Left, before), loc, call, after
		}
	}

	return js_ast.Expr{}, logger.Loc{}, nil, js_ast.Expr{}
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/json_parser.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/json_parser.go
new file mode 100644
index 0000000..64062ca
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/json_parser.go
@@ -0,0 +1,238 @@
+package js_parser
+
+import (
+	"fmt"
+
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// jsonParser parses a single JSON (or JSON-like, depending on the
// configured flavor) expression from a source file into a js_ast tree.
type jsonParser struct {
	log                            logger.Log
	source                         logger.Source
	tracker                        logger.LineColumnTracker
	lexer                          js_lexer.Lexer
	options                        JSONOptions
	suppressWarningsAboutWeirdCode bool // true for files inside "node_modules" (suppresses duplicate-key warnings)
}
+
+func (p *jsonParser) parseMaybeTrailingComma(closeToken js_lexer.T) bool {
+	commaRange := p.lexer.Range()
+	p.lexer.Expect(js_lexer.TComma)
+
+	if p.lexer.Token == closeToken {
+		if p.options.Flavor == js_lexer.JSON {
+			p.log.AddError(&p.tracker, commaRange, "JSON does not support trailing commas")
+		}
+		return false
+	}
+
+	return true
+}
+
// parseExpr parses one JSON value (literal, array, or object) and returns
// it as a js_ast expression. Syntax errors are reported through the lexer,
// which panics with a js_lexer.LexerPanic that ParseJSON recovers from.
func (p *jsonParser) parseExpr() js_ast.Expr {
	loc := p.lexer.Loc()

	switch p.lexer.Token {
	case js_lexer.TFalse:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: false}}

	case js_lexer.TTrue:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EBoolean{Value: true}}

	case js_lexer.TNull:
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: js_ast.ENullShared}

	case js_lexer.TStringLiteral:
		value := p.lexer.StringLiteral()
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.EString{Value: value}}

	case js_lexer.TNumericLiteral:
		value := p.lexer.Number
		p.lexer.Next()
		return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: value}}

	case js_lexer.TMinus:
		// A negative number is a minus token followed by a numeric literal
		p.lexer.Next()
		value := p.lexer.Number
		p.lexer.Expect(js_lexer.TNumericLiteral)
		return js_ast.Expr{Loc: loc, Data: &js_ast.ENumber{Value: -value}}

	case js_lexer.TOpenBracket:
		p.lexer.Next()
		// Track whether the literal spans multiple lines (used when printing)
		isSingleLine := !p.lexer.HasNewlineBefore
		items := []js_ast.Expr{}

		for p.lexer.Token != js_lexer.TCloseBracket {
			if len(items) > 0 {
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
				// Trailing comma before "]" ends the list (error in strict JSON)
				if !p.parseMaybeTrailingComma(js_lexer.TCloseBracket) {
					break
				}
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
			}

			item := p.parseExpr()
			items = append(items, item)
		}

		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBracketLoc := p.lexer.Loc()
		p.lexer.Expect(js_lexer.TCloseBracket)
		return js_ast.Expr{Loc: loc, Data: &js_ast.EArray{
			Items:           items,
			IsSingleLine:    isSingleLine,
			CloseBracketLoc: closeBracketLoc,
		}}

	case js_lexer.TOpenBrace:
		p.lexer.Next()
		isSingleLine := !p.lexer.HasNewlineBefore
		properties := []js_ast.Property{}
		duplicates := make(map[string]logger.Range)

		for p.lexer.Token != js_lexer.TCloseBrace {
			if len(properties) > 0 {
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
				if !p.parseMaybeTrailingComma(js_lexer.TCloseBrace) {
					break
				}
				if p.lexer.HasNewlineBefore {
					isSingleLine = false
				}
			}

			// Object keys must be string literals
			keyString := p.lexer.StringLiteral()
			keyRange := p.lexer.Range()
			key := js_ast.Expr{Loc: keyRange.Loc, Data: &js_ast.EString{Value: keyString}}
			p.lexer.Expect(js_lexer.TStringLiteral)

			// Warn about duplicate keys
			if !p.suppressWarningsAboutWeirdCode {
				keyText := helpers.UTF16ToString(keyString)
				if prevRange, ok := duplicates[keyText]; ok {
					p.log.AddIDWithNotes(logger.MsgID_JS_DuplicateObjectKey, logger.Warning, &p.tracker, keyRange,
						fmt.Sprintf("Duplicate key %q in object literal", keyText),
						[]logger.MsgData{p.tracker.MsgData(prevRange, fmt.Sprintf("The original key %q is here:", keyText))})
				} else {
					duplicates[keyText] = keyRange
				}
			}

			p.lexer.Expect(js_lexer.TColon)
			value := p.parseExpr()

			property := js_ast.Property{
				Kind:       js_ast.PropertyField,
				Loc:        keyRange.Loc,
				Key:        key,
				ValueOrNil: value,
			}

			// The key "__proto__" must not be a string literal in JavaScript because
			// that actually modifies the prototype of the object. This can be
			// avoided by using a computed property key instead of a string literal.
			if helpers.UTF16EqualsString(keyString, "__proto__") && !p.options.UnsupportedJSFeatures.Has(compat.ObjectExtensions) {
				property.Flags |= js_ast.PropertyIsComputed
			}

			properties = append(properties, property)
		}

		if p.lexer.HasNewlineBefore {
			isSingleLine = false
		}
		closeBraceLoc := p.lexer.Loc()
		p.lexer.Expect(js_lexer.TCloseBrace)
		return js_ast.Expr{Loc: loc, Data: &js_ast.EObject{
			Properties:    properties,
			IsSingleLine:  isSingleLine,
			CloseBraceLoc: closeBraceLoc,
		}}

	default:
		p.lexer.Unexpected()
		return js_ast.Expr{}
	}
}
+
// JSONOptions configures ParseJSON.
type JSONOptions struct {
	UnsupportedJSFeatures compat.JSFeature    // used to decide whether "__proto__" keys may be made computed
	Flavor                js_lexer.JSONFlavor // the strict JSON flavor reports trailing commas as errors
	ErrorSuffix           string              // appended to lexer error messages; defaults to " in JSON"
}
+
// ParseJSON parses the given source as JSON (or a JSON-like flavor) and
// returns the resulting expression. The boolean result is false when a
// syntax error was encountered; errors are reported through "log".
func ParseJSON(log logger.Log, source logger.Source, options JSONOptions) (result js_ast.Expr, ok bool) {
	ok = true
	// The lexer reports syntax errors by panicking with a LexerPanic value.
	// Catch that here and turn it into a false "ok" result. Any other panic
	// is a real bug and is re-raised.
	defer func() {
		r := recover()
		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
			ok = false
		} else if r != nil {
			panic(r)
		}
	}()

	if options.ErrorSuffix == "" {
		options.ErrorSuffix = " in JSON"
	}

	p := &jsonParser{
		log:                            log,
		source:                         source,
		tracker:                        logger.MakeLineColumnTracker(&source),
		options:                        options,
		lexer:                          js_lexer.NewLexerJSON(log, source, options.Flavor, options.ErrorSuffix),
		suppressWarningsAboutWeirdCode: helpers.IsInsideNodeModules(source.KeyPath.Text),
	}

	// A JSON file is a single expression followed by end-of-file
	result = p.parseExpr()
	p.lexer.Expect(js_lexer.TEndOfFile)
	return
}
+
+func isValidJSON(value js_ast.Expr) bool {
+	switch e := value.Data.(type) {
+	case *js_ast.ENull, *js_ast.EBoolean, *js_ast.EString, *js_ast.ENumber:
+		return true
+
+	case *js_ast.EArray:
+		for _, item := range e.Items {
+			if !isValidJSON(item) {
+				return false
+			}
+		}
+		return true
+
+	case *js_ast.EObject:
+		for _, property := range e.Properties {
+			if property.Kind != js_ast.PropertyField || property.Flags.Has(js_ast.PropertyIsComputed) {
+				return false
+			}
+			if _, ok := property.Key.Data.(*js_ast.EString); !ok {
+				return false
+			}
+			if !isValidJSON(property.ValueOrNil) {
+				return false
+			}
+		}
+		return true
+	}
+
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/sourcemap_parser.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/sourcemap_parser.go
new file mode 100644
index 0000000..c83d767
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/sourcemap_parser.go
@@ -0,0 +1,277 @@
+package js_parser
+
+import (
+	"fmt"
+	"sort"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/sourcemap"
+)
+
// Specification: https://sourcemaps.info/spec.html
//
// ParseSourceMap parses a JSON source map into a sourcemap.SourceMap.
// It returns nil (after logging where appropriate) when the map is
// invalid, unsupported (e.g. uses "sections"), or pointless (no sources
// or no mappings).
func ParseSourceMap(log logger.Log, source logger.Source) *sourcemap.SourceMap {
	expr, ok := ParseJSON(log, source, JSONOptions{ErrorSuffix: " in source map"})
	if !ok {
		return nil
	}

	// The top level of a source map must be a JSON object
	obj, ok := expr.Data.(*js_ast.EObject)
	tracker := logger.MakeLineColumnTracker(&source)
	if !ok {
		log.AddError(&tracker, logger.Range{Loc: expr.Loc}, "Invalid source map")
		return nil
	}

	var sources []string
	var sourcesContent []sourcemap.SourceContent
	var names []string
	var mappingsRaw []uint16
	var mappingsStart int32
	hasVersion := false

	// Extract the fields we care about; unknown fields are ignored
	for _, prop := range obj.Properties {
		keyRange := source.RangeOfString(prop.Key.Loc)

		switch helpers.UTF16ToString(prop.Key.Data.(*js_ast.EString).Value) {
		case "sections":
			log.AddID(logger.MsgID_SourceMap_SectionsInSourceMap, logger.Warning, &tracker, keyRange, "Source maps with \"sections\" are not supported")
			return nil

		case "version":
			if value, ok := prop.ValueOrNil.Data.(*js_ast.ENumber); ok && value.Value == 3 {
				hasVersion = true
			}

		case "mappings":
			if value, ok := prop.ValueOrNil.Data.(*js_ast.EString); ok {
				mappingsRaw = value.Value
				// "+ 1" skips the opening quote of the string literal
				mappingsStart = prop.ValueOrNil.Loc.Start + 1
			}

		case "sources":
			if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
				sources = []string{}
				for _, item := range value.Items {
					if element, ok := item.Data.(*js_ast.EString); ok {
						sources = append(sources, helpers.UTF16ToString(element.Value))
					} else {
						// Non-string entries become empty strings to keep indices aligned
						sources = append(sources, "")
					}
				}
			}

		case "sourcesContent":
			if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
				sourcesContent = []sourcemap.SourceContent{}
				for _, item := range value.Items {
					if element, ok := item.Data.(*js_ast.EString); ok {
						sourcesContent = append(sourcesContent, sourcemap.SourceContent{
							Value:  element.Value,
							Quoted: source.TextForRange(source.RangeOfString(item.Loc)),
						})
					} else {
						sourcesContent = append(sourcesContent, sourcemap.SourceContent{})
					}
				}
			}

		case "names":
			if value, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
				names = []string{}
				for _, item := range value.Items {
					if element, ok := item.Data.(*js_ast.EString); ok {
						names = append(names, helpers.UTF16ToString(element.Value))
					} else {
						names = append(names, "")
					}
				}
			}
		}
	}

	// Silently fail if the version was missing or incorrect
	if !hasVersion {
		return nil
	}

	// Silently fail if the source map is pointless (i.e. empty)
	if len(sources) == 0 || len(mappingsRaw) == 0 {
		return nil
	}

	var mappings mappingArray
	mappingsLen := len(mappingsRaw)
	sourcesLen := len(sources)
	namesLen := len(names)
	// All VLQ fields below are delta-encoded relative to the previous mapping
	var generatedLine int32
	var generatedColumn int32
	var sourceIndex int32
	var originalLine int32
	var originalColumn int32
	var originalName int32
	current := 0
	errorText := ""
	errorLen := 0
	needSort := false

	// Parse the mappings
	for current < mappingsLen {
		// Handle a line break
		if mappingsRaw[current] == ';' {
			generatedLine++
			generatedColumn = 0
			current++
			continue
		}

		// Read the generated column
		generatedColumnDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
		if !ok {
			errorText = "Missing generated column"
			errorLen = i
			break
		}
		if generatedColumnDelta < 0 {
			// This would mess up binary search
			needSort = true
		}
		generatedColumn += generatedColumnDelta
		if generatedColumn < 0 {
			errorText = fmt.Sprintf("Invalid generated column value: %d", generatedColumn)
			errorLen = i
			break
		}
		current += i

		// According to the specification, it's valid for a mapping to have 1,
		// 4, or 5 variable-length fields. Having one field means there's no
		// original location information, which is pretty useless. Just ignore
		// those entries.
		if current == mappingsLen {
			break
		}
		switch mappingsRaw[current] {
		case ',':
			current++
			continue
		case ';':
			continue
		}

		// Read the original source
		sourceIndexDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
		if !ok {
			errorText = "Missing source index"
			errorLen = i
			break
		}
		sourceIndex += sourceIndexDelta
		if sourceIndex < 0 || sourceIndex >= int32(sourcesLen) {
			errorText = fmt.Sprintf("Invalid source index value: %d", sourceIndex)
			errorLen = i
			break
		}
		current += i

		// Read the original line
		originalLineDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
		if !ok {
			errorText = "Missing original line"
			errorLen = i
			break
		}
		originalLine += originalLineDelta
		if originalLine < 0 {
			errorText = fmt.Sprintf("Invalid original line value: %d", originalLine)
			errorLen = i
			break
		}
		current += i

		// Read the original column
		originalColumnDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:])
		if !ok {
			errorText = "Missing original column"
			errorLen = i
			break
		}
		originalColumn += originalColumnDelta
		if originalColumn < 0 {
			errorText = fmt.Sprintf("Invalid original column value: %d", originalColumn)
			errorLen = i
			break
		}
		current += i

		// Read the original name (the optional fifth field)
		var optionalName ast.Index32
		if originalNameDelta, i, ok := sourcemap.DecodeVLQUTF16(mappingsRaw[current:]); ok {
			originalName += originalNameDelta
			if originalName < 0 || originalName >= int32(namesLen) {
				errorText = fmt.Sprintf("Invalid name index value: %d", originalName)
				errorLen = i
				break
			}
			optionalName = ast.MakeIndex32(uint32(originalName))
			current += i
		}

		// Handle the next character
		if current < mappingsLen {
			if c := mappingsRaw[current]; c == ',' {
				current++
			} else if c != ';' {
				errorText = fmt.Sprintf("Invalid character after mapping: %q",
					helpers.UTF16ToString(mappingsRaw[current:current+1]))
				errorLen = 1
				break
			}
		}

		mappings = append(mappings, sourcemap.Mapping{
			GeneratedLine:   generatedLine,
			GeneratedColumn: generatedColumn,
			SourceIndex:     sourceIndex,
			OriginalLine:    originalLine,
			OriginalColumn:  originalColumn,
			OriginalName:    optionalName,
		})
	}

	if errorText != "" {
		r := logger.Range{Loc: logger.Loc{Start: mappingsStart + int32(current)}, Len: int32(errorLen)}
		log.AddID(logger.MsgID_SourceMap_InvalidSourceMappings, logger.Warning, &tracker, r,
			fmt.Sprintf("Bad \"mappings\" data in source map at character %d: %s", current, errorText))
		return nil
	}

	if needSort {
		// If we get here, some mappings are out of order. Lines can't be out of
		// order by construction but columns can. This is a pretty rare situation
		// because almost all source map generators always write out mappings in
		// order as they write the output instead of scrambling the order.
		sort.Stable(mappings)
	}

	return &sourcemap.SourceMap{
		Sources:        sources,
		SourcesContent: sourcesContent,
		Mappings:       mappings,
		Names:          names,
	}
}
+
+// This type is just so we can use Go's native sort function
+type mappingArray []sourcemap.Mapping
+
+func (a mappingArray) Len() int          { return len(a) }
+func (a mappingArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a mappingArray) Less(i int, j int) bool {
+	ai := a[i]
+	aj := a[j]
+	return ai.GeneratedLine < aj.GeneratedLine || (ai.GeneratedLine == aj.GeneratedLine && ai.GeneratedColumn <= aj.GeneratedColumn)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_parser/ts_parser.go b/source/vendor/github.com/evanw/esbuild/internal/js_parser/ts_parser.go
new file mode 100644
index 0000000..6338fa0
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_parser/ts_parser.go
@@ -0,0 +1,1999 @@
+// This file contains code for parsing TypeScript syntax. The parser just skips
+// over type expressions as if they are whitespace and doesn't bother generating
+// an AST because nothing uses type information.
+
+package js_parser
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// skipTypeScriptBinding skips over a binding in a type-only context: an
// identifier, "this", or a (possibly nested) array or object
// destructuring pattern. Anything else is a syntax error.
func (p *parser) skipTypeScriptBinding() {
	switch p.lexer.Token {
	case js_lexer.TIdentifier, js_lexer.TThis:
		p.lexer.Next()

	case js_lexer.TOpenBracket:
		p.lexer.Next()

		// "[, , a]"
		for p.lexer.Token == js_lexer.TComma {
			p.lexer.Next()
		}

		// "[a, b]"
		for p.lexer.Token != js_lexer.TCloseBracket {
			// "[...a]"
			if p.lexer.Token == js_lexer.TDotDotDot {
				p.lexer.Next()
			}

			// Array elements may themselves be nested patterns
			p.skipTypeScriptBinding()
			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}

		p.lexer.Expect(js_lexer.TCloseBracket)

	case js_lexer.TOpenBrace:
		p.lexer.Next()

		for p.lexer.Token != js_lexer.TCloseBrace {
			// Tracks whether a ":" after the key is optional (identifier
			// shorthand) or required (string/number/keyword keys)
			foundIdentifier := false

			switch p.lexer.Token {
			case js_lexer.TDotDotDot:
				p.lexer.Next()

				if p.lexer.Token != js_lexer.TIdentifier {
					p.lexer.Unexpected()
				}

				// "{...x}"
				foundIdentifier = true
				p.lexer.Next()

			case js_lexer.TIdentifier:
				// "{x}"
				// "{x: y}"
				foundIdentifier = true
				p.lexer.Next()

				// "{1: y}"
				// "{'x': y}"
			case js_lexer.TStringLiteral, js_lexer.TNumericLiteral:
				p.lexer.Next()

			default:
				if p.lexer.IsIdentifierOrKeyword() {
					// "{if: x}"
					p.lexer.Next()
				} else {
					p.lexer.Unexpected()
				}
			}

			if p.lexer.Token == js_lexer.TColon || !foundIdentifier {
				p.lexer.Expect(js_lexer.TColon)
				p.skipTypeScriptBinding()
			}

			if p.lexer.Token != js_lexer.TComma {
				break
			}
			p.lexer.Next()
		}

		p.lexer.Expect(js_lexer.TCloseBrace)

	default:
		p.lexer.Unexpected()
	}
}
+
// skipTypeScriptFnArgs skips over a parenthesized function-type
// parameter list, e.g. "(a: any, b?: string, ...rest)". Each parameter
// is a binding pattern optionally followed by "?" and a ": type"
// annotation.
func (p *parser) skipTypeScriptFnArgs() {
	p.lexer.Expect(js_lexer.TOpenParen)

	for p.lexer.Token != js_lexer.TCloseParen {
		// "(...a)"
		if p.lexer.Token == js_lexer.TDotDotDot {
			p.lexer.Next()
		}

		p.skipTypeScriptBinding()

		// "(a?)"
		if p.lexer.Token == js_lexer.TQuestion {
			p.lexer.Next()
		}

		// "(a: any)"
		if p.lexer.Token == js_lexer.TColon {
			p.lexer.Next()
			p.skipTypeScriptType(js_ast.LLowest)
		}

		// "(a, b)"
		if p.lexer.Token != js_lexer.TComma {
			break
		}
		p.lexer.Next()
	}

	p.lexer.Expect(js_lexer.TCloseParen)
}
+
+// This is a spot where the TypeScript grammar is highly ambiguous. Here are
+// some cases that are valid:
+//
+//	let x = (y: any): (() => {}) => { };
+//	let x = (y: any): () => {} => { };
+//	let x = (y: any): (y) => {} => { };
+//	let x = (y: any): (y[]) => {};
+//	let x = (y: any): (a | b) => {};
+//
+// Here are some cases that aren't valid:
+//
+//	let x = (y: any): (y) => {};
+//	let x = (y: any): (y) => {return 0};
+//	let x = (y: any): asserts y is (y) => {};
+func (p *parser) skipTypeScriptParenOrFnType() {
+	if p.trySkipTypeScriptArrowArgsWithBacktracking() {
+		p.skipTypeScriptReturnType()
+	} else {
+		p.lexer.Expect(js_lexer.TOpenParen)
+		p.skipTypeScriptType(js_ast.LLowest)
+		p.lexer.Expect(js_lexer.TCloseParen)
+	}
+}
+
// skipTypeScriptReturnType skips a function return type annotation. The
// return-type flag additionally permits "asserts x" / "x is T" forms.
func (p *parser) skipTypeScriptReturnType() {
	p.skipTypeScriptTypeWithFlags(js_ast.LLowest, isReturnTypeFlag)
}
+
// skipTypeScriptType skips over a type expression at the given operator
// precedence level without building any AST for it.
func (p *parser) skipTypeScriptType(level js_ast.L) {
	p.skipTypeScriptTypeWithFlags(level, 0)
}
+
// skipTypeFlags is a bit set of contextual modifiers that change how a
// TypeScript type expression is skipped.
type skipTypeFlags uint8

const (
	// Set while parsing a function return type, which enables "asserts x"
	// and "x is T" predicate forms
	isReturnTypeFlag skipTypeFlags = 1 << iota

	// Together with allowTupleLabelsFlag, lets prefix keywords such as
	// "keyof" or "infer" be followed directly by ":" or "in"
	isIndexSignatureFlag

	// Allows tuple member labels such as "[first: number]"
	allowTupleLabelsFlag

	// Forbids "... extends ... ? ... : ..." conditional types
	disallowConditionalTypesFlag
)

// has reports whether any bit of "flag" is set in "flags".
func (flags skipTypeFlags) has(flag skipTypeFlags) bool {
	return flags&flag != 0
}
+
// tsTypeIdentifierKind classifies identifiers that get special handling
// at the start of a TypeScript type expression (see tsTypeIdentifierMap).
type tsTypeIdentifierKind uint8

const (
	// Any other identifier: no special handling
	tsTypeIdentifierNormal tsTypeIdentifierKind = iota
	// "unique", as in "unique symbol"
	tsTypeIdentifierUnique
	// "abstract", as in "abstract new () => {}"
	tsTypeIdentifierAbstract
	// "asserts", as in "asserts x is boolean" return types
	tsTypeIdentifierAsserts
	// Prefix operators: "keyof" and "readonly"
	tsTypeIdentifierPrefix
	// Primitive type names such as "any", "string", "number"
	tsTypeIdentifierPrimitive
	// "infer", as in "Bar extends [infer T] ? T : null"
	tsTypeIdentifierInfer
)
+
// Use a map to improve lookup speed
//
// tsTypeIdentifierMap maps identifiers with special meaning at the start
// of a type expression to their classification. Identifiers not present
// map to the zero value, tsTypeIdentifierNormal.
var tsTypeIdentifierMap = map[string]tsTypeIdentifierKind{
	"unique":   tsTypeIdentifierUnique,
	"abstract": tsTypeIdentifierAbstract,
	"asserts":  tsTypeIdentifierAsserts,

	"keyof":    tsTypeIdentifierPrefix,
	"readonly": tsTypeIdentifierPrefix,

	"any":       tsTypeIdentifierPrimitive,
	"never":     tsTypeIdentifierPrimitive,
	"unknown":   tsTypeIdentifierPrimitive,
	"undefined": tsTypeIdentifierPrimitive,
	"object":    tsTypeIdentifierPrimitive,
	"number":    tsTypeIdentifierPrimitive,
	"string":    tsTypeIdentifierPrimitive,
	"boolean":   tsTypeIdentifierPrimitive,
	"bigint":    tsTypeIdentifierPrimitive,
	"symbol":    tsTypeIdentifierPrimitive,

	"infer": tsTypeIdentifierInfer,
}
+
+func (p *parser) skipTypeScriptTypeWithFlags(level js_ast.L, flags skipTypeFlags) {
+loop:
+	for {
+		switch p.lexer.Token {
+		case js_lexer.TNumericLiteral, js_lexer.TBigIntegerLiteral, js_lexer.TStringLiteral,
+			js_lexer.TNoSubstitutionTemplateLiteral, js_lexer.TTrue, js_lexer.TFalse,
+			js_lexer.TNull, js_lexer.TVoid:
+			p.lexer.Next()
+
+		case js_lexer.TConst:
+			r := p.lexer.Range()
+			p.lexer.Next()
+
+			// "[const: number]"
+			if flags.has(allowTupleLabelsFlag) && p.lexer.Token == js_lexer.TColon {
+				p.log.AddError(&p.tracker, r, "Unexpected \"const\"")
+			}
+
+		case js_lexer.TThis:
+			p.lexer.Next()
+
+			// "function check(): this is boolean"
+			if p.lexer.IsContextualKeyword("is") && !p.lexer.HasNewlineBefore {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+				return
+			}
+
+		case js_lexer.TMinus:
+			// "-123"
+			// "-123n"
+			p.lexer.Next()
+			if p.lexer.Token == js_lexer.TBigIntegerLiteral {
+				p.lexer.Next()
+			} else {
+				p.lexer.Expect(js_lexer.TNumericLiteral)
+			}
+
+		case js_lexer.TAmpersand:
+		case js_lexer.TBar:
+			// Support things like "type Foo = | A | B" and "type Foo = & A & B"
+			p.lexer.Next()
+			continue
+
+		case js_lexer.TImport:
+			// "import('fs')"
+			p.lexer.Next()
+
+			// "[import: number]"
+			if flags.has(allowTupleLabelsFlag) && p.lexer.Token == js_lexer.TColon {
+				return
+			}
+
+			p.lexer.Expect(js_lexer.TOpenParen)
+			p.lexer.Expect(js_lexer.TStringLiteral)
+
+			// "import('./foo.json', { assert: { type: 'json' } })"
+			if p.lexer.Token == js_lexer.TComma {
+				p.lexer.Next()
+				p.skipTypeScriptObjectType()
+
+				// "import('./foo.json', { assert: { type: 'json' } }, )"
+				if p.lexer.Token == js_lexer.TComma {
+					p.lexer.Next()
+				}
+			}
+
+			p.lexer.Expect(js_lexer.TCloseParen)
+
+		case js_lexer.TNew:
+			// "new () => Foo"
+			// "new <T>() => Foo<T>"
+			p.lexer.Next()
+
+			// "[new: number]"
+			if flags.has(allowTupleLabelsFlag) && p.lexer.Token == js_lexer.TColon {
+				return
+			}
+
+			p.skipTypeScriptTypeParameters(allowConstModifier)
+			p.skipTypeScriptParenOrFnType()
+
+		case js_lexer.TLessThan:
+			// "<T>() => Foo<T>"
+			p.skipTypeScriptTypeParameters(allowConstModifier)
+			p.skipTypeScriptParenOrFnType()
+
+		case js_lexer.TOpenParen:
+			// "(number | string)"
+			p.skipTypeScriptParenOrFnType()
+
+		case js_lexer.TIdentifier:
+			kind := tsTypeIdentifierMap[p.lexer.Identifier.String]
+			checkTypeParameters := true
+
+			switch kind {
+			case tsTypeIdentifierPrefix:
+				p.lexer.Next()
+
+				// Valid:
+				//   "[keyof: string]"
+				//   "{[keyof: string]: number}"
+				//   "{[keyof in string]: number}"
+				//
+				// Invalid:
+				//   "A extends B ? keyof : string"
+				//
+				if (p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TIn) || (!flags.has(isIndexSignatureFlag) && !flags.has(allowTupleLabelsFlag)) {
+					p.skipTypeScriptType(js_ast.LPrefix)
+				}
+				break loop
+
+			case tsTypeIdentifierInfer:
+				p.lexer.Next()
+
+				// "type Foo = Bar extends [infer T] ? T : null"
+				// "type Foo = Bar extends [infer T extends string] ? T : null"
+				// "type Foo = Bar extends [infer T extends string ? infer T : never] ? T : null"
+				// "type Foo = { [infer in Bar]: number }"
+				if (p.lexer.Token != js_lexer.TColon && p.lexer.Token != js_lexer.TIn) || (!flags.has(isIndexSignatureFlag) && !flags.has(allowTupleLabelsFlag)) {
+					p.lexer.Expect(js_lexer.TIdentifier)
+					if p.lexer.Token == js_lexer.TExtends {
+						p.trySkipTypeScriptConstraintOfInferTypeWithBacktracking(flags)
+					}
+				}
+				break loop
+
+			case tsTypeIdentifierUnique:
+				p.lexer.Next()
+
+				// "let foo: unique symbol"
+				if p.lexer.IsContextualKeyword("symbol") {
+					p.lexer.Next()
+					break loop
+				}
+
+			case tsTypeIdentifierAbstract:
+				p.lexer.Next()
+
+				// "let foo: abstract new () => {}" added in TypeScript 4.2
+				if p.lexer.Token == js_lexer.TNew {
+					continue
+				}
+
+			case tsTypeIdentifierAsserts:
+				p.lexer.Next()
+
+				// "function assert(x: boolean): asserts x"
+				// "function assert(x: boolean): asserts x is boolean"
+				if flags.has(isReturnTypeFlag) && !p.lexer.HasNewlineBefore && (p.lexer.Token == js_lexer.TIdentifier || p.lexer.Token == js_lexer.TThis) {
+					p.lexer.Next()
+				}
+
+			case tsTypeIdentifierPrimitive:
+				p.lexer.Next()
+				checkTypeParameters = false
+
+			default:
+				p.lexer.Next()
+			}
+
+			// "function assert(x: any): x is boolean"
+			if p.lexer.IsContextualKeyword("is") && !p.lexer.HasNewlineBefore {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+				return
+			}
+
+			// "let foo: any \n <number>foo" must not become a single type
+			if checkTypeParameters && !p.lexer.HasNewlineBefore {
+				p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
+			}
+
+		case js_lexer.TTypeof:
+			p.lexer.Next()
+
+			// "[typeof: number]"
+			if flags.has(allowTupleLabelsFlag) && p.lexer.Token == js_lexer.TColon {
+				return
+			}
+
+			if p.lexer.Token == js_lexer.TImport {
+				// "typeof import('fs')"
+				continue
+			} else {
+				// "typeof x"
+				if !p.lexer.IsIdentifierOrKeyword() {
+					p.lexer.Expected(js_lexer.TIdentifier)
+				}
+				p.lexer.Next()
+
+				// "typeof x.y"
+				// "typeof x.#y"
+				for p.lexer.Token == js_lexer.TDot {
+					p.lexer.Next()
+					if !p.lexer.IsIdentifierOrKeyword() && p.lexer.Token != js_lexer.TPrivateIdentifier {
+						p.lexer.Expected(js_lexer.TIdentifier)
+					}
+					p.lexer.Next()
+				}
+
+				if !p.lexer.HasNewlineBefore {
+					p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
+				}
+			}
+
+		case js_lexer.TOpenBracket:
+			// "[number, string]"
+			// "[first: number, second: string]"
+			p.lexer.Next()
+			for p.lexer.Token != js_lexer.TCloseBracket {
+				if p.lexer.Token == js_lexer.TDotDotDot {
+					p.lexer.Next()
+				}
+				p.skipTypeScriptTypeWithFlags(js_ast.LLowest, allowTupleLabelsFlag)
+				if p.lexer.Token == js_lexer.TQuestion {
+					p.lexer.Next()
+				}
+				if p.lexer.Token == js_lexer.TColon {
+					p.lexer.Next()
+					p.skipTypeScriptType(js_ast.LLowest)
+				}
+				if p.lexer.Token != js_lexer.TComma {
+					break
+				}
+				p.lexer.Next()
+			}
+			p.lexer.Expect(js_lexer.TCloseBracket)
+
+		case js_lexer.TOpenBrace:
+			p.skipTypeScriptObjectType()
+
+		case js_lexer.TTemplateHead:
+			// "`${'a' | 'b'}-${'c' | 'd'}`"
+			for {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+				p.lexer.RescanCloseBraceAsTemplateToken()
+				if p.lexer.Token == js_lexer.TTemplateTail {
+					p.lexer.Next()
+					break
+				}
+			}
+
+		default:
+			// "[function: number]"
+			if flags.has(allowTupleLabelsFlag) && p.lexer.IsIdentifierOrKeyword() {
+				if p.lexer.Token != js_lexer.TFunction {
+					p.log.AddError(&p.tracker, p.lexer.Range(), fmt.Sprintf("Unexpected %q", p.lexer.Raw()))
+				}
+				p.lexer.Next()
+				if p.lexer.Token != js_lexer.TColon {
+					p.lexer.Expect(js_lexer.TColon)
+				}
+				return
+			}
+
+			p.lexer.Unexpected()
+		}
+		break
+	}
+
+	for {
+		switch p.lexer.Token {
+		case js_lexer.TBar:
+			if level >= js_ast.LBitwiseOr {
+				return
+			}
+			p.lexer.Next()
+			p.skipTypeScriptTypeWithFlags(js_ast.LBitwiseOr, flags)
+
+		case js_lexer.TAmpersand:
+			if level >= js_ast.LBitwiseAnd {
+				return
+			}
+			p.lexer.Next()
+			p.skipTypeScriptTypeWithFlags(js_ast.LBitwiseAnd, flags)
+
+		case js_lexer.TExclamation:
+			// A postfix "!" is allowed in JSDoc types in TypeScript, which are only
+			// present in comments. While it's not valid in a non-comment position,
+			// it's still parsed and turned into a soft error by the TypeScript
+			// compiler. It turns out parsing this is important for correctness for
+			// "as" casts because the "!" token must still be consumed.
+			if p.lexer.HasNewlineBefore {
+				return
+			}
+			p.lexer.Next()
+
+		case js_lexer.TDot:
+			p.lexer.Next()
+			if !p.lexer.IsIdentifierOrKeyword() {
+				p.lexer.Expect(js_lexer.TIdentifier)
+			}
+			p.lexer.Next()
+
+			// "{ <A extends B>(): c.d \n <E extends F>(): g.h }" must not become a single type
+			if !p.lexer.HasNewlineBefore {
+				p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{})
+			}
+
+		case js_lexer.TOpenBracket:
+			// "{ ['x']: string \n ['y']: string }" must not become a single type
+			if p.lexer.HasNewlineBefore {
+				return
+			}
+			p.lexer.Next()
+			if p.lexer.Token != js_lexer.TCloseBracket {
+				p.skipTypeScriptType(js_ast.LLowest)
+			}
+			p.lexer.Expect(js_lexer.TCloseBracket)
+
+		case js_lexer.TExtends:
+			// "{ x: number \n extends: boolean }" must not become a single type
+			if p.lexer.HasNewlineBefore || flags.has(disallowConditionalTypesFlag) {
+				return
+			}
+			p.lexer.Next()
+
+			// The type following "extends" is not permitted to be another conditional type
+			p.skipTypeScriptTypeWithFlags(js_ast.LLowest, disallowConditionalTypesFlag)
+			p.lexer.Expect(js_lexer.TQuestion)
+			p.skipTypeScriptType(js_ast.LLowest)
+			p.lexer.Expect(js_lexer.TColon)
+			p.skipTypeScriptType(js_ast.LLowest)
+
+		default:
+			return
+		}
+	}
+}
+
+// skipTypeScriptObjectType skips over a TypeScript object type literal such
+// as "{ a: number }", including index signatures, mapped types, optional
+// ("?") and definite-assignment ("!") markers, and method signatures. The
+// current token must be "{"; on return the matching "}" has been consumed.
+// Nothing is recorded because esbuild discards all TypeScript types.
+func (p *parser) skipTypeScriptObjectType() {
+	p.lexer.Expect(js_lexer.TOpenBrace)
+
+	for p.lexer.Token != js_lexer.TCloseBrace {
+		// "{ -readonly [K in keyof T]: T[K] }"
+		// "{ +readonly [K in keyof T]: T[K] }"
+		if p.lexer.Token == js_lexer.TPlus || p.lexer.Token == js_lexer.TMinus {
+			p.lexer.Next()
+		}
+
+		// Skip over modifiers and the property identifier
+		foundKey := false
+		for p.lexer.IsIdentifierOrKeyword() ||
+			p.lexer.Token == js_lexer.TStringLiteral ||
+			p.lexer.Token == js_lexer.TNumericLiteral {
+			p.lexer.Next()
+			foundKey = true
+		}
+
+		if p.lexer.Token == js_lexer.TOpenBracket {
+			// Index signature or computed property
+			p.lexer.Next()
+			p.skipTypeScriptTypeWithFlags(js_ast.LLowest, isIndexSignatureFlag)
+
+			// "{ [key: string]: number }"
+			// "{ readonly [K in keyof T]: T[K] }"
+			if p.lexer.Token == js_lexer.TColon {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+			} else if p.lexer.Token == js_lexer.TIn {
+				p.lexer.Next()
+				p.skipTypeScriptType(js_ast.LLowest)
+				if p.lexer.IsContextualKeyword("as") {
+					// "{ [K in keyof T as `get-${K}`]: T[K] }"
+					p.lexer.Next()
+					p.skipTypeScriptType(js_ast.LLowest)
+				}
+			}
+
+			p.lexer.Expect(js_lexer.TCloseBracket)
+
+			// "{ [K in keyof T]+?: T[K] }"
+			// "{ [K in keyof T]-?: T[K] }"
+			if p.lexer.Token == js_lexer.TPlus || p.lexer.Token == js_lexer.TMinus {
+				p.lexer.Next()
+			}
+
+			// A bracketed key counts as a key for the member checks below
+			foundKey = true
+		}
+
+		// "?" indicates an optional property
+		// "!" indicates an initialization assertion
+		if foundKey && (p.lexer.Token == js_lexer.TQuestion || p.lexer.Token == js_lexer.TExclamation) {
+			p.lexer.Next()
+		}
+
+		// Type parameters come right after the optional mark
+		p.skipTypeScriptTypeParameters(allowConstModifier)
+
+		switch p.lexer.Token {
+		case js_lexer.TColon:
+			// Regular property
+			if !foundKey {
+				p.lexer.Expect(js_lexer.TIdentifier)
+			}
+			p.lexer.Next()
+			p.skipTypeScriptType(js_ast.LLowest)
+
+		case js_lexer.TOpenParen:
+			// Method signature
+			p.skipTypeScriptFnArgs()
+			if p.lexer.Token == js_lexer.TColon {
+				p.lexer.Next()
+				p.skipTypeScriptReturnType()
+			}
+
+		default:
+			if !foundKey {
+				p.lexer.Unexpected()
+			}
+		}
+
+		// Consume the member separator; it may be omitted either directly
+		// before "}" or when the next member starts on a new line
+		switch p.lexer.Token {
+		case js_lexer.TCloseBrace:
+
+		case js_lexer.TComma, js_lexer.TSemicolon:
+			p.lexer.Next()
+
+		default:
+			if !p.lexer.HasNewlineBefore {
+				p.lexer.Unexpected()
+			}
+		}
+	}
+
+	p.lexer.Expect(js_lexer.TCloseBrace)
+}
+
+// typeParameterFlags controls which optional modifiers are permitted while
+// skipping a type parameter list.
+type typeParameterFlags uint8
+
+const (
+	// Permit "in"/"out" variance annotations on type parameters (TypeScript 4.7)
+	allowInOutVarianceAnnotations typeParameterFlags = 1 << iota
+
+	// Permit the "const" modifier on type parameters (TypeScript 5.0)
+	allowConstModifier
+
+	// Allow "<>" without any type parameters
+	allowEmptyTypeParameters
+)
+
+// skipTypeScriptTypeParametersResult describes what skipTypeScriptTypeParameters
+// consumed, which callers use to disambiguate "<" between a type parameter
+// list and other constructs.
+type skipTypeScriptTypeParametersResult uint8
+
+const (
+	// The current token was not "<", so nothing was consumed
+	didNotSkipAnything skipTypeScriptTypeParametersResult = iota
+
+	// A "<...>" was skipped but it could still be a type cast
+	couldBeTypeCast
+
+	// A "<...>" was skipped that can only be a type parameter list
+	definitelyTypeParameters
+)
+
+// This is the type parameter declarations that go with other symbol
+// declarations (class, function, type, etc.)
+//
+// If the current token is not "<", nothing is consumed and didNotSkipAnything
+// is returned. Otherwise the entire "<...>" list is consumed, at most one
+// misplaced modifier is reported as an error, and the result says whether
+// what was skipped could still be a type cast or must be type parameters.
+func (p *parser) skipTypeScriptTypeParameters(flags typeParameterFlags) skipTypeScriptTypeParametersResult {
+	if p.lexer.Token != js_lexer.TLessThan {
+		return didNotSkipAnything
+	}
+
+	p.lexer.Next()
+	result := couldBeTypeCast
+
+	// "<>" is only valid when the caller explicitly allows it
+	if (flags&allowEmptyTypeParameters) != 0 && p.lexer.Token == js_lexer.TGreaterThan {
+		p.lexer.Next()
+		return definitelyTypeParameters
+	}
+
+	for {
+		hasIn := false
+		hasOut := false
+		expectIdentifier := true
+		invalidModifierRange := logger.Range{}
+
+		// Scan over a sequence of "in" and "out" modifiers (a.k.a. optional
+		// variance annotations) as well as "const" modifiers
+		for {
+			if p.lexer.Token == js_lexer.TConst {
+				if invalidModifierRange.Len == 0 && (flags&allowConstModifier) == 0 {
+					// Valid:
+					//   "class Foo<const T> {}"
+					// Invalid:
+					//   "interface Foo<const T> {}"
+					invalidModifierRange = p.lexer.Range()
+				}
+				result = definitelyTypeParameters
+				p.lexer.Next()
+				expectIdentifier = true
+				continue
+			}
+
+			if p.lexer.Token == js_lexer.TIn {
+				if invalidModifierRange.Len == 0 && ((flags&allowInOutVarianceAnnotations) == 0 || hasIn || hasOut) {
+					// Valid:
+					//   "type Foo<in T> = T"
+					// Invalid:
+					//   "type Foo<in in T> = T"
+					//   "type Foo<out in T> = T"
+					invalidModifierRange = p.lexer.Range()
+				}
+				p.lexer.Next()
+				hasIn = true
+				expectIdentifier = true
+				continue
+			}
+
+			if p.lexer.IsContextualKeyword("out") {
+				r := p.lexer.Range()
+				if invalidModifierRange.Len == 0 && (flags&allowInOutVarianceAnnotations) == 0 {
+					invalidModifierRange = r
+				}
+				p.lexer.Next()
+				if invalidModifierRange.Len == 0 && hasOut && (p.lexer.Token == js_lexer.TIn || p.lexer.Token == js_lexer.TIdentifier) {
+					// Valid:
+					//   "type Foo<out T> = T"
+					//   "type Foo<out out> = T"
+					//   "type Foo<out out, T> = T"
+					//   "type Foo<out out = T> = T"
+					//   "type Foo<out out extends T> = T"
+					// Invalid:
+					//   "type Foo<out out in T> = T"
+					//   "type Foo<out out T> = T"
+					invalidModifierRange = r
+				}
+				hasOut = true
+				expectIdentifier = false
+				continue
+			}
+
+			break
+		}
+
+		// Only report an error for the first invalid modifier
+		if invalidModifierRange.Len > 0 {
+			p.log.AddError(&p.tracker, invalidModifierRange, fmt.Sprintf(
+				"The modifier %q is not valid here:", p.source.TextForRange(invalidModifierRange)))
+		}
+
+		// expectIdentifier => Mandatory identifier (e.g. after "type Foo <in ___")
+		// !expectIdentifier => Optional identifier (e.g. after "type Foo <out ___" since "out" may be the identifier)
+		if expectIdentifier || p.lexer.Token == js_lexer.TIdentifier {
+			p.lexer.Expect(js_lexer.TIdentifier)
+		}
+
+		// "class Foo<T extends number> {}"
+		if p.lexer.Token == js_lexer.TExtends {
+			result = definitelyTypeParameters
+			p.lexer.Next()
+			p.skipTypeScriptType(js_ast.LLowest)
+		}
+
+		// "class Foo<T = void> {}"
+		if p.lexer.Token == js_lexer.TEquals {
+			result = definitelyTypeParameters
+			p.lexer.Next()
+			p.skipTypeScriptType(js_ast.LLowest)
+		}
+
+		if p.lexer.Token != js_lexer.TComma {
+			break
+		}
+		p.lexer.Next()
+
+		// A trailing comma before ">" forces the type parameter interpretation
+		if p.lexer.Token == js_lexer.TGreaterThan {
+			result = definitelyTypeParameters
+			break
+		}
+	}
+
+	p.lexer.ExpectGreaterThan(false /* isInsideJSXElement */)
+	return result
+}
+
+// skipTypeScriptTypeArgumentsOpts adjusts how skipTypeScriptTypeArguments
+// scans the surrounding "<" and ">" tokens.
+type skipTypeScriptTypeArgumentsOpts struct {
+	// True when the list appears inside a JSX element, which changes how the
+	// closing ">" is scanned
+	isInsideJSXElement               bool
+
+	// Emulates the TypeScript compiler's "parseTypeArgumentsInExpression",
+	// which only accepts a lone ">" token as the terminator
+	isParseTypeArgumentsInExpression bool
+}
+
+// skipTypeScriptTypeArguments skips a type argument list such as
+// "<number, string>" if one starts at the current token. It returns false
+// without consuming anything when the current token cannot begin such a
+// list, and true after the closing ">" has been consumed.
+func (p *parser) skipTypeScriptTypeArguments(opts skipTypeScriptTypeArgumentsOpts) bool {
+	switch p.lexer.Token {
+	// "<=", "<<", and "<<=" all start with "<" and are split apart by
+	// "ExpectLessThan" below, so they can also begin a type argument list
+	case js_lexer.TLessThan, js_lexer.TLessThanEquals,
+		js_lexer.TLessThanLessThan, js_lexer.TLessThanLessThanEquals:
+	default:
+		return false
+	}
+
+	p.lexer.ExpectLessThan(false /* isInsideJSXElement */)
+
+	// Skip the comma-separated list of type arguments
+	for {
+		p.skipTypeScriptType(js_ast.LLowest)
+		if p.lexer.Token != js_lexer.TComma {
+			break
+		}
+		p.lexer.Next()
+	}
+
+	// This type argument list must end with a ">"
+	if !opts.isParseTypeArgumentsInExpression {
+		// Normally TypeScript allows any token starting with ">". For example,
+		// "Array<Array<number>>()" is a type argument list even though there's a
+		// ">>" token, because ">>" starts with ">".
+		p.lexer.ExpectGreaterThan(opts.isInsideJSXElement)
+	} else {
+		// However, if we're emulating the TypeScript compiler's function called
+		// "parseTypeArgumentsInExpression" function, then we must only allow the
+		// ">" token itself. For example, "x < y >= z" is not a type argument list.
+		//
+		// This doesn't detect ">>" in "Array<Array<number>>()" because the inner
+		// type argument list isn't a call to "parseTypeArgumentsInExpression"
+		// because it's within a type context, not an expression context. So the
+		// token that we see here is ">" in that case because the first ">" has
+		// already been stripped off of the ">>" by the inner call.
+		if opts.isInsideJSXElement {
+			p.lexer.ExpectInsideJSXElement(js_lexer.TGreaterThan)
+		} else {
+			p.lexer.Expect(js_lexer.TGreaterThan)
+		}
+	}
+	return true
+}
+
+// trySkipTypeArgumentsInExpressionWithBacktracking attempts to skip a type
+// argument list in an expression context (e.g. the "<T>" in "f<T>(x)").
+// A parse failure surfaces as a lexer panic which is recovered below; in
+// that case the saved lexer state is restored (so no tokens are consumed)
+// and the function returns false, the zero value. It returns true on success.
+func (p *parser) trySkipTypeArgumentsInExpressionWithBacktracking() bool {
+	oldLexer := p.lexer
+	p.lexer.IsLogDisabled = true
+
+	// Implement backtracking by restoring the lexer's memory to its original state
+	defer func() {
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			p.lexer = oldLexer
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	if p.skipTypeScriptTypeArguments(skipTypeScriptTypeArgumentsOpts{isParseTypeArgumentsInExpression: true}) {
+		// Check the token after the type argument list and backtrack if it's invalid
+		if !p.tsCanFollowTypeArgumentsInExpression() {
+			p.lexer.Unexpected()
+		}
+	}
+
+	// Restore the log disabled flag. Note that we can't just set it back to false
+	// because it may have been true to start with.
+	p.lexer.IsLogDisabled = oldLexer.IsLogDisabled
+	return true
+}
+
+// trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking attempts to
+// skip a type parameter list that is immediately followed by "(" (e.g. the
+// "<T>" in "<T>(x: T) => x"). On failure a lexer panic is recovered below,
+// the saved lexer state is restored, and the zero value
+// (didNotSkipAnything) is returned.
+func (p *parser) trySkipTypeScriptTypeParametersThenOpenParenWithBacktracking() skipTypeScriptTypeParametersResult {
+	oldLexer := p.lexer
+	p.lexer.IsLogDisabled = true
+
+	// Implement backtracking by restoring the lexer's memory to its original state
+	defer func() {
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			p.lexer = oldLexer
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	result := p.skipTypeScriptTypeParameters(allowConstModifier)
+
+	// Backtrack unless the type parameters are followed by "("
+	if p.lexer.Token != js_lexer.TOpenParen {
+		p.lexer.Unexpected()
+	}
+
+	// Restore the log disabled flag. Note that we can't just set it back to false
+	// because it may have been true to start with.
+	p.lexer.IsLogDisabled = oldLexer.IsLogDisabled
+	return result
+}
+
+// trySkipTypeScriptArrowReturnTypeWithBacktracking attempts to skip an arrow
+// function return type annotation of the form ": T" that must be followed by
+// "=>". On failure a lexer panic is recovered below, the saved lexer state
+// is restored, and false (the zero value) is returned.
+func (p *parser) trySkipTypeScriptArrowReturnTypeWithBacktracking() bool {
+	oldLexer := p.lexer
+	p.lexer.IsLogDisabled = true
+
+	// Implement backtracking by restoring the lexer's memory to its original state
+	defer func() {
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			p.lexer = oldLexer
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	p.lexer.Expect(js_lexer.TColon)
+	p.skipTypeScriptReturnType()
+
+	// Check the token after this and backtrack if it's the wrong one
+	if p.lexer.Token != js_lexer.TEqualsGreaterThan {
+		p.lexer.Unexpected()
+	}
+
+	// Restore the log disabled flag. Note that we can't just set it back to false
+	// because it may have been true to start with.
+	p.lexer.IsLogDisabled = oldLexer.IsLogDisabled
+	return true
+}
+
+// trySkipTypeScriptArrowArgsWithBacktracking attempts to skip a typed arrow
+// function argument list of the form "(a: T, ...)" that must be followed by
+// "=>". On failure a lexer panic is recovered below, the saved lexer state
+// is restored, and false (the zero value) is returned.
+func (p *parser) trySkipTypeScriptArrowArgsWithBacktracking() bool {
+	oldLexer := p.lexer
+	p.lexer.IsLogDisabled = true
+
+	// Implement backtracking by restoring the lexer's memory to its original state
+	defer func() {
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			p.lexer = oldLexer
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	p.skipTypeScriptFnArgs()
+	p.lexer.Expect(js_lexer.TEqualsGreaterThan)
+
+	// Restore the log disabled flag. Note that we can't just set it back to false
+	// because it may have been true to start with.
+	p.lexer.IsLogDisabled = oldLexer.IsLogDisabled
+	return true
+}
+
+// trySkipTypeScriptConstraintOfInferTypeWithBacktracking attempts to skip an
+// "extends T" constraint on an "infer" type. The attempt is rejected (via
+// p.lexer.Unexpected) when a "?" follows in a position where a conditional
+// type is allowed, since the "extends" then belongs to a conditional type
+// instead. On failure a lexer panic is recovered below, the saved lexer
+// state is restored, and false (the zero value) is returned.
+func (p *parser) trySkipTypeScriptConstraintOfInferTypeWithBacktracking(flags skipTypeFlags) bool {
+	oldLexer := p.lexer
+	p.lexer.IsLogDisabled = true
+
+	// Implement backtracking by restoring the lexer's memory to its original state
+	defer func() {
+		r := recover()
+		if _, isLexerPanic := r.(js_lexer.LexerPanic); isLexerPanic {
+			p.lexer = oldLexer
+		} else if r != nil {
+			panic(r)
+		}
+	}()
+
+	p.lexer.Expect(js_lexer.TExtends)
+	p.skipTypeScriptTypeWithFlags(js_ast.LPrefix, disallowConditionalTypesFlag)
+	if !flags.has(disallowConditionalTypesFlag) && p.lexer.Token == js_lexer.TQuestion {
+		p.lexer.Unexpected()
+	}
+
+	// Restore the log disabled flag. Note that we can't just set it back to false
+	// because it may have been true to start with.
+	p.lexer.IsLogDisabled = oldLexer.IsLogDisabled
+	return true
+}
+
+// Returns true if the current less-than token is considered to be an arrow
+// function under TypeScript's rules for files containing JSX syntax.
+// This looks ahead using a throwaway copy of the lexer, which is restored
+// before returning so no tokens are consumed.
+func (p *parser) isTSArrowFnJSX() (isTSArrowFn bool) {
+	oldLexer := p.lexer
+	p.lexer.Next()
+
+	// Look ahead to see if this should be an arrow function instead
+	if p.lexer.Token == js_lexer.TConst {
+		p.lexer.Next()
+	}
+	if p.lexer.Token == js_lexer.TIdentifier {
+		p.lexer.Next()
+
+		// "<T," and "<T=" are treated as arrow functions
+		if p.lexer.Token == js_lexer.TComma || p.lexer.Token == js_lexer.TEquals {
+			isTSArrowFn = true
+		} else if p.lexer.Token == js_lexer.TExtends {
+			// "<T extends X" is an arrow function unless followed by "=",
+			// ">", or "/"
+			p.lexer.Next()
+			isTSArrowFn = p.lexer.Token != js_lexer.TEquals && p.lexer.Token != js_lexer.TGreaterThan && p.lexer.Token != js_lexer.TSlash
+		}
+	}
+
+	// Restore the lexer
+	p.lexer = oldLexer
+	return
+}
+
+// This function is taken from the official TypeScript compiler source code:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// This function is pretty inefficient as written, and could be collapsed into
+// a single switch statement. But that would make it harder to keep this in
+// sync with the TypeScript compiler's source code, so we keep doing it the
+// slow way.
+//
+// It reports whether the token after a just-parsed type argument list keeps
+// the type-argument interpretation valid in an expression context.
+func (p *parser) tsCanFollowTypeArgumentsInExpression() bool {
+	switch p.lexer.Token {
+	case
+		// These tokens can follow a type argument list in a call expression.
+		js_lexer.TOpenParen,                     // foo<x>(
+		js_lexer.TNoSubstitutionTemplateLiteral, // foo<T> `...`
+		js_lexer.TTemplateHead:                  // foo<T> `...${100}...`
+		return true
+
+	// A type argument list followed by `<` never makes sense, and a type argument list followed
+	// by `>` is ambiguous with a (re-scanned) `>>` operator, so we disqualify both. Also, in
+	// this context, `+` and `-` are unary operators, not binary operators.
+	case js_lexer.TLessThan,
+		js_lexer.TGreaterThan,
+		js_lexer.TPlus,
+		js_lexer.TMinus,
+		// TypeScript always sees "TGreaterThan" instead of these tokens since
+		// their scanner works a little differently than our lexer. So since
+		// "TGreaterThan" is forbidden above, we also forbid these too.
+		js_lexer.TGreaterThanEquals,
+		js_lexer.TGreaterThanGreaterThan,
+		js_lexer.TGreaterThanGreaterThanEquals,
+		js_lexer.TGreaterThanGreaterThanGreaterThan,
+		js_lexer.TGreaterThanGreaterThanGreaterThanEquals:
+		return false
+	}
+
+	// We favor the type argument list interpretation when it is immediately followed by
+	// a line break, a binary operator, or something that can't start an expression.
+	return p.lexer.HasNewlineBefore || p.tsIsBinaryOperator() || !p.tsIsStartOfExpression()
+}
+
+// This function is taken from the official TypeScript compiler source code:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// It reports whether the current token can act as a binary operator in an
+// expression. Note that "in" only counts when p.allowIn is set, and the
+// contextual keywords "as" and "satisfies" also count.
+func (p *parser) tsIsBinaryOperator() bool {
+	switch p.lexer.Token {
+	case js_lexer.TIn:
+		return p.allowIn
+
+	case
+		js_lexer.TQuestionQuestion,
+		js_lexer.TBarBar,
+		js_lexer.TAmpersandAmpersand,
+		js_lexer.TBar,
+		js_lexer.TCaret,
+		js_lexer.TAmpersand,
+		js_lexer.TEqualsEquals,
+		js_lexer.TExclamationEquals,
+		js_lexer.TEqualsEqualsEquals,
+		js_lexer.TExclamationEqualsEquals,
+		js_lexer.TLessThan,
+		js_lexer.TGreaterThan,
+		js_lexer.TLessThanEquals,
+		js_lexer.TGreaterThanEquals,
+		js_lexer.TInstanceof,
+		js_lexer.TLessThanLessThan,
+		js_lexer.TGreaterThanGreaterThan,
+		js_lexer.TGreaterThanGreaterThanGreaterThan,
+		js_lexer.TPlus,
+		js_lexer.TMinus,
+		js_lexer.TAsterisk,
+		js_lexer.TSlash,
+		js_lexer.TPercent,
+		js_lexer.TAsteriskAsterisk:
+		return true
+
+	case js_lexer.TIdentifier:
+		if p.lexer.IsContextualKeyword("as") || p.lexer.IsContextualKeyword("satisfies") {
+			return true
+		}
+	}
+
+	return false
+}
+
+// This function is taken from the official TypeScript compiler source code:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// It reports whether the current token can start an expression, including
+// unary prefix operators and left-hand-side expressions.
+func (p *parser) tsIsStartOfExpression() bool {
+	if p.tsIsStartOfLeftHandSideExpression() {
+		return true
+	}
+
+	switch p.lexer.Token {
+	case
+		js_lexer.TPlus,
+		js_lexer.TMinus,
+		js_lexer.TTilde,
+		js_lexer.TExclamation,
+		js_lexer.TDelete,
+		js_lexer.TTypeof,
+		js_lexer.TVoid,
+		js_lexer.TPlusPlus,
+		js_lexer.TMinusMinus,
+		js_lexer.TLessThan,
+		js_lexer.TPrivateIdentifier,
+		js_lexer.TAt:
+		return true
+
+	default:
+		if p.lexer.Token == js_lexer.TIdentifier && (p.lexer.Identifier.String == "await" || p.lexer.Identifier.String == "yield") {
+			// Yield/await always starts an expression.  Either it is an identifier (in which case
+			// it is definitely an expression).  Or it's a keyword (either because we're in
+			// a generator or async function, or in strict mode (or both)) and it started a yield or await expression.
+			return true
+		}
+
+		// Error tolerance.  If we see the start of some binary operator, we consider
+		// that the start of an expression.  That way we'll parse out a missing identifier,
+		// give a good message about an identifier being missing, and then consume the
+		// rest of the binary expression.
+		if p.tsIsBinaryOperator() {
+			return true
+		}
+
+		return p.tsIsIdentifier()
+	}
+}
+
+// This function is taken from the official TypeScript compiler source code:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// It reports whether the current token can start a left-hand-side
+// expression (literals, templates, "new", "import(...)", etc.).
+func (p *parser) tsIsStartOfLeftHandSideExpression() bool {
+	switch p.lexer.Token {
+	case
+		js_lexer.TThis,
+		js_lexer.TSuper,
+		js_lexer.TNull,
+		js_lexer.TTrue,
+		js_lexer.TFalse,
+		js_lexer.TNumericLiteral,
+		js_lexer.TBigIntegerLiteral,
+		js_lexer.TStringLiteral,
+		js_lexer.TNoSubstitutionTemplateLiteral,
+		js_lexer.TTemplateHead,
+		js_lexer.TOpenParen,
+		js_lexer.TOpenBracket,
+		js_lexer.TOpenBrace,
+		js_lexer.TFunction,
+		js_lexer.TClass,
+		js_lexer.TNew,
+		js_lexer.TSlash,
+		js_lexer.TSlashEquals,
+		js_lexer.TIdentifier:
+		return true
+
+	case js_lexer.TImport:
+		// "import" only starts an expression when followed by "(", "<", or "."
+		return p.tsLookAheadNextTokenIsOpenParenOrLessThanOrDot()
+
+	default:
+		return p.tsIsIdentifier()
+	}
+}
+
+// This function is taken from the official TypeScript compiler source code:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// It peeks at the token after the current one and reports whether it is
+// "(", "<", or ".". The lexer is copied before the lookahead and restored
+// afterward, so no tokens are consumed.
+func (p *parser) tsLookAheadNextTokenIsOpenParenOrLessThanOrDot() (result bool) {
+	oldLexer := p.lexer
+	p.lexer.Next()
+
+	result = p.lexer.Token == js_lexer.TOpenParen ||
+		p.lexer.Token == js_lexer.TLessThan ||
+		p.lexer.Token == js_lexer.TDot
+
+	// Restore the lexer
+	p.lexer = oldLexer
+	return
+}
+
+// Based on the equivalent check in the official TypeScript compiler:
+// https://github.com/microsoft/TypeScript/blob/master/src/compiler/parser.ts
+//
+// Reports whether the current token counts as an identifier. "yield" and
+// "await" are only identifiers when the surrounding function context allows
+// them to be used as such.
+func (p *parser) tsIsIdentifier() bool {
+	if p.lexer.Token != js_lexer.TIdentifier {
+		return false
+	}
+
+	switch p.lexer.Identifier.String {
+	case "yield":
+		// Inside a [yield] context "yield" is a keyword, not an identifier
+		return p.fnOrArrowDataParse.yield == allowIdent
+
+	case "await":
+		// Inside an [await] context "await" is a keyword, not an identifier
+		return p.fnOrArrowDataParse.await == allowIdent
+	}
+
+	return true
+}
+
+// skipTypeScriptInterfaceStmt skips over an interface declaration such as
+// "interface Foo<T> extends A, B { ... }". The interface name is expected
+// to be the current token. When the declaration is at module scope, the
+// name is recorded in p.localTypeNames.
+func (p *parser) skipTypeScriptInterfaceStmt(opts parseStmtOpts) {
+	name := p.lexer.Identifier.String
+	p.lexer.Expect(js_lexer.TIdentifier)
+
+	if opts.isModuleScope {
+		p.localTypeNames[name] = true
+	}
+
+	p.skipTypeScriptTypeParameters(allowInOutVarianceAnnotations | allowEmptyTypeParameters)
+
+	// Both heritage clauses below are a comma-separated list of types
+	skipTypeList := func() {
+		for more := true; more; {
+			p.skipTypeScriptType(js_ast.LLowest)
+			if more = p.lexer.Token == js_lexer.TComma; more {
+				p.lexer.Next()
+			}
+		}
+	}
+
+	// "interface Foo extends Bar, Baz {}"
+	if p.lexer.Token == js_lexer.TExtends {
+		p.lexer.Next()
+		skipTypeList()
+	}
+
+	// An "implements" clause is also tolerated here
+	if p.lexer.IsContextualKeyword("implements") {
+		p.lexer.Next()
+		skipTypeList()
+	}
+
+	p.skipTypeScriptObjectType()
+}
+
+// skipTypeScriptTypeStmt skips a "type" statement such as "type Foo<T> = Bar".
+// For exported forms it also handles "export type {...}" and
+// "export type * from ..." by parsing and discarding the clause. Module-scope
+// type names are recorded in p.localTypeNames.
+func (p *parser) skipTypeScriptTypeStmt(opts parseStmtOpts) {
+	if opts.isExport {
+		switch p.lexer.Token {
+		case js_lexer.TOpenBrace:
+			// "export type {foo}"
+			// "export type {foo} from 'bar'"
+			p.parseExportClause()
+			if p.lexer.IsContextualKeyword("from") {
+				p.lexer.Next()
+				p.parsePath()
+			}
+			p.lexer.ExpectOrInsertSemicolon()
+			return
+
+		// This is invalid TypeScript, and is rejected by the TypeScript compiler:
+		//
+		//   example.ts:1:1 - error TS1383: Only named exports may use 'export type'.
+		//
+		//   1 export type * from './types'
+		//     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+		//
+		// However, people may not know this and then blame esbuild for it not
+		// working. So we parse it anyway and then discard it (since we always
+		// discard all types). People who do this should be running the TypeScript
+		// type checker when using TypeScript, which will then report this error.
+		case js_lexer.TAsterisk:
+			// "export type * from 'path'"
+			p.lexer.Next()
+			if p.lexer.IsContextualKeyword("as") {
+				// "export type * as ns from 'path'"
+				p.lexer.Next()
+				p.parseClauseAlias("export")
+				p.lexer.Next()
+			}
+			p.lexer.ExpectContextualKeyword("from")
+			p.parsePath()
+			p.lexer.ExpectOrInsertSemicolon()
+			return
+		}
+	}
+
+	// "type Foo<T> = ..."
+	name := p.lexer.Identifier.String
+	p.lexer.Expect(js_lexer.TIdentifier)
+
+	if opts.isModuleScope {
+		p.localTypeNames[name] = true
+	}
+
+	p.skipTypeScriptTypeParameters(allowInOutVarianceAnnotations | allowEmptyTypeParameters)
+	p.lexer.Expect(js_lexer.TEquals)
+	p.skipTypeScriptType(js_ast.LLowest)
+	p.lexer.ExpectOrInsertSemicolon()
+}
+
+// parseTypeScriptEnumStmt parses "enum Foo { A, B = 1 }" into an SEnum
+// statement. The "enum" keyword is expected to be the current token. Unlike
+// other type-level constructs, enums generate runtime code, so enum members
+// are declared as symbols and registered in the enum's namespace. For
+// "declare enum" (opts.isTypeScriptDeclare) the body is still parsed but a
+// type-only placeholder statement is returned instead.
+func (p *parser) parseTypeScriptEnumStmt(loc logger.Loc, opts parseStmtOpts) js_ast.Stmt {
+	p.lexer.Expect(js_lexer.TEnum)
+	nameLoc := p.lexer.Loc()
+	nameText := p.lexer.Identifier.String
+	p.lexer.Expect(js_lexer.TIdentifier)
+	name := ast.LocRef{Loc: nameLoc, Ref: ast.InvalidRef}
+
+	// Generate the namespace object
+	exportedMembers := p.getOrCreateExportedNamespaceMembers(nameText, opts.isExport)
+	tsNamespace := &js_ast.TSNamespaceScope{
+		ExportedMembers: exportedMembers,
+		ArgRef:          ast.InvalidRef,
+		IsEnumScope:     true,
+	}
+	enumMemberData := &js_ast.TSNamespaceMemberNamespace{
+		ExportedMembers: exportedMembers,
+	}
+
+	// Declare the enum and create the scope
+	scopeIndex := len(p.scopesInOrder)
+	if !opts.isTypeScriptDeclare {
+		name.Ref = p.declareSymbol(ast.SymbolTSEnum, nameLoc, nameText)
+		p.pushScopeForParsePass(js_ast.ScopeEntry, loc)
+		p.currentScope.TSNamespace = tsNamespace
+		p.refToTSNamespaceMemberData[name.Ref] = enumMemberData
+	}
+
+	p.lexer.Expect(js_lexer.TOpenBrace)
+	values := []js_ast.EnumValue{}
+
+	// "this" is disallowed while parsing enum member initializers
+	oldFnOrArrowData := p.fnOrArrowDataParse
+	p.fnOrArrowDataParse = fnOrArrowDataParse{
+		isThisDisallowed: true,
+		needsAsyncLoc:    logger.Loc{Start: -1},
+	}
+
+	// Parse the body
+	for p.lexer.Token != js_lexer.TCloseBrace {
+		nameRange := p.lexer.Range()
+		value := js_ast.EnumValue{
+			Loc: nameRange.Loc,
+			Ref: ast.InvalidRef,
+		}
+
+		// Parse the name, which can be an identifier, keyword, or string literal
+		var nameText string
+		if p.lexer.Token == js_lexer.TStringLiteral {
+			value.Name = p.lexer.StringLiteral()
+			nameText = helpers.UTF16ToString(value.Name)
+		} else if p.lexer.IsIdentifierOrKeyword() {
+			nameText = p.lexer.Identifier.String
+			value.Name = helpers.StringToUTF16(nameText)
+		} else {
+			p.lexer.Expect(js_lexer.TIdentifier)
+		}
+		p.lexer.Next()
+
+		// Identifiers can be referenced by other values
+		if !opts.isTypeScriptDeclare && js_ast.IsIdentifierUTF16(value.Name) {
+			value.Ref = p.declareSymbol(ast.SymbolOther, value.Loc, helpers.UTF16ToString(value.Name))
+		}
+
+		// Parse the initializer
+		if p.lexer.Token == js_lexer.TEquals {
+			p.lexer.Next()
+			value.ValueOrNil = p.parseExpr(js_ast.LComma)
+		}
+
+		values = append(values, value)
+
+		// Add this enum value as a member of the enum's namespace
+		exportedMembers[nameText] = js_ast.TSNamespaceMember{
+			Loc:         value.Loc,
+			Data:        &js_ast.TSNamespaceMemberProperty{},
+			IsEnumValue: true,
+		}
+
+		// A missing "," between members gets a targeted error message with a
+		// suggested fix instead of a generic "unexpected token" error
+		if p.lexer.Token != js_lexer.TComma && p.lexer.Token != js_lexer.TSemicolon {
+			if p.lexer.IsIdentifierOrKeyword() || p.lexer.Token == js_lexer.TStringLiteral {
+				var errorLoc logger.Loc
+				var errorText string
+
+				if value.ValueOrNil.Data == nil {
+					errorLoc = logger.Loc{Start: nameRange.End()}
+					errorText = fmt.Sprintf("Expected \",\" after %q in enum", nameText)
+				} else {
+					var nextName string
+					if p.lexer.Token == js_lexer.TStringLiteral {
+						nextName = helpers.UTF16ToString(p.lexer.StringLiteral())
+					} else {
+						nextName = p.lexer.Identifier.String
+					}
+					errorLoc = p.lexer.Loc()
+					errorText = fmt.Sprintf("Expected \",\" before %q in enum", nextName)
+				}
+
+				data := p.tracker.MsgData(logger.Range{Loc: errorLoc}, errorText)
+				data.Location.Suggestion = ","
+				p.log.AddMsg(logger.Msg{Kind: logger.Error, Data: data})
+				panic(js_lexer.LexerPanic{})
+			}
+			break
+		}
+		p.lexer.Next()
+	}
+
+	p.fnOrArrowDataParse = oldFnOrArrowData
+
+	if !opts.isTypeScriptDeclare {
+		// Avoid a collision with the enum closure argument variable if the
+		// enum exports a symbol with the same name as the enum itself:
+		//
+		//   enum foo {
+		//     foo = 123,
+		//     bar = foo,
+		//   }
+		//
+		// TypeScript generates the following code in this case:
+		//
+		//   var foo;
+		//   (function (foo) {
+		//     foo[foo["foo"] = 123] = "foo";
+		//     foo[foo["bar"] = 123] = "bar";
+		//   })(foo || (foo = {}));
+		//
+		// Whereas in this case:
+		//
+		//   enum foo {
+		//     bar = foo as any,
+		//   }
+		//
+		// TypeScript generates the following code:
+		//
+		//   var foo;
+		//   (function (foo) {
+		//     foo[foo["bar"] = foo] = "bar";
+		//   })(foo || (foo = {}));
+		//
+		if _, ok := p.currentScope.Members[nameText]; ok {
+			// Add a "_" to make tests easier to read, since non-bundler tests don't
+			// run the renamer. For external-facing things the renamer will avoid
+			// collisions automatically so this isn't important for correctness.
+			tsNamespace.ArgRef = p.newSymbol(ast.SymbolHoisted, "_"+nameText)
+			p.currentScope.Generated = append(p.currentScope.Generated, tsNamespace.ArgRef)
+		} else {
+			tsNamespace.ArgRef = p.declareSymbol(ast.SymbolHoisted, nameLoc, nameText)
+		}
+		p.refToTSNamespaceMemberData[tsNamespace.ArgRef] = enumMemberData
+
+		p.popScope()
+	}
+
+	p.lexer.Expect(js_lexer.TCloseBrace)
+
+	if opts.isTypeScriptDeclare {
+		if opts.isNamespaceScope && opts.isExport {
+			p.hasNonLocalExportDeclareInsideNamespace = true
+		}
+
+		// "declare enum" produces no runtime code
+		return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+	}
+
+	// Save these for when we do out-of-order enum visiting
+	if p.scopesInOrderForEnum == nil {
+		p.scopesInOrderForEnum = make(map[logger.Loc][]scopeOrder)
+	}
+
+	// Make a copy of "scopesInOrder" instead of a slice since the original
+	// array may be flattened in the future by "popAndFlattenScope"
+	p.scopesInOrderForEnum[loc] = append([]scopeOrder{}, p.scopesInOrder[scopeIndex:]...)
+
+	return js_ast.Stmt{Loc: loc, Data: &js_ast.SEnum{
+		Name:     name,
+		Arg:      tsNamespace.ArgRef,
+		Values:   values,
+		IsExport: opts.isExport,
+	}}
+}
+
+// This assumes the caller has already parsed the "import" token
+//
+// Handles "import Foo = require('bar')" and "import Foo = Bar.Baz", which are
+// lowered to a local declaration (kind chosen by selectLocalKind, starting
+// from const) initialized with the equivalent call or property-access
+// expression. Inside "declare" contexts a type-only placeholder statement is
+// returned instead.
+func (p *parser) parseTypeScriptImportEqualsStmt(loc logger.Loc, opts parseStmtOpts, defaultNameLoc logger.Loc, defaultName string) js_ast.Stmt {
+	p.lexer.Expect(js_lexer.TEquals)
+
+	kind := p.selectLocalKind(js_ast.LocalConst)
+	name := p.lexer.Identifier
+	value := js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EIdentifier{Ref: p.storeNameInRef(name)}}
+	p.lexer.Expect(js_lexer.TIdentifier)
+
+	if name.String == "require" && p.lexer.Token == js_lexer.TOpenParen {
+		// "import ns = require('x')"
+		p.lexer.Next()
+		path := js_ast.Expr{Loc: p.lexer.Loc(), Data: &js_ast.EString{Value: p.lexer.StringLiteral()}}
+		p.lexer.Expect(js_lexer.TStringLiteral)
+		p.lexer.Expect(js_lexer.TCloseParen)
+		value.Data = &js_ast.ECall{
+			Target: value,
+			Args:   []js_ast.Expr{path},
+		}
+	} else {
+		// "import Foo = Bar"
+		// "import Foo = Bar.Baz"
+		for p.lexer.Token == js_lexer.TDot {
+			p.lexer.Next()
+			value.Data = &js_ast.EDot{
+				Target:               value,
+				Name:                 p.lexer.Identifier.String,
+				NameLoc:              p.lexer.Loc(),
+				CanBeRemovedIfUnused: true,
+			}
+			p.lexer.Expect(js_lexer.TIdentifier)
+		}
+	}
+
+	p.lexer.ExpectOrInsertSemicolon()
+
+	if opts.isTypeScriptDeclare {
+		// "import type foo = require('bar');"
+		// "import type foo = bar.baz;"
+		return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
+	}
+
+	// Lower to "const foo = <value>"
+	ref := p.declareSymbol(ast.SymbolConst, defaultNameLoc, defaultName)
+	decls := []js_ast.Decl{{
+		Binding:    js_ast.Binding{Loc: defaultNameLoc, Data: &js_ast.BIdentifier{Ref: ref}},
+		ValueOrNil: value,
+	}}
+
+	return js_ast.Stmt{Loc: loc, Data: &js_ast.SLocal{
+		Kind:              kind,
+		Decls:             decls,
+		IsExport:          opts.isExport,
+		WasTSImportEquals: true,
+	}}
+}
+
+// Generate a TypeScript namespace object for this namespace's scope. If this
+// namespace is another block that is to be merged with an existing namespace,
+// use that earlier namespace's object instead of creating a fresh one.
+func (p *parser) getOrCreateExportedNamespaceMembers(name string, isExport bool) js_ast.TSNamespaceMembers {
+	// Merge with a sibling namespace from the same scope
+	if member, ok := p.currentScope.Members[name]; ok {
+		data := p.refToTSNamespaceMemberData[member.Ref]
+		if ns, ok := data.(*js_ast.TSNamespaceMemberNamespace); ok {
+			return ns.ExportedMembers
+		}
+	}
+
+	// Merge with a sibling namespace from a different scope
+	if parent := p.currentScope.TSNamespace; isExport && parent != nil {
+		if member, ok := parent.ExportedMembers[name]; ok {
+			if ns, ok := member.Data.(*js_ast.TSNamespaceMemberNamespace); ok {
+				return ns.ExportedMembers
+			}
+		}
+	}
+
+	// Otherwise, generate a new namespace object
+	return make(js_ast.TSNamespaceMembers)
+}
+
// parseTypeScriptNamespaceStmt parses a TypeScript namespace body starting at
// the namespace's name token (the "namespace"/"module" keyword has already
// been consumed; the lexer is positioned on the identifier). It declares the
// namespace symbol, collects exported members into the namespace object, and
// returns either an SNamespace statement or a type-only no-op when the
// namespace is empty of runtime values.
func (p *parser) parseTypeScriptNamespaceStmt(loc logger.Loc, opts parseStmtOpts) js_ast.Stmt {
	// "namespace Foo {}"
	nameLoc := p.lexer.Loc()
	nameText := p.lexer.Identifier.String
	p.lexer.Next()

	// Generate the namespace object. "getOrCreateExportedNamespaceMembers" may
	// return the member map of an earlier sibling block to merge with it.
	exportedMembers := p.getOrCreateExportedNamespaceMembers(nameText, opts.isExport)
	tsNamespace := &js_ast.TSNamespaceScope{
		ExportedMembers: exportedMembers,
		ArgRef:          ast.InvalidRef,
	}
	nsMemberData := &js_ast.TSNamespaceMemberNamespace{
		ExportedMembers: exportedMembers,
	}

	// Declare the namespace and create the scope
	name := ast.LocRef{Loc: nameLoc, Ref: ast.InvalidRef}
	scopeIndex := p.pushScopeForParsePass(js_ast.ScopeEntry, loc)
	p.currentScope.TSNamespace = tsNamespace

	// Save parser state that the namespace body must not inherit: "this" and
	// "return" are disallowed directly inside a namespace body.
	oldHasNonLocalExportDeclareInsideNamespace := p.hasNonLocalExportDeclareInsideNamespace
	oldFnOrArrowData := p.fnOrArrowDataParse
	p.hasNonLocalExportDeclareInsideNamespace = false
	p.fnOrArrowDataParse = fnOrArrowDataParse{
		isThisDisallowed:   true,
		isReturnDisallowed: true,
		needsAsyncLoc:      logger.Loc{Start: -1},
	}

	// Parse the statements inside the namespace
	var stmts []js_ast.Stmt
	if p.lexer.Token == js_lexer.TDot {
		// "namespace Foo.Bar {}" is shorthand for a nested exported namespace,
		// handled by recursing on the part after the dot
		dotLoc := p.lexer.Loc()
		p.lexer.Next()
		stmts = []js_ast.Stmt{p.parseTypeScriptNamespaceStmt(dotLoc, parseStmtOpts{
			isExport:            true,
			isNamespaceScope:    true,
			isTypeScriptDeclare: opts.isTypeScriptDeclare,
		})}
	} else if opts.isTypeScriptDeclare && p.lexer.Token != js_lexer.TOpenBrace {
		// "declare namespace Foo;" has no body at all
		p.lexer.ExpectOrInsertSemicolon()
	} else {
		p.lexer.Expect(js_lexer.TOpenBrace)
		stmts = p.parseStmtsUpTo(js_lexer.TCloseBrace, parseStmtOpts{
			isNamespaceScope:    true,
			isTypeScriptDeclare: opts.isTypeScriptDeclare,
		})
		p.lexer.Next()
	}

	// Restore the saved parser state
	hasNonLocalExportDeclareInsideNamespace := p.hasNonLocalExportDeclareInsideNamespace
	p.hasNonLocalExportDeclareInsideNamespace = oldHasNonLocalExportDeclareInsideNamespace
	p.fnOrArrowDataParse = oldFnOrArrowData

	// Add any exported members from this namespace's body as members of the
	// associated namespace object.
	for _, stmt := range stmts {
		switch s := stmt.Data.(type) {
		case *js_ast.SFunction:
			if s.IsExport {
				name := p.symbols[s.Fn.Name.Ref.InnerIndex].OriginalName
				member := js_ast.TSNamespaceMember{
					Loc:  s.Fn.Name.Loc,
					Data: &js_ast.TSNamespaceMemberProperty{},
				}
				exportedMembers[name] = member
				p.refToTSNamespaceMemberData[s.Fn.Name.Ref] = member.Data
			}

		case *js_ast.SClass:
			if s.IsExport {
				name := p.symbols[s.Class.Name.Ref.InnerIndex].OriginalName
				member := js_ast.TSNamespaceMember{
					Loc:  s.Class.Name.Loc,
					Data: &js_ast.TSNamespaceMemberProperty{},
				}
				exportedMembers[name] = member
				p.refToTSNamespaceMemberData[s.Class.Name.Ref] = member.Data
			}

		case *js_ast.SNamespace:
			// Nested namespaces re-expose their own member map so member
			// accesses can be resolved through this namespace too
			if s.IsExport {
				if memberData, ok := p.refToTSNamespaceMemberData[s.Name.Ref]; ok {
					if nsMemberData, ok := memberData.(*js_ast.TSNamespaceMemberNamespace); ok {
						member := js_ast.TSNamespaceMember{
							Loc: s.Name.Loc,
							Data: &js_ast.TSNamespaceMemberNamespace{
								ExportedMembers: nsMemberData.ExportedMembers,
							},
						}
						exportedMembers[p.symbols[s.Name.Ref.InnerIndex].OriginalName] = member
						p.refToTSNamespaceMemberData[s.Name.Ref] = member.Data
					}
				}
			}

		case *js_ast.SEnum:
			if s.IsExport {
				if memberData, ok := p.refToTSNamespaceMemberData[s.Name.Ref]; ok {
					if nsMemberData, ok := memberData.(*js_ast.TSNamespaceMemberNamespace); ok {
						member := js_ast.TSNamespaceMember{
							Loc: s.Name.Loc,
							Data: &js_ast.TSNamespaceMemberNamespace{
								ExportedMembers: nsMemberData.ExportedMembers,
							},
						}
						exportedMembers[p.symbols[s.Name.Ref.InnerIndex].OriginalName] = member
						p.refToTSNamespaceMemberData[s.Name.Ref] = member.Data
					}
				}
			}

		case *js_ast.SLocal:
			if s.IsExport {
				js_ast.ForEachIdentifierBindingInDecls(s.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
					name := p.symbols[b.Ref.InnerIndex].OriginalName
					member := js_ast.TSNamespaceMember{
						Loc:  loc,
						Data: &js_ast.TSNamespaceMemberProperty{},
					}
					exportedMembers[name] = member
					p.refToTSNamespaceMemberData[b.Ref] = member.Data
				})
			}
		}
	}

	// Import assignments may be only used in type expressions, not value
	// expressions. If this is the case, the TypeScript compiler removes
	// them entirely from the output. That can cause the namespace itself
	// to be considered empty and thus be removed.
	importEqualsCount := 0
	for _, stmt := range stmts {
		if local, ok := stmt.Data.(*js_ast.SLocal); ok && local.WasTSImportEquals && !local.IsExport {
			importEqualsCount++
		}
	}

	// TypeScript omits namespaces without values. These namespaces
	// are only allowed to be used in type expressions. They are
	// allowed to be exported, but can also only be used in type
	// expressions when imported. So we shouldn't count them as a
	// real export either.
	//
	// TypeScript also strangely counts namespaces containing only
	// "export declare" statements as non-empty even though "declare"
	// statements are only type annotations. We cannot omit the namespace
	// in that case. See https://github.com/evanw/esbuild/issues/1158.
	if (len(stmts) == importEqualsCount && !hasNonLocalExportDeclareInsideNamespace) || opts.isTypeScriptDeclare {
		p.popAndDiscardScope(scopeIndex)
		if opts.isModuleScope {
			p.localTypeNames[nameText] = true
		}
		return js_ast.Stmt{Loc: loc, Data: js_ast.STypeScriptShared}
	}

	if !opts.isTypeScriptDeclare {
		// Avoid a collision with the namespace closure argument variable if the
		// namespace exports a symbol with the same name as the namespace itself:
		//
		//   namespace foo {
		//     export let foo = 123
		//     console.log(foo)
		//   }
		//
		// TypeScript generates the following code in this case:
		//
		//   var foo;
		//   (function (foo_1) {
		//     foo_1.foo = 123;
		//     console.log(foo_1.foo);
		//   })(foo || (foo = {}));
		//
		if _, ok := p.currentScope.Members[nameText]; ok {
			// Add a "_" to make tests easier to read, since non-bundler tests don't
			// run the renamer. For external-facing things the renamer will avoid
			// collisions automatically so this isn't important for correctness.
			tsNamespace.ArgRef = p.newSymbol(ast.SymbolHoisted, "_"+nameText)
			p.currentScope.Generated = append(p.currentScope.Generated, tsNamespace.ArgRef)
		} else {
			tsNamespace.ArgRef = p.declareSymbol(ast.SymbolHoisted, nameLoc, nameText)
		}
		p.refToTSNamespaceMemberData[tsNamespace.ArgRef] = nsMemberData
	}

	p.popScope()
	if !opts.isTypeScriptDeclare {
		name.Ref = p.declareSymbol(ast.SymbolTSNamespace, nameLoc, nameText)
		p.refToTSNamespaceMemberData[name.Ref] = nsMemberData
	}
	return js_ast.Stmt{Loc: loc, Data: &js_ast.SNamespace{
		Name:     name,
		Arg:      tsNamespace.ArgRef,
		Stmts:    stmts,
		IsExport: opts.isExport,
	}}
}
+
// generateClosureForTypeScriptNamespaceOrEnum lowers a TypeScript namespace
// (or a nested enum that fell back to namespace form) into the classic
// "var ns; (ns => { ... })(ns || (ns = {}))" pattern. The generated variable
// declaration plus the closure invocation are appended to "stmts" and the
// extended slice is returned.
func (p *parser) generateClosureForTypeScriptNamespaceOrEnum(
	stmts []js_ast.Stmt, stmtLoc logger.Loc, isExport bool, nameLoc logger.Loc,
	nameRef ast.Ref, argRef ast.Ref, stmtsInsideClosure []js_ast.Stmt,
) []js_ast.Stmt {
	// Follow the link chain in case symbols were merged
	symbol := p.symbols[nameRef.InnerIndex]
	for symbol.Link != ast.InvalidRef {
		nameRef = symbol.Link
		symbol = p.symbols[nameRef.InnerIndex]
	}

	// Make sure to only emit a variable once for a given namespace, since there
	// can be multiple namespace blocks for the same namespace
	if (symbol.Kind == ast.SymbolTSNamespace || symbol.Kind == ast.SymbolTSEnum) && !p.emittedNamespaceVars[nameRef] {
		decls := []js_ast.Decl{{Binding: js_ast.Binding{Loc: nameLoc, Data: &js_ast.BIdentifier{Ref: nameRef}}}}
		p.emittedNamespaceVars[nameRef] = true
		if p.currentScope == p.moduleScope {
			// Top-level namespace: "var"
			stmts = append(stmts, js_ast.Stmt{Loc: stmtLoc, Data: &js_ast.SLocal{
				Kind:     js_ast.LocalVar,
				Decls:    decls,
				IsExport: isExport,
			}})
		} else {
			// Nested namespace: "let"
			stmts = append(stmts, js_ast.Stmt{Loc: stmtLoc, Data: &js_ast.SLocal{
				Kind:  js_ast.LocalLet,
				Decls: decls,
			}})
		}
	}

	// Build the argument passed to the closure. An exported namespace nested
	// inside another namespace must also be assigned onto the enclosing
	// namespace object via its closure argument.
	var argExpr js_ast.Expr
	if p.options.minifySyntax && !p.options.unsupportedJSFeatures.Has(compat.LogicalAssignment) {
		// If the "||=" operator is supported, our minified output can be slightly smaller
		if isExport && p.enclosingNamespaceArgRef != nil {
			// "name = (enclosing.name ||= {})"
			argExpr = js_ast.Assign(
				js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
				js_ast.Expr{Loc: nameLoc, Data: &js_ast.EBinary{
					Op: js_ast.BinOpLogicalOrAssign,
					Left: js_ast.Expr{Loc: nameLoc, Data: p.dotOrMangledPropVisit(
						js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
						p.symbols[nameRef.InnerIndex].OriginalName,
						nameLoc,
					)},
					Right: js_ast.Expr{Loc: nameLoc, Data: &js_ast.EObject{}},
				}},
			)
			p.recordUsage(*p.enclosingNamespaceArgRef)
			p.recordUsage(nameRef)
		} else {
			// "name ||= {}"
			argExpr = js_ast.Expr{Loc: nameLoc, Data: &js_ast.EBinary{
				Op:    js_ast.BinOpLogicalOrAssign,
				Left:  js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
				Right: js_ast.Expr{Loc: nameLoc, Data: &js_ast.EObject{}},
			}}
			p.recordUsage(nameRef)
		}
	} else {
		if isExport && p.enclosingNamespaceArgRef != nil {
			// "name = enclosing.name || (enclosing.name = {})"
			name := p.symbols[nameRef.InnerIndex].OriginalName
			argExpr = js_ast.Assign(
				js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
				js_ast.Expr{Loc: nameLoc, Data: &js_ast.EBinary{
					Op: js_ast.BinOpLogicalOr,
					Left: js_ast.Expr{Loc: nameLoc, Data: p.dotOrMangledPropVisit(
						js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
						name,
						nameLoc,
					)},
					Right: js_ast.Assign(
						js_ast.Expr{Loc: nameLoc, Data: p.dotOrMangledPropVisit(
							js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: *p.enclosingNamespaceArgRef}},
							name,
							nameLoc,
						)},
						js_ast.Expr{Loc: nameLoc, Data: &js_ast.EObject{}},
					),
				}},
			)
			// The enclosing argument is referenced twice in this form
			p.recordUsage(*p.enclosingNamespaceArgRef)
			p.recordUsage(*p.enclosingNamespaceArgRef)
			p.recordUsage(nameRef)
		} else {
			// "name || (name = {})"
			argExpr = js_ast.Expr{Loc: nameLoc, Data: &js_ast.EBinary{
				Op:   js_ast.BinOpLogicalOr,
				Left: js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
				Right: js_ast.Assign(
					js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
					js_ast.Expr{Loc: nameLoc, Data: &js_ast.EObject{}},
				),
			}}
			p.recordUsage(nameRef)
			p.recordUsage(nameRef)
		}
	}

	// Try to use an arrow function if possible for compactness
	var targetExpr js_ast.Expr
	args := []js_ast.Arg{{Binding: js_ast.Binding{Loc: nameLoc, Data: &js_ast.BIdentifier{Ref: argRef}}}}
	if p.options.unsupportedJSFeatures.Has(compat.Arrow) {
		targetExpr = js_ast.Expr{Loc: stmtLoc, Data: &js_ast.EFunction{Fn: js_ast.Fn{
			Args: args,
			Body: js_ast.FnBody{Loc: stmtLoc, Block: js_ast.SBlock{Stmts: stmtsInsideClosure}},
		}}}
	} else {
		// "(() => { foo() })()" => "(() => foo())()"
		if p.options.minifySyntax && len(stmtsInsideClosure) == 1 {
			if expr, ok := stmtsInsideClosure[0].Data.(*js_ast.SExpr); ok {
				stmtsInsideClosure[0].Data = &js_ast.SReturn{ValueOrNil: expr.Value}
			}
		}
		targetExpr = js_ast.Expr{Loc: stmtLoc, Data: &js_ast.EArrow{
			Args:       args,
			Body:       js_ast.FnBody{Loc: stmtLoc, Block: js_ast.SBlock{Stmts: stmtsInsideClosure}},
			PreferExpr: true,
		}}
	}

	// Call the closure with the name object
	stmts = append(stmts, js_ast.Stmt{Loc: stmtLoc, Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmtLoc, Data: &js_ast.ECall{
		Target: targetExpr,
		Args:   []js_ast.Expr{argExpr},
	}}}})

	return stmts
}
+
// generateClosureForTypeScriptEnum lowers a TypeScript enum into a closure.
// Top-level enums use the compact "var x = /* @__PURE__ */ ((x) => { ...;
// return x })(x || {})" form so tree-shaking can remove unused enums; nested
// enums fall back to the namespace-style closure. The generated statements
// are appended to "stmts" and the extended slice is returned.
func (p *parser) generateClosureForTypeScriptEnum(
	stmts []js_ast.Stmt, stmtLoc logger.Loc, isExport bool, nameLoc logger.Loc,
	nameRef ast.Ref, argRef ast.Ref, exprsInsideClosure []js_ast.Expr,
	allValuesArePure bool,
) []js_ast.Stmt {
	// Bail back to the namespace code for enums that aren't at the top level.
	// Doing this for nested enums is problematic for two reasons. First of all
	// enums inside of namespaces must be property accesses off the namespace
	// object instead of variable declarations. Also we'd need to use "let"
	// instead of "var" which doesn't allow sibling declarations to be merged.
	if p.currentScope != p.moduleScope {
		stmtsInsideClosure := []js_ast.Stmt{}
		if len(exprsInsideClosure) > 0 {
			if p.options.minifySyntax {
				// "a; b; c;" => "a, b, c;"
				joined := js_ast.JoinAllWithComma(exprsInsideClosure)
				stmtsInsideClosure = append(stmtsInsideClosure, js_ast.Stmt{Loc: joined.Loc, Data: &js_ast.SExpr{Value: joined}})
			} else {
				for _, expr := range exprsInsideClosure {
					stmtsInsideClosure = append(stmtsInsideClosure, js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}})
				}
			}
		}
		return p.generateClosureForTypeScriptNamespaceOrEnum(
			stmts, stmtLoc, isExport, nameLoc, nameRef, argRef, stmtsInsideClosure)
	}

	// This uses an output format for enums that's different but equivalent to
	// what TypeScript uses. Here is TypeScript's output:
	//
	//   var x;
	//   (function (x) {
	//     x[x["y"] = 1] = "y";
	//   })(x || (x = {}));
	//
	// And here's our output:
	//
	//   var x = /* @__PURE__ */ ((x) => {
	//     x[x["y"] = 1] = "y";
	//     return x;
	//   })(x || {});
	//
	// One benefit is that the minified output is smaller:
	//
	//   // Old output minified
	//   var x;(function(n){n[n.y=1]="y"})(x||(x={}));
	//
	//   // New output minified
	//   var x=(r=>(r[r.y=1]="y",r))(x||{});
	//
	// Another benefit is that the @__PURE__ annotation means it automatically
	// works with tree-shaking, even with more advanced features such as sibling
	// enum declarations and enum/namespace merges. Ideally all uses of the enum
	// are just direct references to enum members (and are therefore inlined as
	// long as the enum value is a constant) and the enum definition itself is
	// unused and can be removed as dead code.

	// Follow the link chain in case symbols were merged
	symbol := p.symbols[nameRef.InnerIndex]
	for symbol.Link != ast.InvalidRef {
		nameRef = symbol.Link
		symbol = p.symbols[nameRef.InnerIndex]
	}

	// Generate the body of the closure, including a return statement at the end
	stmtsInsideClosure := []js_ast.Stmt{}
	argExpr := js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: argRef}}
	if p.options.minifySyntax {
		// "a; b; return c;" => "return a, b, c;"
		joined := js_ast.JoinAllWithComma(exprsInsideClosure)
		joined = js_ast.JoinWithComma(joined, argExpr)
		stmtsInsideClosure = append(stmtsInsideClosure, js_ast.Stmt{Loc: joined.Loc, Data: &js_ast.SReturn{ValueOrNil: joined}})
	} else {
		for _, expr := range exprsInsideClosure {
			stmtsInsideClosure = append(stmtsInsideClosure, js_ast.Stmt{Loc: expr.Loc, Data: &js_ast.SExpr{Value: expr}})
		}
		stmtsInsideClosure = append(stmtsInsideClosure, js_ast.Stmt{Loc: argExpr.Loc, Data: &js_ast.SReturn{ValueOrNil: argExpr}})
	}

	// Try to use an arrow function if possible for compactness
	var targetExpr js_ast.Expr
	args := []js_ast.Arg{{Binding: js_ast.Binding{Loc: nameLoc, Data: &js_ast.BIdentifier{Ref: argRef}}}}
	if p.options.unsupportedJSFeatures.Has(compat.Arrow) {
		targetExpr = js_ast.Expr{Loc: stmtLoc, Data: &js_ast.EFunction{Fn: js_ast.Fn{
			Args: args,
			Body: js_ast.FnBody{Loc: stmtLoc, Block: js_ast.SBlock{Stmts: stmtsInsideClosure}},
		}}}
	} else {
		targetExpr = js_ast.Expr{Loc: stmtLoc, Data: &js_ast.EArrow{
			Args:       args,
			Body:       js_ast.FnBody{Loc: stmtLoc, Block: js_ast.SBlock{Stmts: stmtsInsideClosure}},
			PreferExpr: p.options.minifySyntax,
		}}
	}

	// Call the closure with the name object and store it to the variable
	decls := []js_ast.Decl{{
		Binding: js_ast.Binding{Loc: nameLoc, Data: &js_ast.BIdentifier{Ref: nameRef}},
		ValueOrNil: js_ast.Expr{Loc: stmtLoc, Data: &js_ast.ECall{
			Target: targetExpr,
			Args: []js_ast.Expr{{Loc: nameLoc, Data: &js_ast.EBinary{
				Op:    js_ast.BinOpLogicalOr,
				Left:  js_ast.Expr{Loc: nameLoc, Data: &js_ast.EIdentifier{Ref: nameRef}},
				Right: js_ast.Expr{Loc: nameLoc, Data: &js_ast.EObject{}},
			}}},
			CanBeUnwrappedIfUnused: allValuesArePure,
		}},
	}}
	p.recordUsage(nameRef)

	// Use a "var" statement since this is a top-level enum, but only use "export" once
	stmts = append(stmts, js_ast.Stmt{Loc: stmtLoc, Data: &js_ast.SLocal{
		Kind:     js_ast.LocalVar,
		Decls:    decls,
		IsExport: isExport && !p.emittedNamespaceVars[nameRef],
	}})
	p.emittedNamespaceVars[nameRef] = true

	return stmts
}
+
+func (p *parser) wrapInlinedEnum(value js_ast.Expr, comment string) js_ast.Expr {
+	if strings.Contains(comment, "*/") {
+		// Don't wrap with a comment
+		return value
+	}
+
+	// Wrap with a comment
+	return js_ast.Expr{Loc: value.Loc, Data: &js_ast.EInlinedEnum{
+		Value:   value,
+		Comment: comment,
+	}}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/js_printer/js_printer.go b/source/vendor/github.com/evanw/esbuild/internal/js_printer/js_printer.go
new file mode 100644
index 0000000..3c5cab0
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/js_printer/js_printer.go
@@ -0,0 +1,4924 @@
+package js_printer
+
+import (
+	"bytes"
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/renamer"
+	"github.com/evanw/esbuild/internal/sourcemap"
+)
+
// Cached infinities used when printing numeric literals.
var positiveInfinity = math.Inf(1)
var negativeInfinity = math.Inf(-1)

// Digits used when emitting "\xNN", "\uNNNN", and "\u{...}" escape sequences.
const hexChars = "0123456789ABCDEF"

// The printable ASCII range (space through tilde). Characters outside this
// range may need escaping depending on the output charset.
const firstASCII = 0x20
const lastASCII = 0x7E

// UTF-16 surrogate ranges, used to detect and combine surrogate pairs.
const firstHighSurrogate = 0xD800
const lastHighSurrogate = 0xDBFF
const firstLowSurrogate = 0xDC00
const lastLowSurrogate = 0xDFFF
+
// QuoteIdentifier appends "name" to "js" with all characters outside the
// printable ASCII range escaped as "\uNNNN" (or "\u{...}" for code points
// above the BMP when Unicode escapes are supported). It panics if a non-BMP
// character must be escaped but the target doesn't support Unicode escapes.
func QuoteIdentifier(js []byte, name string, unsupportedFeatures compat.JSFeature) []byte {
	isASCII := false
	asciiStart := 0
	for i, c := range name {
		if c >= firstASCII && c <= lastASCII {
			// Fast path: a run of ASCII characters
			if !isASCII {
				isASCII = true
				asciiStart = i
			}
		} else {
			// Slow path: escape non-ASCII characters
			if isASCII {
				// Flush the pending ASCII run before writing the escape
				js = append(js, name[asciiStart:i]...)
				isASCII = false
			}
			if c <= 0xFFFF {
				js = append(js, '\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15])
			} else if !unsupportedFeatures.Has(compat.UnicodeEscapes) {
				js = append(js, fmt.Sprintf("\\u{%X}", c)...)
			} else {
				panic("Internal error: Cannot encode identifier: Unicode escapes are unsupported")
			}
		}
	}
	if isASCII {
		// Print one final run of ASCII characters
		js = append(js, name[asciiStart:]...)
	}
	return js
}
+
// printUnquotedUTF16 appends the body of a string or template literal to the
// output without the surrounding quotes, escaping characters as required for
// the given quote character ('\'', '"', or '`'). When a line-length limit is
// active (and printQuotedNoWrap isn't set), overlong lines are wrapped with
// a backslash-newline, which is invisible inside a string literal.
func (p *printer) printUnquotedUTF16(text []uint16, quote rune, flags printQuotedFlags) {
	temp := make([]byte, utf8.UTFMax)
	js := p.js
	i := 0
	n := len(text)

	// Only compute the line length if necessary
	var startLineLength int
	wrapLongLines := false
	if p.options.LineLimit > 0 && (flags&printQuotedNoWrap) == 0 {
		startLineLength = p.currentLineLength()
		if startLineLength > p.options.LineLimit {
			startLineLength = p.options.LineLimit
		}
		wrapLongLines = true
	}

	for i < n {
		// Wrap long lines that are over the limit using escaped newlines
		if wrapLongLines && startLineLength+i >= p.options.LineLimit {
			js = append(js, "\\\n"...)
			startLineLength -= p.options.LineLimit
		}

		c := text[i]
		i++

		switch c {
		// Special-case the null character since it may mess with code written in C
		// that treats null characters as the end of the string.
		case '\x00':
			// We don't want "\x001" to be written as "\01"
			if i < n && text[i] >= '0' && text[i] <= '9' {
				js = append(js, "\\x00"...)
			} else {
				js = append(js, "\\0"...)
			}

		// Special-case the bell character since it may cause dumping this file to
		// the terminal to make a sound, which is undesirable. Note that we can't
		// use an octal literal to print this shorter since octal literals are not
		// allowed in strict mode (or in template strings).
		case '\x07':
			js = append(js, "\\x07"...)

		case '\b':
			js = append(js, "\\b"...)

		case '\f':
			js = append(js, "\\f"...)

		case '\n':
			if quote == '`' {
				startLineLength = -i // Printing a real newline resets the line length
				js = append(js, '\n')
			} else {
				js = append(js, "\\n"...)
			}

		case '\r':
			js = append(js, "\\r"...)

		case '\v':
			js = append(js, "\\v"...)

		case '\x1B':
			js = append(js, "\\x1B"...)

		case '\\':
			js = append(js, "\\\\"...)

		case '/':
			// Avoid generating the sequence "</script" in JS code
			if !p.options.UnsupportedFeatures.Has(compat.InlineScript) && i >= 2 && text[i-2] == '<' && i+6 <= len(text) {
				script := "script"
				matches := true
				for j := 0; j < 6; j++ {
					// Case-insensitive comparison against "script"
					a := text[i+j]
					b := uint16(script[j])
					if a >= 'A' && a <= 'Z' {
						a += 'a' - 'A'
					}
					if a != b {
						matches = false
						break
					}
				}
				if matches {
					js = append(js, '\\')
				}
			}
			js = append(js, '/')

		case '\'':
			if quote == '\'' {
				js = append(js, '\\')
			}
			js = append(js, '\'')

		case '"':
			if quote == '"' {
				js = append(js, '\\')
			}
			js = append(js, '"')

		case '`':
			if quote == '`' {
				js = append(js, '\\')
			}
			js = append(js, '`')

		case '$':
			// Escape "${" inside template literals to avoid a substitution
			if quote == '`' && i < n && text[i] == '{' {
				js = append(js, '\\')
			}
			js = append(js, '$')

		case '\u2028':
			js = append(js, "\\u2028"...)

		case '\u2029':
			js = append(js, "\\u2029"...)

		case '\uFEFF':
			js = append(js, "\\uFEFF"...)

		default:
			switch {
			// Common case: just append a single byte
			case c <= lastASCII:
				js = append(js, byte(c))

			// Is this a high surrogate?
			case c >= firstHighSurrogate && c <= lastHighSurrogate:
				// Is there a next character?
				if i < n {
					c2 := text[i]

					// Is it a low surrogate?
					if c2 >= firstLowSurrogate && c2 <= lastLowSurrogate {
						r := (rune(c) << 10) + rune(c2) + (0x10000 - (firstHighSurrogate << 10) - firstLowSurrogate)
						i++

						// Escape this character if UTF-8 isn't allowed
						if p.options.ASCIIOnly {
							if !p.options.UnsupportedFeatures.Has(compat.UnicodeEscapes) {
								js = append(js, fmt.Sprintf("\\u{%X}", r)...)
							} else {
								js = append(js,
									'\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15],
									'\\', 'u', hexChars[c2>>12], hexChars[(c2>>8)&15], hexChars[(c2>>4)&15], hexChars[c2&15],
								)
							}
							continue
						}

						// Otherwise, encode to UTF-8
						width := utf8.EncodeRune(temp, r)
						js = append(js, temp[:width]...)
						continue
					}
				}

				// Write an unpaired high surrogate
				js = append(js, '\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15])

			// Is this an unpaired low surrogate or four-digit hex escape?
			case (c >= firstLowSurrogate && c <= lastLowSurrogate) || (p.options.ASCIIOnly && c > 0xFF):
				js = append(js, '\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15])

			// Can this be a two-digit hex escape?
			case p.options.ASCIIOnly:
				js = append(js, '\\', 'x', hexChars[c>>4], hexChars[c&15])

			// Otherwise, just encode to UTF-8
			default:
				width := utf8.EncodeRune(temp, rune(c))
				js = append(js, temp[:width]...)
			}
		}
	}

	p.js = js
}
+
+// JSX tag syntax doesn't support character escapes so non-ASCII identifiers
+// must be printed as UTF-8 even when the charset is set to ASCII.
+func (p *printer) printJSXTag(tagOrNil js_ast.Expr) {
+	switch e := tagOrNil.Data.(type) {
+	case *js_ast.EString:
+		p.addSourceMapping(tagOrNil.Loc)
+		p.print(helpers.UTF16ToString(e.Value))
+
+	case *js_ast.EIdentifier:
+		name := p.renamer.NameForSymbol(e.Ref)
+		p.addSourceMappingForName(tagOrNil.Loc, name, e.Ref)
+		p.print(name)
+
+	case *js_ast.EDot:
+		p.printJSXTag(e.Target)
+		p.print(".")
+		p.addSourceMapping(e.NameLoc)
+		p.print(e.Name)
+
+	default:
+		if tagOrNil.Data != nil {
+			p.printExpr(tagOrNil, js_ast.LLowest, 0)
+		}
+	}
+}
+
// printer holds all state used while printing one JavaScript AST to a byte
// buffer: symbol/renaming information, the output buffer itself, source-map
// construction state, and positional bookkeeping used to decide where spaces,
// semicolons, and parentheses are required.
type printer struct {
	symbols                ast.SymbolMap
	astHelpers             js_ast.HelperContext
	renamer                renamer.Renamer
	importRecords          []ast.ImportRecord
	callTarget             js_ast.E
	exprComments           map[logger.Loc][]string
	printedExprComments    map[logger.Loc]bool
	hasLegalComment        map[string]struct{}
	extractedLegalComments []string
	// js is the output buffer that all print helpers append to
	js                     []byte
	jsonMetadataImports    []string
	binaryExprStack        []binaryExprVisitor
	options                Options
	builder                sourcemap.ChunkBuilder
	printNextIndentAsSpace bool

	// Byte offsets into "js" recording where certain constructs began,
	// used to decide when parentheses are needed
	stmtStart          int
	exportDefaultStart int
	arrowExprStart     int
	forOfInitStart     int

	withNesting          int
	prevOpEnd            int
	needSpaceBeforeDot   int
	prevRegExpEnd        int
	noLeadingNewlineHere int
	oldLineStart         int
	oldLineEnd           int
	intToBytesBuffer     [64]byte
	needsSemicolon       bool
	wasLazyExport        bool
	prevOp               js_ast.OpCode
	moduleType           js_ast.ModuleType
}
+
+func (p *printer) print(text string) {
+	p.js = append(p.js, text...)
+}
+
+// This is the same as "print(string(bytes))" without any unnecessary temporary
+// allocations
+func (p *printer) printBytes(bytes []byte) {
+	p.js = append(p.js, bytes...)
+}
+
// printQuotedFlags adjusts how quoted strings are printed.
type printQuotedFlags uint8

const (
	// printQuotedAllowBacktick permits choosing "`" as the quote character
	printQuotedAllowBacktick printQuotedFlags = 1 << iota

	// printQuotedNoWrap disables escaped-newline wrapping under LineLimit
	printQuotedNoWrap
)
+
+func (p *printer) printQuotedUTF8(text string, flags printQuotedFlags) {
+	p.printQuotedUTF16(helpers.StringToUTF16(text), flags)
+}
+
+func (p *printer) addSourceMapping(loc logger.Loc) {
+	if p.options.AddSourceMappings {
+		p.builder.AddSourceMapping(loc, "", p.js)
+	}
+}
+
+func (p *printer) addSourceMappingForName(loc logger.Loc, name string, ref ast.Ref) {
+	if p.options.AddSourceMappings {
+		if originalName := p.symbols.Get(ast.FollowSymbols(p.symbols, ref)).OriginalName; originalName != name {
+			p.builder.AddSourceMapping(loc, originalName, p.js)
+		} else {
+			p.builder.AddSourceMapping(loc, "", p.js)
+		}
+	}
+}
+
+func (p *printer) printIndent() {
+	if p.options.MinifyWhitespace {
+		return
+	}
+
+	if p.printNextIndentAsSpace {
+		p.print(" ")
+		p.printNextIndentAsSpace = false
+		return
+	}
+
+	indent := p.options.Indent
+	if p.options.LineLimit > 0 && indent*2 >= p.options.LineLimit {
+		indent = p.options.LineLimit / 2
+	}
+	for i := 0; i < indent; i++ {
+		p.print("  ")
+	}
+}
+
+func (p *printer) mangledPropName(ref ast.Ref) string {
+	ref = ast.FollowSymbols(p.symbols, ref)
+	if name, ok := p.options.MangledProps[ref]; ok {
+		return name
+	}
+	return p.renamer.NameForSymbol(ref)
+}
+
+func (p *printer) tryToGetImportedEnumValue(target js_ast.Expr, name string) (js_ast.TSEnumValue, bool) {
+	if id, ok := target.Data.(*js_ast.EImportIdentifier); ok {
+		ref := ast.FollowSymbols(p.symbols, id.Ref)
+		if symbol := p.symbols.Get(ref); symbol.Kind == ast.SymbolTSEnum {
+			if enum, ok := p.options.TSEnums[ref]; ok {
+				value, ok := enum[name]
+				return value, ok
+			}
+		}
+	}
+	return js_ast.TSEnumValue{}, false
+}
+
+func (p *printer) tryToGetImportedEnumValueUTF16(target js_ast.Expr, name []uint16) (js_ast.TSEnumValue, string, bool) {
+	if id, ok := target.Data.(*js_ast.EImportIdentifier); ok {
+		ref := ast.FollowSymbols(p.symbols, id.Ref)
+		if symbol := p.symbols.Get(ref); symbol.Kind == ast.SymbolTSEnum {
+			if enum, ok := p.options.TSEnums[ref]; ok {
+				name := helpers.UTF16ToString(name)
+				value, ok := enum[name]
+				return value, name, ok
+			}
+		}
+	}
+	return js_ast.TSEnumValue{}, "", false
+}
+
+func (p *printer) printClauseAlias(loc logger.Loc, alias string) {
+	if js_ast.IsIdentifier(alias) {
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(loc)
+		p.printIdentifier(alias)
+	} else {
+		p.addSourceMapping(loc)
+		p.printQuotedUTF8(alias, 0)
+	}
+}
+
+// Note: The functions below check whether something can be printed as an
+// identifier or if it needs to be quoted (e.g. "x.y" vs. "x['y']") using the
+// ES5 identifier validity test to maximize cross-platform portability. Even
+// though newer JavaScript environments can handle more Unicode characters,
+// there isn't a published document that says which Unicode versions are
+// supported by which browsers. Even if a character is considered valid in the
+// latest version of Unicode, we don't know if the browser we're targeting
+// contains an older version of Unicode or not. So for safety, we quote
+// anything that isn't guaranteed to be compatible with ES5, the oldest
+// JavaScript language target that we support.
+
+func CanEscapeIdentifier(name string, UnsupportedFeatures compat.JSFeature, asciiOnly bool) bool {
+	return js_ast.IsIdentifierES5AndESNext(name) && (!asciiOnly ||
+		!UnsupportedFeatures.Has(compat.UnicodeEscapes) ||
+		!helpers.ContainsNonBMPCodePoint(name))
+}
+
+func (p *printer) canPrintIdentifier(name string) bool {
+	return js_ast.IsIdentifierES5AndESNext(name) && (!p.options.ASCIIOnly ||
+		!p.options.UnsupportedFeatures.Has(compat.UnicodeEscapes) ||
+		!helpers.ContainsNonBMPCodePoint(name))
+}
+
+func (p *printer) canPrintIdentifierUTF16(name []uint16) bool {
+	return js_ast.IsIdentifierES5AndESNextUTF16(name) && (!p.options.ASCIIOnly ||
+		!p.options.UnsupportedFeatures.Has(compat.UnicodeEscapes) ||
+		!helpers.ContainsNonBMPCodePointUTF16(name))
+}
+
+func (p *printer) printIdentifier(name string) {
+	if p.options.ASCIIOnly {
+		p.js = QuoteIdentifier(p.js, name, p.options.UnsupportedFeatures)
+	} else {
+		p.print(name)
+	}
+}
+
// This is the same as "printIdentifier(StringToUTF16(bytes))" without any
// unnecessary temporary allocations
func (p *printer) printIdentifierUTF16(name []uint16) {
	var temp [utf8.UTFMax]byte
	n := len(name)

	for i := 0; i < n; i++ {
		c := rune(name[i])

		// Combine a surrogate pair into a single code point before deciding
		// how to print it
		if c >= firstHighSurrogate && c <= lastHighSurrogate && i+1 < n {
			if c2 := rune(name[i+1]); c2 >= firstLowSurrogate && c2 <= lastLowSurrogate {
				c = (c << 10) + c2 + (0x10000 - (firstHighSurrogate << 10) - firstLowSurrogate)
				i++
			}
		}

		// Escape non-ASCII characters when the output charset is ASCII-only
		if p.options.ASCIIOnly && c > lastASCII {
			if c <= 0xFFFF {
				p.js = append(p.js, '\\', 'u', hexChars[c>>12], hexChars[(c>>8)&15], hexChars[(c>>4)&15], hexChars[c&15])
			} else if !p.options.UnsupportedFeatures.Has(compat.UnicodeEscapes) {
				p.js = append(p.js, fmt.Sprintf("\\u{%X}", c)...)
			} else {
				panic("Internal error: Cannot encode identifier: Unicode escapes are unsupported")
			}
			continue
		}

		// Otherwise, encode the code point as UTF-8
		width := utf8.EncodeRune(temp[:], c)
		p.js = append(p.js, temp[:width]...)
	}
}
+
+// printNumber prints a floating-point number literal. NaN and Infinity have
+// no literal syntax, so they are printed as "0 / 0" and "1 / 0" when
+// minifying or inside "with" (where the bare identifiers could resolve to
+// properties). Negative numbers are parenthesized when the surrounding
+// precedence requires it (e.g. "(-1).toString").
+func (p *printer) printNumber(value float64, level js_ast.L) {
+	absValue := math.Abs(value)
+
+	if math.IsNaN(value) {
+		p.printSpaceBeforeIdentifier()
+		if p.withNesting != 0 {
+			// "with (x) NaN" really means "x.NaN" so avoid identifiers when "with" is present
+			wrap := level >= js_ast.LMultiply
+			if wrap {
+				p.print("(")
+			}
+			if p.options.MinifyWhitespace {
+				p.print("0/0")
+			} else {
+				p.print("0 / 0")
+			}
+			if wrap {
+				p.print(")")
+			}
+		} else {
+			p.print("NaN")
+		}
+	} else if value == positiveInfinity || value == negativeInfinity {
+		// "with (x) Infinity" really means "x.Infinity" so avoid identifiers when "with" is present
+		wrap := ((p.options.MinifySyntax || p.withNesting != 0) && level >= js_ast.LMultiply) ||
+			(value == negativeInfinity && level >= js_ast.LPrefix)
+		if wrap {
+			p.print("(")
+		}
+		if value == negativeInfinity {
+			p.printSpaceBeforeOperator(js_ast.UnOpNeg)
+			p.print("-")
+		} else {
+			p.printSpaceBeforeIdentifier()
+		}
+		if !p.options.MinifySyntax && p.withNesting == 0 {
+			p.print("Infinity")
+		} else if p.options.MinifyWhitespace {
+			p.print("1/0")
+		} else {
+			p.print("1 / 0")
+		}
+		if wrap {
+			p.print(")")
+		}
+	} else {
+		if !math.Signbit(value) {
+			p.printSpaceBeforeIdentifier()
+			p.printNonNegativeFloat(absValue)
+		} else if level >= js_ast.LPrefix {
+			// Expressions such as "(-1).toString" need to wrap negative numbers.
+			// Instead of testing for "value < 0" we test for "signbit(value)" and
+			// "!isNaN(value)" because we need this to be true for "-0" and "-0 < 0"
+			// is false.
+			p.print("(-")
+			p.printNonNegativeFloat(absValue)
+			p.print(")")
+		} else {
+			p.printSpaceBeforeOperator(js_ast.UnOpNeg)
+			p.print("-")
+			p.printNonNegativeFloat(absValue)
+		}
+	}
+}
+
+// willPrintExprCommentsAtLoc returns true if there are comments attached to
+// "loc" that have not been printed yet. Comments are dropped entirely when
+// minifying whitespace.
+func (p *printer) willPrintExprCommentsAtLoc(loc logger.Loc) bool {
+	if p.options.MinifyWhitespace || p.printedExprComments[loc] {
+		return false
+	}
+	return p.exprComments[loc] != nil
+}
+
+// willPrintExprCommentsForAnyOf returns true if any expression in "exprs"
+// still has unprinted comments attached to its location.
+func (p *printer) willPrintExprCommentsForAnyOf(exprs []js_ast.Expr) bool {
+	for i := range exprs {
+		if p.willPrintExprCommentsAtLoc(exprs[i].Loc) {
+			return true
+		}
+	}
+	return false
+}
+
+// printBinding prints a destructuring binding target: a missing array slot,
+// a plain identifier, an array pattern, or an object pattern. Array and
+// object patterns recurse into printBinding for their nested elements.
+func (p *printer) printBinding(binding js_ast.Binding) {
+	switch b := binding.Data.(type) {
+	case *js_ast.BMissing:
+		p.addSourceMapping(binding.Loc)
+
+	case *js_ast.BIdentifier:
+		name := p.renamer.NameForSymbol(b.Ref)
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMappingForName(binding.Loc, name, b.Ref)
+		p.printIdentifier(name)
+
+	case *js_ast.BArray:
+		// Comments attached to any item force the pattern onto multiple lines
+		isMultiLine := (len(b.Items) > 0 && !b.IsSingleLine) || p.willPrintExprCommentsAtLoc(b.CloseBracketLoc)
+		if !p.options.MinifyWhitespace && !isMultiLine {
+			for _, item := range b.Items {
+				if p.willPrintExprCommentsAtLoc(item.Loc) {
+					isMultiLine = true
+					break
+				}
+			}
+		}
+		p.addSourceMapping(binding.Loc)
+		p.print("[")
+		if len(b.Items) > 0 || isMultiLine {
+			if isMultiLine {
+				p.options.Indent++
+			}
+
+			for i, item := range b.Items {
+				if i != 0 {
+					p.print(",")
+				}
+				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+					if isMultiLine {
+						p.printNewline()
+						p.printIndent()
+					} else if i != 0 {
+						p.printSpace()
+					}
+				}
+				p.printExprCommentsAtLoc(item.Loc)
+				if b.HasSpread && i+1 == len(b.Items) {
+					p.addSourceMapping(item.Loc)
+					p.print("...")
+					p.printExprCommentsAtLoc(item.Binding.Loc)
+				}
+				p.printBinding(item.Binding)
+
+				if item.DefaultValueOrNil.Data != nil {
+					p.printSpace()
+					p.print("=")
+					p.printSpace()
+					p.printExprWithoutLeadingNewline(item.DefaultValueOrNil, js_ast.LComma, 0)
+				}
+
+				// Make sure there's a comma after trailing missing items
+				if _, ok := item.Binding.Data.(*js_ast.BMissing); ok && i == len(b.Items)-1 {
+					p.print(",")
+				}
+			}
+
+			if isMultiLine {
+				p.printNewline()
+				p.printExprCommentsAfterCloseTokenAtLoc(b.CloseBracketLoc)
+				p.options.Indent--
+				p.printIndent()
+			}
+		}
+		p.addSourceMapping(b.CloseBracketLoc)
+		p.print("]")
+
+	case *js_ast.BObject:
+		// Comments attached to any property force the pattern onto multiple lines
+		isMultiLine := (len(b.Properties) > 0 && !b.IsSingleLine) || p.willPrintExprCommentsAtLoc(b.CloseBraceLoc)
+		if !p.options.MinifyWhitespace && !isMultiLine {
+			for _, property := range b.Properties {
+				if p.willPrintExprCommentsAtLoc(property.Loc) {
+					isMultiLine = true
+					break
+				}
+			}
+		}
+		p.addSourceMapping(binding.Loc)
+		p.print("{")
+		if len(b.Properties) > 0 || isMultiLine {
+			if isMultiLine {
+				p.options.Indent++
+			}
+
+			for i, property := range b.Properties {
+				if i != 0 {
+					p.print(",")
+				}
+				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+					if isMultiLine {
+						p.printNewline()
+						p.printIndent()
+					} else {
+						p.printSpace()
+					}
+				}
+
+				p.printExprCommentsAtLoc(property.Loc)
+
+				if property.IsSpread {
+					p.addSourceMapping(property.Loc)
+					p.print("...")
+					p.printExprCommentsAtLoc(property.Value.Loc)
+				} else {
+					// Computed keys are printed as "[expr]:", possibly split over
+					// multiple lines when the key has attached comments
+					if property.IsComputed {
+						p.addSourceMapping(property.Loc)
+						isMultiLine := p.willPrintExprCommentsAtLoc(property.Key.Loc) || p.willPrintExprCommentsAtLoc(property.CloseBracketLoc)
+						p.print("[")
+						if isMultiLine {
+							p.printNewline()
+							p.options.Indent++
+							p.printIndent()
+						}
+						p.printExpr(property.Key, js_ast.LComma, 0)
+						if isMultiLine {
+							p.printNewline()
+							p.printExprCommentsAfterCloseTokenAtLoc(property.CloseBracketLoc)
+							p.options.Indent--
+							p.printIndent()
+						}
+						if property.CloseBracketLoc.Start > property.Loc.Start {
+							p.addSourceMapping(property.CloseBracketLoc)
+						}
+						p.print("]:")
+						p.printSpace()
+						p.printBinding(property.Value)
+
+						if property.DefaultValueOrNil.Data != nil {
+							p.printSpace()
+							p.print("=")
+							p.printSpace()
+							p.printExprWithoutLeadingNewline(property.DefaultValueOrNil, js_ast.LComma, 0)
+						}
+						continue
+					}
+
+					if str, ok := property.Key.Data.(*js_ast.EString); ok && !property.PreferQuotedKey && p.canPrintIdentifierUTF16(str.Value) {
+						// Use a shorthand property if the names are the same
+						if id, ok := property.Value.Data.(*js_ast.BIdentifier); ok &&
+							!p.willPrintExprCommentsAtLoc(property.Value.Loc) &&
+							helpers.UTF16EqualsString(str.Value, p.renamer.NameForSymbol(id.Ref)) {
+							if p.options.AddSourceMappings {
+								p.addSourceMappingForName(property.Key.Loc, helpers.UTF16ToString(str.Value), id.Ref)
+							}
+							p.printIdentifierUTF16(str.Value)
+							if property.DefaultValueOrNil.Data != nil {
+								p.printSpace()
+								p.print("=")
+								p.printSpace()
+								p.printExprWithoutLeadingNewline(property.DefaultValueOrNil, js_ast.LComma, 0)
+							}
+							continue
+						}
+
+						p.addSourceMapping(property.Key.Loc)
+						p.printIdentifierUTF16(str.Value)
+					} else if mangled, ok := property.Key.Data.(*js_ast.ENameOfSymbol); ok {
+						if name := p.mangledPropName(mangled.Ref); p.canPrintIdentifier(name) {
+							p.addSourceMappingForName(property.Key.Loc, name, mangled.Ref)
+							p.printIdentifier(name)
+
+							// Use a shorthand property if the names are the same
+							if id, ok := property.Value.Data.(*js_ast.BIdentifier); ok &&
+								!p.willPrintExprCommentsAtLoc(property.Value.Loc) &&
+								name == p.renamer.NameForSymbol(id.Ref) {
+								if property.DefaultValueOrNil.Data != nil {
+									p.printSpace()
+									p.print("=")
+									p.printSpace()
+									p.printExprWithoutLeadingNewline(property.DefaultValueOrNil, js_ast.LComma, 0)
+								}
+								continue
+							}
+						} else {
+							p.addSourceMapping(property.Key.Loc)
+							p.printQuotedUTF8(name, 0)
+						}
+					} else {
+						p.printExpr(property.Key, js_ast.LLowest, 0)
+					}
+
+					p.print(":")
+					p.printSpace()
+				}
+				p.printBinding(property.Value)
+
+				if property.DefaultValueOrNil.Data != nil {
+					p.printSpace()
+					p.print("=")
+					p.printSpace()
+					p.printExprWithoutLeadingNewline(property.DefaultValueOrNil, js_ast.LComma, 0)
+				}
+			}
+
+			if isMultiLine {
+				p.printNewline()
+				p.printExprCommentsAfterCloseTokenAtLoc(b.CloseBraceLoc)
+				p.options.Indent--
+				p.printIndent()
+			} else {
+				// This block is only reached if len(b.Properties) > 0
+				p.printSpace()
+			}
+		}
+		p.addSourceMapping(b.CloseBraceLoc)
+		p.print("}")
+
+	default:
+		panic(fmt.Sprintf("Unexpected binding of type %T", binding.Data))
+	}
+}
+
+// printSpace prints a single space unless whitespace is being minified.
+func (p *printer) printSpace() {
+	if p.options.MinifyWhitespace {
+		return
+	}
+	p.print(" ")
+}
+
+// printNewline prints a newline unless whitespace is being minified.
+func (p *printer) printNewline() {
+	if p.options.MinifyWhitespace {
+		return
+	}
+	p.print("\n")
+}
+
+// currentLineLength returns the number of bytes on the current (last) line
+// of the output so far. "oldLineStart" and "oldLineEnd" cache the scan
+// position so repeated calls only examine bytes appended since the last call.
+func (p *printer) currentLineLength() int {
+	js := p.js
+	n := len(js)
+	stop := p.oldLineEnd
+
+	// Update "oldLineStart" to the start of the current line
+	for i := n; i > stop; i-- {
+		if c := js[i-1]; c == '\r' || c == '\n' {
+			p.oldLineStart = i
+			break
+		}
+	}
+
+	p.oldLineEnd = n
+	return n - p.oldLineStart
+}
+
+// printNewlinePastLineLimit inserts a newline plus indentation when the
+// current output line has reached the configured line length limit. It
+// reports whether a newline was printed.
+func (p *printer) printNewlinePastLineLimit() bool {
+	if p.currentLineLength() >= p.options.LineLimit {
+		p.print("\n")
+		p.printIndent()
+		return true
+	}
+	return false
+}
+
+// printSpaceBeforeOperator prints a space when the operator "next" would
+// otherwise merge with the operator just printed into a different token
+// (e.g. "+" directly followed by "+" would read as "++").
+func (p *printer) printSpaceBeforeOperator(next js_ast.OpCode) {
+	if p.prevOpEnd == len(p.js) {
+		prev := p.prevOp
+
+		// "+ + y" => "+ +y"
+		// "+ ++ y" => "+ ++y"
+		// "x + + y" => "x+ +y"
+		// "x ++ + y" => "x+++y"
+		// "x + ++ y" => "x+ ++y"
+		// "-- >" => "-- >"
+		// "< ! --" => "<! --"
+		if ((prev == js_ast.BinOpAdd || prev == js_ast.UnOpPos) && (next == js_ast.BinOpAdd || next == js_ast.UnOpPos || next == js_ast.UnOpPreInc)) ||
+			((prev == js_ast.BinOpSub || prev == js_ast.UnOpNeg) && (next == js_ast.BinOpSub || next == js_ast.UnOpNeg || next == js_ast.UnOpPreDec)) ||
+			(prev == js_ast.UnOpPostDec && next == js_ast.BinOpGt) ||
+			(prev == js_ast.UnOpNot && next == js_ast.UnOpPreDec && len(p.js) > 1 && p.js[len(p.js)-2] == '<') {
+			p.print(" ")
+		}
+	}
+}
+
+// printSemicolonAfterStatement terminates a statement. When minifying
+// whitespace the semicolon is deferred so it can later be omitted before a
+// closing "}" or at the end of the file.
+func (p *printer) printSemicolonAfterStatement() {
+	if p.options.MinifyWhitespace {
+		p.needsSemicolon = true
+		return
+	}
+	p.print(";\n")
+}
+
+// printSemicolonIfNeeded flushes a semicolon that was deferred by
+// printSemicolonAfterStatement while minifying whitespace.
+func (p *printer) printSemicolonIfNeeded() {
+	if !p.needsSemicolon {
+		return
+	}
+	p.print(";")
+	p.needsSemicolon = false
+}
+
+// printSpaceBeforeIdentifier prints a space when the last output character
+// could otherwise merge with the next identifier into a single token, or
+// when the output currently ends with a regular expression literal.
+func (p *printer) printSpaceBeforeIdentifier() {
+	if c, _ := utf8.DecodeLastRune(p.js); js_ast.IsIdentifierContinue(c) || p.prevRegExpEnd == len(p.js) {
+		p.print(" ")
+	}
+}
+
+// fnArgsOpts bundles the options for printFnArgs.
+type fnArgsOpts struct {
+	openParenLoc              logger.Loc // source location to map to the "(" token
+	addMappingForOpenParenLoc bool       // emit a source mapping for openParenLoc
+	hasRestArg                bool       // the final argument is a "..." rest argument
+	isArrow                   bool       // printing an arrow function's argument list
+}
+
+// printFnArgs prints a function's parameter list including decorators,
+// default values, and a trailing rest argument. The surrounding parentheses
+// are omitted for a single simple arrow argument when minifying whitespace.
+func (p *printer) printFnArgs(args []js_ast.Arg, opts fnArgsOpts) {
+	wrap := true
+
+	// Minify "(a) => {}" as "a=>{}"
+	if p.options.MinifyWhitespace && !opts.hasRestArg && opts.isArrow && len(args) == 1 {
+		if _, ok := args[0].Binding.Data.(*js_ast.BIdentifier); ok && args[0].DefaultOrNil.Data == nil {
+			wrap = false
+		}
+	}
+
+	if wrap {
+		if opts.addMappingForOpenParenLoc {
+			p.addSourceMapping(opts.openParenLoc)
+		}
+		p.print("(")
+	}
+
+	for i, arg := range args {
+		if i != 0 {
+			p.print(",")
+			p.printSpace()
+		}
+		p.printDecorators(arg.Decorators, printSpaceAfterDecorator)
+		if opts.hasRestArg && i+1 == len(args) {
+			p.print("...")
+		}
+		p.printBinding(arg.Binding)
+
+		if arg.DefaultOrNil.Data != nil {
+			p.printSpace()
+			p.print("=")
+			p.printSpace()
+			p.printExprWithoutLeadingNewline(arg.DefaultOrNil, js_ast.LComma, 0)
+		}
+	}
+
+	if wrap {
+		p.print(")")
+	}
+}
+
+// printFn prints a function's argument list followed by its body block.
+func (p *printer) printFn(fn js_ast.Fn) {
+	opts := fnArgsOpts{hasRestArg: fn.HasRestArg}
+	p.printFnArgs(fn.Args, opts)
+	p.printSpace()
+	p.printBlock(fn.Body.Loc, fn.Body.Block)
+}
+
+// printAfterDecorator selects what printDecorators emits after each
+// decorator: a newline (e.g. class members) or a space (inline positions).
+type printAfterDecorator uint8
+
+const (
+	printNewlineAfterDecorator printAfterDecorator = iota
+	printSpaceAfterDecorator
+)
+
+// printDecorators prints a "@decorator" list. A decorator expression must be
+// parenthesized unless it is a (possibly called) property access chain
+// rooted at an identifier; the labeled loop below walks down the chain to
+// decide that. It returns true if the caller should omit its usual
+// indentation because the last decorator was followed by a space rather
+// than a newline.
+func (p *printer) printDecorators(decorators []js_ast.Decorator, defaultMode printAfterDecorator) (omitIndentAfter bool) {
+	oldMode := defaultMode
+
+	for _, decorator := range decorators {
+		wrap := false
+		wasCallTarget := false
+		expr := decorator.Value
+		mode := defaultMode
+		if decorator.OmitNewlineAfter {
+			mode = printSpaceAfterDecorator
+		}
+
+	outer:
+		for {
+			isCallTarget := wasCallTarget
+			wasCallTarget = false
+
+			switch e := expr.Data.(type) {
+			case *js_ast.EIdentifier:
+				// "@foo"
+				break outer
+
+			case *js_ast.ECall:
+				// "@foo()"
+				expr = e.Target
+				wasCallTarget = true
+				continue
+
+			case *js_ast.EDot:
+				// "@foo.bar"
+				if p.canPrintIdentifier(e.Name) {
+					expr = e.Target
+					continue
+				}
+
+				// "@foo.\u30FF" => "@(foo['\u30FF'])"
+				break
+
+			case *js_ast.EIndex:
+				if _, ok := e.Index.Data.(*js_ast.EPrivateIdentifier); ok {
+					// "@foo.#bar"
+					expr = e.Target
+					continue
+				}
+
+				// "@(foo[bar])"
+				break
+
+			case *js_ast.EImportIdentifier:
+				ref := ast.FollowSymbols(p.symbols, e.Ref)
+				symbol := p.symbols.Get(ref)
+
+				if symbol.ImportItemStatus == ast.ImportItemMissing {
+					// "@(void 0)"
+					break
+				}
+
+				if symbol.NamespaceAlias != nil && isCallTarget && e.WasOriginallyIdentifier {
+					// "@((0, import_ns.fn)())"
+					break
+				}
+
+				if value := p.options.ConstValues[ref]; value.Kind != js_ast.ConstValueNone {
+					// "@(<inlined constant>)"
+					break
+				}
+
+				// "@foo"
+				// "@import_ns.fn"
+				break outer
+
+			default:
+				// "@(foo + bar)"
+				// "@(() => {})"
+				break
+			}
+
+			// Falling out of the switch (instead of "break outer") means the
+			// expression needs to be wrapped in parentheses
+			wrap = true
+			break outer
+		}
+
+		p.addSourceMapping(decorator.AtLoc)
+		if oldMode == printNewlineAfterDecorator {
+			p.printIndent()
+		}
+
+		p.print("@")
+		if wrap {
+			p.print("(")
+		}
+		p.printExpr(decorator.Value, js_ast.LLowest, 0)
+		if wrap {
+			p.print(")")
+		}
+
+		switch mode {
+		case printNewlineAfterDecorator:
+			p.printNewline()
+
+		case printSpaceAfterDecorator:
+			p.printSpace()
+		}
+		oldMode = mode
+	}
+
+	omitIndentAfter = oldMode == printSpaceAfterDecorator
+	return
+}
+
+// printClass prints the "extends" clause (if any) and the class body. The
+// "class" keyword and the class name are printed by the caller.
+func (p *printer) printClass(class js_ast.Class) {
+	if class.ExtendsOrNil.Data != nil {
+		p.print(" extends")
+		p.printSpace()
+		p.printExpr(class.ExtendsOrNil, js_ast.LNew-1, 0)
+	}
+	p.printSpace()
+
+	p.addSourceMapping(class.BodyLoc)
+	p.print("{")
+	p.printNewline()
+	p.options.Indent++
+
+	for _, item := range class.Properties {
+		p.printSemicolonIfNeeded()
+		omitIndent := p.printDecorators(item.Decorators, printNewlineAfterDecorator)
+		if !omitIndent {
+			p.printIndent()
+		}
+
+		// "static { ... }" blocks have no key/value and are handled separately
+		if item.Kind == js_ast.PropertyClassStaticBlock {
+			p.addSourceMapping(item.Loc)
+			p.print("static")
+			p.printSpace()
+			p.printBlock(item.ClassStaticBlock.Loc, item.ClassStaticBlock.Block)
+			p.printNewline()
+			continue
+		}
+
+		p.printProperty(item)
+
+		// Need semicolons after class fields
+		if item.ValueOrNil.Data == nil {
+			p.printSemicolonAfterStatement()
+		} else {
+			p.printNewline()
+		}
+	}
+
+	p.needsSemicolon = false
+	p.printExprCommentsAfterCloseTokenAtLoc(class.CloseBraceLoc)
+	p.options.Indent--
+	p.printIndent()
+	if class.CloseBraceLoc.Start > class.BodyLoc.Start {
+		p.addSourceMapping(class.CloseBraceLoc)
+	}
+	p.print("}")
+}
+
+// printProperty prints a single object-literal or class-body property:
+// spreads, getters/setters, auto-accessors, methods, computed keys, quoted
+// or shorthand keys, values, and class field initializers.
+func (p *printer) printProperty(property js_ast.Property) {
+	p.printExprCommentsAtLoc(property.Loc)
+
+	if property.Kind == js_ast.PropertySpread {
+		p.addSourceMapping(property.Loc)
+		p.print("...")
+		p.printExpr(property.ValueOrNil, js_ast.LComma, 0)
+		return
+	}
+
+	// Handle key syntax compression for cross-module constant inlining of enums
+	if p.options.MinifySyntax && property.Flags.Has(js_ast.PropertyIsComputed) {
+		if dot, ok := property.Key.Data.(*js_ast.EDot); ok {
+			if value, ok := p.tryToGetImportedEnumValue(dot.Target, dot.Name); ok {
+				if value.String != nil {
+					property.Key.Data = &js_ast.EString{Value: value.String}
+
+					// Problematic key names must stay computed for correctness
+					if !helpers.UTF16EqualsString(value.String, "__proto__") &&
+						!helpers.UTF16EqualsString(value.String, "constructor") &&
+						!helpers.UTF16EqualsString(value.String, "prototype") {
+						property.Flags &= ^js_ast.PropertyIsComputed
+					}
+				} else {
+					property.Key.Data = &js_ast.ENumber{Value: value.Number}
+					property.Flags &= ^js_ast.PropertyIsComputed
+				}
+			}
+		}
+	}
+
+	if property.Flags.Has(js_ast.PropertyIsStatic) {
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(property.Loc)
+		p.print("static")
+		p.printSpace()
+	}
+
+	switch property.Kind {
+	case js_ast.PropertyGetter:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(property.Loc)
+		p.print("get")
+		p.printSpace()
+
+	case js_ast.PropertySetter:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(property.Loc)
+		p.print("set")
+		p.printSpace()
+
+	case js_ast.PropertyAutoAccessor:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(property.Loc)
+		p.print("accessor")
+		p.printSpace()
+	}
+
+	// "async" and "*" only apply to method definitions
+	if fn, ok := property.ValueOrNil.Data.(*js_ast.EFunction); property.Kind.IsMethodDefinition() && ok {
+		if fn.Fn.IsAsync {
+			p.printSpaceBeforeIdentifier()
+			p.addSourceMapping(property.Loc)
+			p.print("async")
+			p.printSpace()
+		}
+		if fn.Fn.IsGenerator {
+			p.addSourceMapping(property.Loc)
+			p.print("*")
+		}
+	}
+
+	isComputed := property.Flags.Has(js_ast.PropertyIsComputed)
+
+	// Automatically print numbers that would cause a syntax error as computed properties
+	if !isComputed {
+		if key, ok := property.Key.Data.(*js_ast.ENumber); ok {
+			if math.Signbit(key.Value) || (key.Value == positiveInfinity && p.options.MinifySyntax) {
+				// "{ -1: 0 }" must be printed as "{ [-1]: 0 }"
+				// "{ 1/0: 0 }" must be printed as "{ [1/0]: 0 }"
+				isComputed = true
+			}
+		}
+	}
+
+	if isComputed {
+		p.addSourceMapping(property.Loc)
+		isMultiLine := p.willPrintExprCommentsAtLoc(property.Key.Loc) || p.willPrintExprCommentsAtLoc(property.CloseBracketLoc)
+		p.print("[")
+		if isMultiLine {
+			p.printNewline()
+			p.options.Indent++
+			p.printIndent()
+		}
+		p.printExpr(property.Key, js_ast.LComma, 0)
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(property.CloseBracketLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		if property.CloseBracketLoc.Start > property.Loc.Start {
+			p.addSourceMapping(property.CloseBracketLoc)
+		}
+		p.print("]")
+
+		if property.ValueOrNil.Data != nil {
+			if fn, ok := property.ValueOrNil.Data.(*js_ast.EFunction); property.Kind.IsMethodDefinition() && ok {
+				p.printFn(fn.Fn)
+				return
+			}
+
+			p.print(":")
+			p.printSpace()
+			p.printExprWithoutLeadingNewline(property.ValueOrNil, js_ast.LComma, 0)
+		}
+
+		if property.InitializerOrNil.Data != nil {
+			p.printSpace()
+			p.print("=")
+			p.printSpace()
+			p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+		}
+		return
+	}
+
+	switch key := property.Key.Data.(type) {
+	case *js_ast.EPrivateIdentifier:
+		name := p.renamer.NameForSymbol(key.Ref)
+		p.addSourceMappingForName(property.Key.Loc, name, key.Ref)
+		p.printIdentifier(name)
+
+	case *js_ast.ENameOfSymbol:
+		if name := p.mangledPropName(key.Ref); p.canPrintIdentifier(name) {
+			p.printSpaceBeforeIdentifier()
+			p.addSourceMappingForName(property.Key.Loc, name, key.Ref)
+			p.printIdentifier(name)
+
+			// Use a shorthand property if the names are the same
+			if !p.options.UnsupportedFeatures.Has(compat.ObjectExtensions) && property.ValueOrNil.Data != nil && !p.willPrintExprCommentsAtLoc(property.ValueOrNil.Loc) {
+				switch e := property.ValueOrNil.Data.(type) {
+				case *js_ast.EIdentifier:
+					if name == p.renamer.NameForSymbol(e.Ref) {
+						if property.InitializerOrNil.Data != nil {
+							p.printSpace()
+							p.print("=")
+							p.printSpace()
+							p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+						}
+						return
+					}
+
+				case *js_ast.EImportIdentifier:
+					// Make sure we're not using a property access instead of an identifier
+					ref := ast.FollowSymbols(p.symbols, e.Ref)
+					if symbol := p.symbols.Get(ref); symbol.NamespaceAlias == nil && name == p.renamer.NameForSymbol(ref) &&
+						p.options.ConstValues[ref].Kind == js_ast.ConstValueNone {
+						if property.InitializerOrNil.Data != nil {
+							p.printSpace()
+							p.print("=")
+							p.printSpace()
+							p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+						}
+						return
+					}
+				}
+			}
+		} else {
+			p.addSourceMapping(property.Key.Loc)
+			p.printQuotedUTF8(name, 0)
+		}
+
+	case *js_ast.EString:
+		if !property.Flags.Has(js_ast.PropertyPreferQuotedKey) && p.canPrintIdentifierUTF16(key.Value) {
+			p.printSpaceBeforeIdentifier()
+
+			// Use a shorthand property if the names are the same
+			if !p.options.UnsupportedFeatures.Has(compat.ObjectExtensions) && property.ValueOrNil.Data != nil && !p.willPrintExprCommentsAtLoc(property.ValueOrNil.Loc) {
+				switch e := property.ValueOrNil.Data.(type) {
+				case *js_ast.EIdentifier:
+					if canUseShorthandProperty(key.Value, p.renamer.NameForSymbol(e.Ref), property.Flags) {
+						if p.options.AddSourceMappings {
+							p.addSourceMappingForName(property.Key.Loc, helpers.UTF16ToString(key.Value), e.Ref)
+						}
+						p.printIdentifierUTF16(key.Value)
+						if property.InitializerOrNil.Data != nil {
+							p.printSpace()
+							p.print("=")
+							p.printSpace()
+							p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+						}
+						return
+					}
+
+				case *js_ast.EImportIdentifier:
+					// Make sure we're not using a property access instead of an identifier
+					ref := ast.FollowSymbols(p.symbols, e.Ref)
+					if symbol := p.symbols.Get(ref); symbol.NamespaceAlias == nil && canUseShorthandProperty(key.Value, p.renamer.NameForSymbol(ref), property.Flags) &&
+						p.options.ConstValues[ref].Kind == js_ast.ConstValueNone {
+						if p.options.AddSourceMappings {
+							p.addSourceMappingForName(property.Key.Loc, helpers.UTF16ToString(key.Value), ref)
+						}
+						p.printIdentifierUTF16(key.Value)
+						if property.InitializerOrNil.Data != nil {
+							p.printSpace()
+							p.print("=")
+							p.printSpace()
+							p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+						}
+						return
+					}
+				}
+			}
+
+			// The JavaScript specification special-cases the property identifier
+			// "__proto__" with a colon after it to set the prototype of the object.
+			// If we keep the identifier but add a colon then we'll cause a behavior
+			// change because the prototype will now be set. Avoid using an identifier
+			// by using a computed property with a string instead. For more info see:
+			// https://tc39.es/ecma262/#sec-runtime-semantics-propertydefinitionevaluation
+			if property.Flags.Has(js_ast.PropertyWasShorthand) && !p.options.UnsupportedFeatures.Has(compat.ObjectExtensions) &&
+				helpers.UTF16EqualsString(key.Value, "__proto__") {
+				p.print("[")
+				p.addSourceMapping(property.Key.Loc)
+				p.printQuotedUTF16(key.Value, 0)
+				p.print("]")
+				break
+			}
+
+			p.addSourceMapping(property.Key.Loc)
+			p.printIdentifierUTF16(key.Value)
+		} else {
+			p.addSourceMapping(property.Key.Loc)
+			p.printQuotedUTF16(key.Value, 0)
+		}
+
+	default:
+		p.printExpr(property.Key, js_ast.LLowest, 0)
+	}
+
+	// Method definitions print the function body directly after the key
+	if fn, ok := property.ValueOrNil.Data.(*js_ast.EFunction); property.Kind.IsMethodDefinition() && ok {
+		p.printFn(fn.Fn)
+		return
+	}
+
+	if property.ValueOrNil.Data != nil {
+		p.print(":")
+		p.printSpace()
+		p.printExprWithoutLeadingNewline(property.ValueOrNil, js_ast.LComma, 0)
+	}
+
+	if property.InitializerOrNil.Data != nil {
+		p.printSpace()
+		p.print("=")
+		p.printSpace()
+		p.printExprWithoutLeadingNewline(property.InitializerOrNil, js_ast.LComma, 0)
+	}
+}
+
+// canUseShorthandProperty returns true if "key: name" can be collapsed into
+// a shorthand property.
+//
+// The JavaScript specification special-cases the property identifier
+// "__proto__" with a colon after it to set the prototype of the object. If
+// we remove the colon then we'll cause a behavior change because the
+// prototype will no longer be set, but we also don't want to add a colon
+// if it was omitted. Always use a shorthand property if the property is not
+// "__proto__", otherwise try to preserve the original shorthand status. See:
+// https://tc39.es/ecma262/#sec-runtime-semantics-propertydefinitionevaluation
+func canUseShorthandProperty(key []uint16, name string, flags js_ast.PropertyFlags) bool {
+	if !helpers.UTF16EqualsString(key, name) {
+		return false
+	}
+	return name != "__proto__" || flags.Has(js_ast.PropertyWasShorthand)
+}
+
+// printQuotedUTF16 prints a UTF-16 string as a quoted string literal. The
+// quote character (single, double, or backtick when allowed) is chosen to
+// minimize the number of characters that must be escaped.
+func (p *printer) printQuotedUTF16(data []uint16, flags printQuotedFlags) {
+	if p.options.UnsupportedFeatures.Has(compat.TemplateLiteral) {
+		flags &= ^printQuotedAllowBacktick
+	}
+
+	// Count how many characters each quote style would need to escape
+	singleCost := 0
+	doubleCost := 0
+	backtickCost := 0
+
+	for i, c := range data {
+		switch c {
+		case '\n':
+			if p.options.MinifySyntax {
+				// The backslash for the newline costs an extra character for old-style
+				// string literals when compared to a template literal
+				backtickCost--
+			}
+		case '\'':
+			singleCost++
+		case '"':
+			doubleCost++
+		case '`':
+			backtickCost++
+		case '$':
+			// "${" sequences need to be escaped in template literals
+			if i+1 < len(data) && data[i+1] == '{' {
+				backtickCost++
+			}
+		}
+	}
+
+	// Prefer double quotes, then single quotes, then backticks
+	c := "\""
+	if doubleCost > singleCost {
+		c = "'"
+		if singleCost > backtickCost && (flags&printQuotedAllowBacktick) != 0 {
+			c = "`"
+		}
+	} else if doubleCost > backtickCost && (flags&printQuotedAllowBacktick) != 0 {
+		c = "`"
+	}
+
+	p.print(c)
+	p.printUnquotedUTF16(data, rune(c[0]), flags)
+	p.print(c)
+}
+
+// printRequireOrImportExpr prints a "require()" or "import()" expression for
+// the given import record. External records print a real "require"/"import"
+// call (optionally wrapped with "__toESM()"); internal records print a call
+// to the bundled module's wrapper (optionally wrapped with "__toESM()" or
+// "__toCommonJS()"). When dynamic import is unsupported, "import()" is
+// lowered to "Promise.resolve().then(() => require(...))".
+func (p *printer) printRequireOrImportExpr(importRecordIndex uint32, level js_ast.L, flags printExprFlags, closeParenLoc logger.Loc) {
+	record := &p.importRecords[importRecordIndex]
+
+	if level >= js_ast.LNew || (flags&forbidCall) != 0 {
+		p.print("(")
+		defer p.print(")")
+		level = js_ast.LLowest
+	}
+
+	if !record.SourceIndex.IsValid() {
+		// External "require()"
+		if record.Kind != ast.ImportDynamic {
+			// Wrap this with a call to "__toESM()" if this is a CommonJS file
+			wrapWithToESM := record.Flags.Has(ast.WrapWithToESM)
+			if wrapWithToESM {
+				p.printSpaceBeforeIdentifier()
+				p.printIdentifier(p.renamer.NameForSymbol(p.options.ToESMRef))
+				p.print("(")
+			}
+
+			// Potentially substitute our own "__require" stub for "require"
+			p.printSpaceBeforeIdentifier()
+			if record.Flags.Has(ast.CallRuntimeRequire) {
+				p.printIdentifier(p.renamer.NameForSymbol(p.options.RuntimeRequireRef))
+			} else {
+				p.print("require")
+			}
+
+			isMultiLine := p.willPrintExprCommentsAtLoc(record.Range.Loc) || p.willPrintExprCommentsAtLoc(closeParenLoc)
+			p.print("(")
+			if isMultiLine {
+				p.printNewline()
+				p.options.Indent++
+				p.printIndent()
+			}
+			p.printExprCommentsAtLoc(record.Range.Loc)
+			p.printPath(importRecordIndex, ast.ImportRequire)
+			if isMultiLine {
+				p.printNewline()
+				p.printExprCommentsAfterCloseTokenAtLoc(closeParenLoc)
+				p.options.Indent--
+				p.printIndent()
+			}
+			if closeParenLoc.Start > record.Range.Loc.Start {
+				p.addSourceMapping(closeParenLoc)
+			}
+			p.print(")")
+
+			// Finish the call to "__toESM()"
+			if wrapWithToESM {
+				if p.moduleType.IsESM() {
+					p.print(",")
+					p.printSpace()
+					p.print("1")
+				}
+				p.print(")")
+			}
+			return
+		}
+
+		// External "import()"
+		kind := ast.ImportDynamic
+		if !p.options.UnsupportedFeatures.Has(compat.DynamicImport) {
+			p.printSpaceBeforeIdentifier()
+			p.print("import(")
+		} else {
+			kind = ast.ImportRequire
+			p.printSpaceBeforeIdentifier()
+			p.print("Promise.resolve()")
+			p.printDotThenPrefix()
+			defer p.printDotThenSuffix()
+
+			// Wrap this with a call to "__toESM()" if this is a CommonJS file
+			if record.Flags.Has(ast.WrapWithToESM) {
+				p.printSpaceBeforeIdentifier()
+				p.printIdentifier(p.renamer.NameForSymbol(p.options.ToESMRef))
+				p.print("(")
+				defer func() {
+					if p.moduleType.IsESM() {
+						p.print(",")
+						p.printSpace()
+						p.print("1")
+					}
+					p.print(")")
+				}()
+			}
+
+			// Potentially substitute our own "__require" stub for "require"
+			p.printSpaceBeforeIdentifier()
+			if record.Flags.Has(ast.CallRuntimeRequire) {
+				p.printIdentifier(p.renamer.NameForSymbol(p.options.RuntimeRequireRef))
+			} else {
+				p.print("require")
+			}
+
+			p.print("(")
+		}
+		isMultiLine := p.willPrintExprCommentsAtLoc(record.Range.Loc) ||
+			p.willPrintExprCommentsAtLoc(closeParenLoc) ||
+			(record.AssertOrWith != nil &&
+				!p.options.UnsupportedFeatures.Has(compat.DynamicImport) &&
+				(!p.options.UnsupportedFeatures.Has(compat.ImportAssertions) ||
+					!p.options.UnsupportedFeatures.Has(compat.ImportAttributes)) &&
+				p.willPrintExprCommentsAtLoc(record.AssertOrWith.OuterOpenBraceLoc))
+		if isMultiLine {
+			p.printNewline()
+			p.options.Indent++
+			p.printIndent()
+		}
+		p.printExprCommentsAtLoc(record.Range.Loc)
+		p.printPath(importRecordIndex, kind)
+		if !p.options.UnsupportedFeatures.Has(compat.DynamicImport) {
+			p.printImportCallAssertOrWith(record.AssertOrWith, isMultiLine)
+		}
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(closeParenLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		if closeParenLoc.Start > record.Range.Loc.Start {
+			p.addSourceMapping(closeParenLoc)
+		}
+		p.print(")")
+		return
+	}
+
+	meta := p.options.RequireOrImportMetaForSource(record.SourceIndex.GetIndex())
+
+	// Don't need the namespace object if the result is unused anyway
+	if (flags & exprResultIsUnused) != 0 {
+		meta.ExportsRef = ast.InvalidRef
+	}
+
+	// Internal "import()" of async ESM
+	if record.Kind == ast.ImportDynamic && meta.IsWrapperAsync {
+		p.printSpaceBeforeIdentifier()
+		p.printIdentifier(p.renamer.NameForSymbol(meta.WrapperRef))
+		p.print("()")
+		if meta.ExportsRef != ast.InvalidRef {
+			p.printDotThenPrefix()
+			p.printSpaceBeforeIdentifier()
+			p.printIdentifier(p.renamer.NameForSymbol(meta.ExportsRef))
+			p.printDotThenSuffix()
+		}
+		return
+	}
+
+	// Internal "require()" or "import()"
+	if record.Kind == ast.ImportDynamic {
+		p.printSpaceBeforeIdentifier()
+		p.print("Promise.resolve()")
+		level = p.printDotThenPrefix()
+		defer p.printDotThenSuffix()
+	}
+
+	// Make sure the comma operator is properly wrapped
+	if meta.ExportsRef != ast.InvalidRef && level >= js_ast.LComma {
+		p.print("(")
+		defer p.print(")")
+	}
+
+	// Wrap this with a call to "__toESM()" if this is a CommonJS file
+	wrapWithToESM := record.Flags.Has(ast.WrapWithToESM)
+	if wrapWithToESM {
+		p.printSpaceBeforeIdentifier()
+		p.printIdentifier(p.renamer.NameForSymbol(p.options.ToESMRef))
+		p.print("(")
+	}
+
+	// Call the wrapper
+	p.printSpaceBeforeIdentifier()
+	p.printIdentifier(p.renamer.NameForSymbol(meta.WrapperRef))
+	p.print("()")
+
+	// Return the namespace object if this is an ESM file
+	if meta.ExportsRef != ast.InvalidRef {
+		p.print(",")
+		p.printSpace()
+
+		// Wrap this with a call to "__toCommonJS()" if this is an ESM file
+		wrapWithToCJS := record.Flags.Has(ast.WrapWithToCJS)
+		if wrapWithToCJS {
+			p.printIdentifier(p.renamer.NameForSymbol(p.options.ToCommonJSRef))
+			p.print("(")
+		}
+		p.printIdentifier(p.renamer.NameForSymbol(meta.ExportsRef))
+		if wrapWithToCJS {
+			p.print(")")
+		}
+	}
+
+	// Finish the call to "__toESM()"
+	if wrapWithToESM {
+		if p.moduleType.IsESM() {
+			p.print(",")
+			p.printSpace()
+			p.print("1")
+		}
+		p.print(")")
+	}
+}
+
+// printDotThenPrefix emits the opening of a ".then(...)" continuation and
+// returns the precedence level at which the continuation body must be
+// printed. When arrow functions are unsupported in the target environment,
+// a classic "function() { return" form is emitted instead.
+func (p *printer) printDotThenPrefix() js_ast.L {
+	useFunctionForm := p.options.UnsupportedFeatures.Has(compat.Arrow)
+	if !useFunctionForm {
+		// Modern form: ".then(() => "
+		p.print(".then(()")
+		p.printSpace()
+		p.print("=>")
+		p.printSpace()
+		return js_ast.LComma
+	}
+
+	// Legacy form: ".then(function() { return "
+	p.print(".then(function()")
+	p.printSpace()
+	p.print("{")
+	p.printNewline()
+	p.options.Indent++
+	p.printIndent()
+	p.print("return")
+	p.printSpace()
+	return js_ast.LLowest
+}
+
+// printDotThenSuffix closes the continuation opened by printDotThenPrefix,
+// matching whichever form (arrow or "function") was emitted there.
+func (p *printer) printDotThenSuffix() {
+	if !p.options.UnsupportedFeatures.Has(compat.Arrow) {
+		// The arrow form only needs the closing parenthesis of ".then("
+		p.print(")")
+		return
+	}
+
+	// Close the "function() { return ..." body
+	if !p.options.MinifyWhitespace {
+		p.print(";")
+	}
+	p.printNewline()
+	p.options.Indent--
+	p.printIndent()
+	p.print("})")
+}
+
+// printUndefined prints the "undefined" value as "void 0". At prefix
+// precedence or above, the expression is parenthesized so it cannot fuse
+// with a surrounding operator.
+func (p *printer) printUndefined(loc logger.Loc, level js_ast.L) {
+	if level < js_ast.LPrefix {
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(loc)
+		p.print("void 0")
+		return
+	}
+	p.addSourceMapping(loc)
+	p.print("(void 0)")
+}
+
+// Call this before printing an expression to see if it turned out to be empty.
+// We use this to do inlining of empty functions at print time. It can't happen
+// during parse time because a) parse time only has two passes and we only know
+// if a function can be inlined at the end of the second pass (due to is-mutated
+// analysis) and b) we want to enable cross-module inlining of empty functions
+// which has to happen after linking.
+//
+// This function returns "nil" to indicate that the expression should be removed
+// completely.
+//
+// This function doesn't need to search everywhere inside the entire expression
+// for calls to inline. Calls are automatically inlined when printed. However,
+// the printer replaces the call with "undefined" since the result may still
+// be needed by the caller. If the caller knows that it doesn't need the result,
+// it should call this function first instead so we don't print "undefined".
+//
+// This is a separate function instead of trying to work this logic into the
+// printer because it's too late to eliminate the expression entirely when we're
+// in the printer. We may have already printed the leading indent, for example.
+func (p *printer) simplifyUnusedExpr(expr js_ast.Expr) js_ast.Expr {
+	switch e := expr.Data.(type) {
+	case *js_ast.EBinary:
+		// Calls to be inlined may be hidden inside a comma operator chain
+		if e.Op == js_ast.BinOpComma {
+			left := p.simplifyUnusedExpr(e.Left)
+			right := p.simplifyUnusedExpr(e.Right)
+			// Only allocate a replacement node if one of the sides changed
+			if left.Data != e.Left.Data || right.Data != e.Right.Data {
+				return js_ast.JoinWithComma(left, right)
+			}
+		}
+
+	case *js_ast.ECall:
+		// Look up the flags on the call target's symbol, following import
+		// links so cross-module references resolve to the original symbol
+		var symbolFlags ast.SymbolFlags
+		switch target := e.Target.Data.(type) {
+		case *js_ast.EIdentifier:
+			symbolFlags = p.symbols.Get(target.Ref).Flags
+		case *js_ast.EImportIdentifier:
+			ref := ast.FollowSymbols(p.symbols, target.Ref)
+			symbolFlags = p.symbols.Get(ref).Flags
+		}
+
+		// Replace non-mutated empty functions with their arguments at print time
+		if (symbolFlags & (ast.IsEmptyFunction | ast.CouldPotentiallyBeMutated)) == ast.IsEmptyFunction {
+			var replacement js_ast.Expr
+			for _, arg := range e.Args {
+				if _, ok := arg.Data.(*js_ast.ESpread); ok {
+					// Wrap a spread argument in an array literal so any
+					// iteration side effects are preserved: "...x" => "[...x]"
+					arg.Data = &js_ast.EArray{Items: []js_ast.Expr{arg}, IsSingleLine: true}
+				}
+				replacement = js_ast.JoinWithComma(replacement, p.astHelpers.SimplifyUnusedExpr(p.simplifyUnusedExpr(arg), p.options.UnsupportedFeatures))
+			}
+			return replacement // Don't add "undefined" here because the result isn't used
+		}
+
+		// Inline non-mutated identity functions at print time
+		if (symbolFlags&(ast.IsIdentityFunction|ast.CouldPotentiallyBeMutated)) == ast.IsIdentityFunction && len(e.Args) == 1 {
+			arg := e.Args[0]
+			if _, ok := arg.Data.(*js_ast.ESpread); !ok {
+				// "identity(x)" with an unused result becomes just "x",
+				// itself simplified for an unused context
+				return p.astHelpers.SimplifyUnusedExpr(p.simplifyUnusedExpr(arg), p.options.UnsupportedFeatures)
+			}
+		}
+	}
+
+	return expr
+}
+
+// This assumes the original expression was some form of indirect value, such
+// as a value returned from a function call or the result of a comma operator.
+// In this case, there is no special behavior with the "delete" operator or
+// with function calls. If we substitute this indirect value for another value
+// due to inlining, we have to make sure we don't accidentally introduce special
+// behavior.
+func (p *printer) guardAgainstBehaviorChangeDueToSubstitution(expr js_ast.Expr, flags printExprFlags) js_ast.Expr {
+	needsCommaGuard := false
+
+	switch {
+	case (flags & isDeleteTarget) != 0:
+		// "delete id(x)" must not become "delete x"
+		// "delete (empty(), x)" must not become "delete x"
+		binary, ok := expr.Data.(*js_ast.EBinary)
+		needsCommaGuard = !ok || binary.Op != js_ast.BinOpComma
+
+	case (flags & isCallTargetOrTemplateTag) != 0:
+		// "id(x.y)()" must not become "x.y()"
+		// "id(x.y)``" must not become "x.y``"
+		// "(empty(), x.y)()" must not become "x.y()"
+		// "(empty(), eval)()" must not become "eval()"
+		switch expr.Data.(type) {
+		case *js_ast.EDot, *js_ast.EIndex:
+			needsCommaGuard = true
+		case *js_ast.EIdentifier:
+			needsCommaGuard = p.isUnboundEvalIdentifier(expr)
+		}
+	}
+
+	if needsCommaGuard {
+		// Emit "(0, expr)" so the substituted value stays an indirect value
+		expr.Data = &js_ast.EBinary{
+			Op:    js_ast.BinOpComma,
+			Left:  js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: 0}},
+			Right: expr,
+		}
+	}
+
+	return expr
+}
+
+// Constant folding is already implemented once in the parser. A smaller form
+// of constant folding (just for numbers) is implemented here to clean up cross-
+// module numeric constants and bitwise operations. This is not an general-
+// purpose/optimal approach and never will be. For example, we can't affect
+// tree shaking at this stage because it has already happened.
+func (p *printer) lateConstantFoldUnaryOrBinaryOrIfExpr(expr js_ast.Expr) js_ast.Expr {
+	switch e := expr.Data.(type) {
+	case *js_ast.EImportIdentifier:
+		// Substitute cross-module inlined constants for the identifier
+		ref := ast.FollowSymbols(p.symbols, e.Ref)
+		if value := p.options.ConstValues[ref]; value.Kind != js_ast.ConstValueNone {
+			return js_ast.ConstValueToExpr(expr.Loc, value)
+		}
+
+	case *js_ast.EDot:
+		// Substitute cross-module TypeScript enum member accesses
+		if value, ok := p.tryToGetImportedEnumValue(e.Target, e.Name); ok {
+			var inlinedValue js_ast.Expr
+			if value.String != nil {
+				inlinedValue = js_ast.Expr{Loc: expr.Loc, Data: &js_ast.EString{Value: value.String}}
+			} else {
+				inlinedValue = js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: value.Number}}
+			}
+
+			if strings.Contains(e.Name, "*/") {
+				// Don't wrap with a comment (the name would terminate it early)
+				return inlinedValue
+			}
+
+			// Wrap with a comment that preserves the original enum member name
+			return js_ast.Expr{Loc: inlinedValue.Loc, Data: &js_ast.EInlinedEnum{
+				Value:   inlinedValue,
+				Comment: e.Name,
+			}}
+		}
+
+	case *js_ast.EUnary:
+		value := p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.Value)
+
+		// Only fold again if something chained
+		if value.Data != e.Value.Data {
+			// Only fold certain operations (just like the parser)
+			if v, ok := js_ast.ToNumberWithoutSideEffects(value.Data); ok {
+				switch e.Op {
+				case js_ast.UnOpPos:
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: v}}
+
+				case js_ast.UnOpNeg:
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: -v}}
+
+				case js_ast.UnOpCpl:
+					return js_ast.Expr{Loc: expr.Loc, Data: &js_ast.ENumber{Value: float64(^js_ast.ToInt32(v))}}
+				}
+			}
+
+			// Don't mutate the original AST
+			expr.Data = &js_ast.EUnary{Op: e.Op, Value: value}
+		}
+
+	case *js_ast.EBinary:
+		left := p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.Left)
+		right := p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.Right)
+
+		// Only fold again if something changed
+		if left.Data != e.Left.Data || right.Data != e.Right.Data {
+			binary := &js_ast.EBinary{Op: e.Op, Left: left, Right: right}
+
+			// Only fold certain operations (just like the parser)
+			if js_ast.ShouldFoldBinaryOperatorWhenMinifying(binary) {
+				if result := js_ast.FoldBinaryOperator(expr.Loc, binary); result.Data != nil {
+					return result
+				}
+			}
+
+			// Don't mutate the original AST
+			expr.Data = binary
+		}
+
+	case *js_ast.EIf:
+		test := p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.Test)
+
+		// Only fold again if something changed
+		if test.Data != e.Test.Data {
+			// If the test folded to a side-effect-free boolean, pick a branch
+			if boolean, sideEffects, ok := js_ast.ToBooleanWithSideEffects(test.Data); ok && sideEffects == js_ast.NoSideEffects {
+				if boolean {
+					return p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.Yes)
+				} else {
+					return p.lateConstantFoldUnaryOrBinaryOrIfExpr(e.No)
+				}
+			}
+
+			// Don't mutate the original AST
+			expr.Data = &js_ast.EIf{Test: test, Yes: e.Yes, No: e.No}
+		}
+	}
+
+	return expr
+}
+
+// isUnboundIdentifier reports whether "expr" is an identifier whose symbol
+// (after following symbol links) has the "unbound" kind.
+func (p *printer) isUnboundIdentifier(expr js_ast.Expr) bool {
+	if id, ok := expr.Data.(*js_ast.EIdentifier); ok {
+		ref := ast.FollowSymbols(p.symbols, id.Ref)
+		return p.symbols.Get(ref).Kind == ast.SymbolUnbound
+	}
+	return false
+}
+
+// isIdentifierOrNumericConstantOrPropertyAccess reports whether "expr" is an
+// identifier, a property access, or one of the numeric constants that print
+// as an identifier-like word ("Infinity" via +Inf, or "NaN").
+func (p *printer) isIdentifierOrNumericConstantOrPropertyAccess(expr js_ast.Expr) bool {
+	switch e := expr.Data.(type) {
+	case *js_ast.ENumber:
+		// Note: only positive infinity qualifies here
+		return math.IsInf(e.Value, 1) || math.IsNaN(e.Value)
+	case *js_ast.EIdentifier, *js_ast.EDot, *js_ast.EIndex:
+		return true
+	default:
+		return false
+	}
+}
+
+// exprStartFlags is a bit set recording which of the printer's "start of
+// construct" offsets currently point at the end of the output buffer. It is
+// used to save and restore those offsets around comment printing.
+type exprStartFlags uint8
+
+const (
+	stmtStartFlag          exprStartFlags = 1 << iota // corresponds to p.stmtStart
+	exportDefaultStartFlag                            // corresponds to p.exportDefaultStart
+	arrowExprStartFlag                                // corresponds to p.arrowExprStart
+	forOfInitStartFlag                                // corresponds to p.forOfInitStart
+)
+
+func (p *printer) saveExprStartFlags() (flags exprStartFlags) {
+	n := len(p.js)
+	if p.stmtStart == n {
+		flags |= stmtStartFlag
+	}
+	if p.exportDefaultStart == n {
+		flags |= exportDefaultStartFlag
+	}
+	if p.arrowExprStart == n {
+		flags |= arrowExprStartFlag
+	}
+	if p.forOfInitStart == n {
+		flags |= forOfInitStartFlag
+	}
+	return
+}
+
+// restoreExprStartFlags moves each offset captured by saveExprStartFlags to
+// the current end of the output buffer.
+func (p *printer) restoreExprStartFlags(flags exprStartFlags) {
+	if flags == 0 {
+		return
+	}
+	end := len(p.js)
+	if flags&stmtStartFlag != 0 {
+		p.stmtStart = end
+	}
+	if flags&exportDefaultStartFlag != 0 {
+		p.exportDefaultStart = end
+	}
+	if flags&arrowExprStartFlag != 0 {
+		p.arrowExprStart = end
+	}
+	if flags&forOfInitStartFlag != 0 {
+		p.forOfInitStart = end
+	}
+}
+
+// Print any stored comments that are associated with this location
+func (p *printer) printExprCommentsAtLoc(loc logger.Loc) {
+	// Comments are dropped entirely when minifying whitespace
+	if p.options.MinifyWhitespace {
+		return
+	}
+	if comments := p.exprComments[loc]; comments != nil && !p.printedExprComments[loc] {
+		// Printing comments advances the output buffer, so save any "start of
+		// construct" offsets that point at its current end and restore them after
+		flags := p.saveExprStartFlags()
+
+		// We must never generate a newline before certain expressions. For example,
+		// generating a newline before the expression in a "return" statement will
+		// cause a semicolon to be inserted, which would change the code's behavior.
+		if p.noLeadingNewlineHere == len(p.js) {
+			for _, comment := range comments {
+				if strings.HasPrefix(comment, "//") {
+					// Convert a line comment into a block comment since a line
+					// comment would force a newline
+					p.print("/*")
+					p.print(comment[2:])
+					if strings.HasPrefix(comment, "// ") {
+						p.print(" ")
+					}
+					p.print("*/")
+				} else {
+					// Remove newlines from a multi-line block comment
+					p.print(strings.Join(strings.Split(comment, "\n"), ""))
+				}
+				p.printSpace()
+			}
+		} else {
+			// A newline is safe here: print each comment on its own line
+			for _, comment := range comments {
+				p.printIndentedComment(comment)
+				p.printIndent()
+			}
+		}
+
+		// Mark these comments as printed so we don't print them again
+		p.printedExprComments[loc] = true
+
+		p.restoreExprStartFlags(flags)
+	}
+}
+
+// printExprCommentsAfterCloseTokenAtLoc prints any stored comments associated
+// with a closing token at this location, one per indented line, and marks
+// them as printed.
+func (p *printer) printExprCommentsAfterCloseTokenAtLoc(loc logger.Loc) {
+	comments := p.exprComments[loc]
+	if comments == nil || p.printedExprComments[loc] {
+		return
+	}
+
+	saved := p.saveExprStartFlags()
+
+	for _, text := range comments {
+		p.printIndent()
+		p.printIndentedComment(text)
+	}
+
+	// Don't print the same comments twice
+	p.printedExprComments[loc] = true
+
+	p.restoreExprStartFlags(saved)
+}
+
+// printExprWithoutLeadingNewline prints an expression while ensuring that no
+// newline is generated directly before it (which could trigger automatic
+// semicolon insertion). If leading comments would force a newline, the
+// expression is wrapped in parentheses instead.
+func (p *printer) printExprWithoutLeadingNewline(expr js_ast.Expr, level js_ast.L, flags printExprFlags) {
+	if p.options.MinifyWhitespace || !p.willPrintExprCommentsAtLoc(expr.Loc) {
+		// No comments will be printed: just forbid a newline at this offset
+		p.noLeadingNewlineHere = len(p.js)
+		p.printExpr(expr, level, flags)
+		return
+	}
+
+	// Comments would introduce a newline, so wrap in parentheses to stay safe
+	p.print("(")
+	p.printNewline()
+	p.options.Indent++
+	p.printIndent()
+	p.printExpr(expr, level, flags)
+	p.printNewline()
+	p.options.Indent--
+	p.printIndent()
+	p.print(")")
+}
+
+// printExprFlags carries per-call context from a parent expression down into
+// printExpr so the child knows how it is being used.
+type printExprFlags uint16
+
+const (
+	forbidCall                     printExprFlags = 1 << iota // don't print a call; wrap in parens if needed
+	forbidIn                                                  // the "in" operator is not allowed here (e.g. for-loop init)
+	hasNonOptionalChainParent                                 // parent access is not part of an optional chain
+	exprResultIsUnused                                        // the value of this expression is discarded
+	didAlreadySimplifyUnusedExprs                             // simplifyUnusedExpr has already run on this expression
+	isFollowedByOf                                            // next token is "of" (for-of disambiguation)
+	isInsideForAwait                                          // inside a "for await" construct
+	isDeleteTarget                                            // this expression is the operand of "delete"
+	isCallTargetOrTemplateTag                                 // this expression is called or used as a template tag
+	isPropertyAccessTarget                                    // this expression is the target of "." or "[]"
+	parentWasUnaryOrBinaryOrIfTest                            // avoid O(n^2) re-folding in lateConstantFold...
+)
+
+func (p *printer) printExpr(expr js_ast.Expr, level js_ast.L, flags printExprFlags) {
+	// If syntax compression is enabled, do a pre-pass over unary and binary
+	// operators to inline bitwise operations of cross-module inlined constants.
+	// This makes the output a little tighter if people construct bit masks in
+	// other files. This is not a general-purpose constant folding pass. In
+	// particular, it has no effect on tree shaking because that pass has already
+	// been run.
+	//
+	// This sets a flag to avoid doing this when the parent is a unary or binary
+	// operator so that we don't trigger O(n^2) behavior when traversing over a
+	// large expression tree.
+	if p.options.MinifySyntax && (flags&parentWasUnaryOrBinaryOrIfTest) == 0 {
+		switch expr.Data.(type) {
+		case *js_ast.EUnary, *js_ast.EBinary, *js_ast.EIf:
+			expr = p.lateConstantFoldUnaryOrBinaryOrIfExpr(expr)
+		}
+	}
+
+	p.printExprCommentsAtLoc(expr.Loc)
+
+	switch e := expr.Data.(type) {
+	case *js_ast.EMissing:
+		p.addSourceMapping(expr.Loc)
+
+	case *js_ast.EAnnotation:
+		p.printExpr(e.Value, level, flags)
+
+	case *js_ast.EUndefined:
+		p.printUndefined(expr.Loc, level)
+
+	case *js_ast.ESuper:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("super")
+
+	case *js_ast.ENull:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("null")
+
+	case *js_ast.EThis:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("this")
+
+	case *js_ast.ESpread:
+		p.addSourceMapping(expr.Loc)
+		p.print("...")
+		p.printExpr(e.Value, js_ast.LComma, 0)
+
+	case *js_ast.ENewTarget:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("new.target")
+
+	case *js_ast.EImportMeta:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("import.meta")
+
+	case *js_ast.ENameOfSymbol:
+		name := p.mangledPropName(e.Ref)
+		p.addSourceMappingForName(expr.Loc, name, e.Ref)
+
+		if !p.options.MinifyWhitespace && e.HasPropertyKeyComment {
+			p.print("/* @__KEY__ */ ")
+		}
+
+		p.printQuotedUTF8(name, printQuotedAllowBacktick)
+
+	case *js_ast.EJSXElement:
+		// Start the opening tag
+		p.addSourceMapping(expr.Loc)
+		p.print("<")
+		p.printJSXTag(e.TagOrNil)
+		if !e.IsTagSingleLine {
+			p.options.Indent++
+		}
+
+		// Print the attributes
+		for _, property := range e.Properties {
+			if e.IsTagSingleLine {
+				p.printSpace()
+			} else {
+				p.printNewline()
+				p.printIndent()
+			}
+
+			if property.Kind == js_ast.PropertySpread {
+				if p.willPrintExprCommentsAtLoc(property.Loc) {
+					p.print("{")
+					p.printNewline()
+					p.options.Indent++
+					p.printIndent()
+					p.printExprCommentsAtLoc(property.Loc)
+					p.print("...")
+					p.printExpr(property.ValueOrNil, js_ast.LComma, 0)
+					p.printNewline()
+					p.options.Indent--
+					p.printIndent()
+					p.print("}")
+				} else {
+					p.print("{...")
+					p.printExpr(property.ValueOrNil, js_ast.LComma, 0)
+					p.print("}")
+				}
+				continue
+			}
+
+			p.printSpaceBeforeIdentifier()
+			if mangled, ok := property.Key.Data.(*js_ast.ENameOfSymbol); ok {
+				name := p.mangledPropName(mangled.Ref)
+				p.addSourceMappingForName(property.Key.Loc, name, mangled.Ref)
+				p.printIdentifier(name)
+			} else if str, ok := property.Key.Data.(*js_ast.EString); ok {
+				p.addSourceMapping(property.Key.Loc)
+				p.print(helpers.UTF16ToString(str.Value))
+			} else {
+				p.print("{...{")
+				p.printSpace()
+				p.print("[")
+				p.printExpr(property.Key, js_ast.LComma, 0)
+				p.print("]:")
+				p.printSpace()
+				p.printExpr(property.ValueOrNil, js_ast.LComma, 0)
+				p.printSpace()
+				p.print("}}")
+				continue
+			}
+
+			isMultiLine := p.willPrintExprCommentsAtLoc(property.ValueOrNil.Loc)
+
+			if property.Flags.Has(js_ast.PropertyWasShorthand) {
+				// Implicit "true" value
+				if boolean, ok := property.ValueOrNil.Data.(*js_ast.EBoolean); ok && boolean.Value {
+					continue
+				}
+
+				// JSX element as JSX attribute value
+				if _, ok := property.ValueOrNil.Data.(*js_ast.EJSXElement); ok {
+					p.print("=")
+					p.printExpr(property.ValueOrNil, js_ast.LLowest, 0)
+					continue
+				}
+			}
+
+			// Special-case raw text
+			if text, ok := property.ValueOrNil.Data.(*js_ast.EJSXText); ok {
+				p.print("=")
+				p.addSourceMapping(property.ValueOrNil.Loc)
+				p.print(text.Raw)
+				continue
+			}
+
+			// Generic JS value
+			p.print("={")
+			if isMultiLine {
+				p.printNewline()
+				p.options.Indent++
+				p.printIndent()
+			}
+			p.printExpr(property.ValueOrNil, js_ast.LComma, 0)
+			if isMultiLine {
+				p.printNewline()
+				p.options.Indent--
+				p.printIndent()
+			}
+			p.print("}")
+		}
+
+		// End the opening tag
+		if !e.IsTagSingleLine {
+			p.options.Indent--
+			if len(e.Properties) > 0 {
+				p.printNewline()
+				p.printIndent()
+			}
+		}
+		if e.TagOrNil.Data != nil && len(e.NullableChildren) == 0 {
+			if e.IsTagSingleLine || len(e.Properties) == 0 {
+				p.printSpace()
+			}
+			p.addSourceMapping(e.CloseLoc)
+			p.print("/>")
+			break
+		}
+		p.print(">")
+
+		// Print the children
+		for _, childOrNil := range e.NullableChildren {
+			if _, ok := childOrNil.Data.(*js_ast.EJSXElement); ok {
+				p.printExpr(childOrNil, js_ast.LLowest, 0)
+			} else if text, ok := childOrNil.Data.(*js_ast.EJSXText); ok {
+				p.addSourceMapping(childOrNil.Loc)
+				p.print(text.Raw)
+			} else if childOrNil.Data != nil {
+				isMultiLine := p.willPrintExprCommentsAtLoc(childOrNil.Loc)
+				p.print("{")
+				if isMultiLine {
+					p.printNewline()
+					p.options.Indent++
+					p.printIndent()
+				}
+				p.printExpr(childOrNil, js_ast.LComma, 0)
+				if isMultiLine {
+					p.printNewline()
+					p.options.Indent--
+					p.printIndent()
+				}
+				p.print("}")
+			} else {
+				p.print("{")
+				if p.willPrintExprCommentsAtLoc(childOrNil.Loc) {
+					// Note: Some people use these comments for AST transformations
+					p.printNewline()
+					p.options.Indent++
+					p.printExprCommentsAfterCloseTokenAtLoc(childOrNil.Loc)
+					p.options.Indent--
+					p.printIndent()
+				}
+				p.print("}")
+			}
+		}
+
+		// Print the closing tag
+		p.addSourceMapping(e.CloseLoc)
+		p.print("</")
+		p.printJSXTag(e.TagOrNil)
+		p.print(">")
+
+	case *js_ast.ENew:
+		wrap := level >= js_ast.LCall
+
+		hasPureComment := !p.options.MinifyWhitespace && e.CanBeUnwrappedIfUnused
+		if hasPureComment && level >= js_ast.LPostfix {
+			wrap = true
+		}
+
+		if wrap {
+			p.print("(")
+		}
+
+		if hasPureComment {
+			p.addSourceMapping(expr.Loc)
+			p.print("/* @__PURE__ */ ")
+		}
+
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("new")
+		p.printSpace()
+		p.printExpr(e.Target, js_ast.LNew, forbidCall)
+
+		// Omit the "()" when minifying, but only when safe to do so
+		isMultiLine := !p.options.MinifyWhitespace && ((e.IsMultiLine && len(e.Args) > 0) ||
+			p.willPrintExprCommentsForAnyOf(e.Args) ||
+			p.willPrintExprCommentsAtLoc(e.CloseParenLoc))
+		if !p.options.MinifyWhitespace || len(e.Args) > 0 || level >= js_ast.LPostfix || isMultiLine {
+			needsNewline := true
+			p.print("(")
+			if isMultiLine {
+				p.options.Indent++
+			}
+			for i, arg := range e.Args {
+				if i != 0 {
+					p.print(",")
+				}
+				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+					if isMultiLine {
+						if needsNewline {
+							p.printNewline()
+						}
+						p.printIndent()
+					} else if i != 0 {
+						p.printSpace()
+					}
+				}
+				p.printExpr(arg, js_ast.LComma, 0)
+				needsNewline = true
+			}
+			if isMultiLine {
+				if needsNewline || p.willPrintExprCommentsAtLoc(e.CloseParenLoc) {
+					p.printNewline()
+				}
+				p.printExprCommentsAfterCloseTokenAtLoc(e.CloseParenLoc)
+				p.options.Indent--
+				p.printIndent()
+			}
+			if e.CloseParenLoc.Start > expr.Loc.Start {
+				p.addSourceMapping(e.CloseParenLoc)
+			}
+			p.print(")")
+		}
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.ECall:
+		if p.options.MinifySyntax {
+			var symbolFlags ast.SymbolFlags
+			switch target := e.Target.Data.(type) {
+			case *js_ast.EIdentifier:
+				symbolFlags = p.symbols.Get(target.Ref).Flags
+			case *js_ast.EImportIdentifier:
+				ref := ast.FollowSymbols(p.symbols, target.Ref)
+				symbolFlags = p.symbols.Get(ref).Flags
+			}
+
+			// Replace non-mutated empty functions with their arguments at print time
+			if (symbolFlags & (ast.IsEmptyFunction | ast.CouldPotentiallyBeMutated)) == ast.IsEmptyFunction {
+				var replacement js_ast.Expr
+				for _, arg := range e.Args {
+					if _, ok := arg.Data.(*js_ast.ESpread); ok {
+						arg.Data = &js_ast.EArray{Items: []js_ast.Expr{arg}, IsSingleLine: true}
+					}
+					replacement = js_ast.JoinWithComma(replacement, p.astHelpers.SimplifyUnusedExpr(arg, p.options.UnsupportedFeatures))
+				}
+				if replacement.Data == nil || (flags&exprResultIsUnused) == 0 {
+					replacement = js_ast.JoinWithComma(replacement, js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared})
+				}
+				p.printExpr(p.guardAgainstBehaviorChangeDueToSubstitution(replacement, flags), level, flags)
+				break
+			}
+
+			// Inline non-mutated identity functions at print time
+			if (symbolFlags&(ast.IsIdentityFunction|ast.CouldPotentiallyBeMutated)) == ast.IsIdentityFunction && len(e.Args) == 1 {
+				arg := e.Args[0]
+				if _, ok := arg.Data.(*js_ast.ESpread); !ok {
+					if (flags & exprResultIsUnused) != 0 {
+						arg = p.astHelpers.SimplifyUnusedExpr(arg, p.options.UnsupportedFeatures)
+					}
+					p.printExpr(p.guardAgainstBehaviorChangeDueToSubstitution(arg, flags), level, flags)
+					break
+				}
+			}
+
+			// Inline IIFEs that return expressions at print time
+			if len(e.Args) == 0 {
+				// Note: Do not inline async arrow functions as they are not IIFEs. In
+				// particular, they are not necessarily invoked immediately, and any
+				// exceptions involved in their evaluation will be swallowed without
+				// bubbling up to the surrounding context.
+				if arrow, ok := e.Target.Data.(*js_ast.EArrow); ok && len(arrow.Args) == 0 && !arrow.IsAsync {
+					stmts := arrow.Body.Block.Stmts
+
+					// "(() => {})()" => "void 0"
+					if len(stmts) == 0 {
+						value := js_ast.Expr{Loc: expr.Loc, Data: js_ast.EUndefinedShared}
+						p.printExpr(p.guardAgainstBehaviorChangeDueToSubstitution(value, flags), level, flags)
+						break
+					}
+
+					// "(() => 123)()" => "123"
+					if len(stmts) == 1 {
+						if stmt, ok := stmts[0].Data.(*js_ast.SReturn); ok {
+							value := stmt.ValueOrNil
+							if value.Data == nil {
+								value.Data = js_ast.EUndefinedShared
+							}
+							p.printExpr(p.guardAgainstBehaviorChangeDueToSubstitution(value, flags), level, flags)
+							break
+						}
+					}
+				}
+			}
+		}
+
+		wrap := level >= js_ast.LNew || (flags&forbidCall) != 0
+		var targetFlags printExprFlags
+		if e.OptionalChain == js_ast.OptionalChainNone {
+			targetFlags = hasNonOptionalChainParent
+		} else if (flags & hasNonOptionalChainParent) != 0 {
+			wrap = true
+		}
+
+		hasPureComment := !p.options.MinifyWhitespace && e.CanBeUnwrappedIfUnused
+		if hasPureComment && level >= js_ast.LPostfix {
+			wrap = true
+		}
+
+		if wrap {
+			p.print("(")
+		}
+
+		if hasPureComment {
+			flags := p.saveExprStartFlags()
+			p.addSourceMapping(expr.Loc)
+			p.print("/* @__PURE__ */ ")
+			p.restoreExprStartFlags(flags)
+		}
+
+		// We don't ever want to accidentally generate a direct eval expression here
+		p.callTarget = e.Target.Data
+		if (e.Kind != js_ast.DirectEval && p.isUnboundEvalIdentifier(e.Target) && e.OptionalChain == js_ast.OptionalChainNone) ||
+			(e.Kind != js_ast.TargetWasOriginallyPropertyAccess && js_ast.IsPropertyAccess(e.Target)) {
+			p.print("(0,")
+			p.printSpace()
+			p.printExpr(e.Target, js_ast.LPostfix, isCallTargetOrTemplateTag)
+			p.print(")")
+		} else {
+			p.printExpr(e.Target, js_ast.LPostfix, isCallTargetOrTemplateTag|targetFlags)
+		}
+
+		if e.OptionalChain == js_ast.OptionalChainStart {
+			p.print("?.")
+		}
+
+		isMultiLine := !p.options.MinifyWhitespace && ((e.IsMultiLine && len(e.Args) > 0) ||
+			p.willPrintExprCommentsForAnyOf(e.Args) ||
+			p.willPrintExprCommentsAtLoc(e.CloseParenLoc))
+		p.print("(")
+		if isMultiLine {
+			p.options.Indent++
+		}
+		for i, arg := range e.Args {
+			if i != 0 {
+				p.print(",")
+			}
+			if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+				if isMultiLine {
+					p.printNewline()
+					p.printIndent()
+				} else if i != 0 {
+					p.printSpace()
+				}
+			}
+			p.printExpr(arg, js_ast.LComma, 0)
+		}
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(e.CloseParenLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		if e.CloseParenLoc.Start > expr.Loc.Start {
+			p.addSourceMapping(e.CloseParenLoc)
+		}
+		p.print(")")
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.ERequireString:
+		p.addSourceMapping(expr.Loc)
+		p.printRequireOrImportExpr(e.ImportRecordIndex, level, flags, e.CloseParenLoc)
+
+	case *js_ast.ERequireResolveString:
+		recordLoc := p.importRecords[e.ImportRecordIndex].Range.Loc
+		isMultiLine := p.willPrintExprCommentsAtLoc(recordLoc) || p.willPrintExprCommentsAtLoc(e.CloseParenLoc)
+		wrap := level >= js_ast.LNew || (flags&forbidCall) != 0
+		if wrap {
+			p.print("(")
+		}
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("require.resolve(")
+		if isMultiLine {
+			p.printNewline()
+			p.options.Indent++
+			p.printIndent()
+			p.printExprCommentsAtLoc(recordLoc)
+		}
+		p.printPath(e.ImportRecordIndex, ast.ImportRequireResolve)
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(e.CloseParenLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		if e.CloseParenLoc.Start > expr.Loc.Start {
+			p.addSourceMapping(e.CloseParenLoc)
+		}
+		p.print(")")
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EImportString:
+		p.addSourceMapping(expr.Loc)
+		p.printRequireOrImportExpr(e.ImportRecordIndex, level, flags, e.CloseParenLoc)
+
+	case *js_ast.EImportCall:
+		// Only print the second argument if either import assertions or import attributes are supported
+		printImportAssertOrWith := e.OptionsOrNil.Data != nil && (!p.options.UnsupportedFeatures.Has(compat.ImportAssertions) || !p.options.UnsupportedFeatures.Has(compat.ImportAttributes))
+		isMultiLine := !p.options.MinifyWhitespace &&
+			(p.willPrintExprCommentsAtLoc(e.Expr.Loc) ||
+				(printImportAssertOrWith && p.willPrintExprCommentsAtLoc(e.OptionsOrNil.Loc)) ||
+				p.willPrintExprCommentsAtLoc(e.CloseParenLoc))
+		wrap := level >= js_ast.LNew || (flags&forbidCall) != 0
+		if wrap {
+			p.print("(")
+		}
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("import(")
+		if isMultiLine {
+			p.printNewline()
+			p.options.Indent++
+			p.printIndent()
+		}
+		p.printExpr(e.Expr, js_ast.LComma, 0)
+
+		if printImportAssertOrWith {
+			p.print(",")
+			if isMultiLine {
+				p.printNewline()
+				p.printIndent()
+			} else {
+				p.printSpace()
+			}
+			p.printExpr(e.OptionsOrNil, js_ast.LComma, 0)
+		}
+
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(e.CloseParenLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		p.print(")")
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EDot:
+		wrap := false
+		if e.OptionalChain == js_ast.OptionalChainNone {
+			flags |= hasNonOptionalChainParent
+
+			// Inline cross-module TypeScript enum references here
+			if value, ok := p.tryToGetImportedEnumValue(e.Target, e.Name); ok {
+				if value.String != nil {
+					p.printQuotedUTF16(value.String, printQuotedAllowBacktick)
+				} else {
+					p.printNumber(value.Number, level)
+				}
+				if !p.options.MinifyWhitespace && !p.options.MinifyIdentifiers && !strings.Contains(e.Name, "*/") {
+					p.print(" /* ")
+					p.print(e.Name)
+					p.print(" */")
+				}
+				break
+			}
+		} else {
+			if (flags & hasNonOptionalChainParent) != 0 {
+				wrap = true
+				p.print("(")
+			}
+			flags &= ^hasNonOptionalChainParent
+		}
+		p.printExpr(e.Target, js_ast.LPostfix, (flags&(forbidCall|hasNonOptionalChainParent))|isPropertyAccessTarget)
+		if p.canPrintIdentifier(e.Name) {
+			if e.OptionalChain != js_ast.OptionalChainStart && p.needSpaceBeforeDot == len(p.js) {
+				// "1.toString" is a syntax error, so print "1 .toString" instead
+				p.print(" ")
+			}
+			if e.OptionalChain == js_ast.OptionalChainStart {
+				p.print("?.")
+			} else {
+				p.print(".")
+			}
+			if p.options.LineLimit > 0 {
+				p.printNewlinePastLineLimit()
+			}
+			p.addSourceMapping(e.NameLoc)
+			p.printIdentifier(e.Name)
+		} else {
+			if e.OptionalChain == js_ast.OptionalChainStart {
+				p.print("?.")
+			}
+			p.print("[")
+			p.addSourceMapping(e.NameLoc)
+			p.printQuotedUTF8(e.Name, printQuotedAllowBacktick)
+			p.print("]")
+		}
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EIndex:
+		if e.OptionalChain == js_ast.OptionalChainNone {
+			flags |= hasNonOptionalChainParent
+
+			// Inline cross-module TypeScript enum references here
+			if index, ok := e.Index.Data.(*js_ast.EString); ok {
+				if value, name, ok := p.tryToGetImportedEnumValueUTF16(e.Target, index.Value); ok {
+					if value.String != nil {
+						p.printQuotedUTF16(value.String, printQuotedAllowBacktick)
+					} else {
+						p.printNumber(value.Number, level)
+					}
+					if !p.options.MinifyWhitespace && !p.options.MinifyIdentifiers && !strings.Contains(name, "*/") {
+						p.print(" /* ")
+						p.print(name)
+						p.print(" */")
+					}
+					break
+				}
+			}
+		} else {
+			if (flags & hasNonOptionalChainParent) != 0 {
+				p.print("(")
+				defer p.print(")")
+			}
+			flags &= ^hasNonOptionalChainParent
+		}
+		p.printExpr(e.Target, js_ast.LPostfix, (flags&(forbidCall|hasNonOptionalChainParent))|isPropertyAccessTarget)
+		if e.OptionalChain == js_ast.OptionalChainStart {
+			p.print("?.")
+		}
+
+		switch index := e.Index.Data.(type) {
+		case *js_ast.EPrivateIdentifier:
+			if e.OptionalChain != js_ast.OptionalChainStart {
+				p.print(".")
+			}
+			name := p.renamer.NameForSymbol(index.Ref)
+			p.addSourceMappingForName(e.Index.Loc, name, index.Ref)
+			p.printIdentifier(name)
+			return
+
+		case *js_ast.ENameOfSymbol:
+			if name := p.mangledPropName(index.Ref); p.canPrintIdentifier(name) {
+				if e.OptionalChain != js_ast.OptionalChainStart {
+					p.print(".")
+				}
+				p.addSourceMappingForName(e.Index.Loc, name, index.Ref)
+				p.printIdentifier(name)
+				return
+			}
+
+		case *js_ast.EInlinedEnum:
+			if p.options.MinifySyntax {
+				if str, ok := index.Value.Data.(*js_ast.EString); ok && p.canPrintIdentifierUTF16(str.Value) {
+					if e.OptionalChain != js_ast.OptionalChainStart {
+						p.print(".")
+					}
+					p.addSourceMapping(index.Value.Loc)
+					p.printIdentifierUTF16(str.Value)
+					return
+				}
+			}
+
+		case *js_ast.EDot:
+			if p.options.MinifySyntax {
+				if value, ok := p.tryToGetImportedEnumValue(index.Target, index.Name); ok && value.String != nil && p.canPrintIdentifierUTF16(value.String) {
+					if e.OptionalChain != js_ast.OptionalChainStart {
+						p.print(".")
+					}
+					p.addSourceMapping(e.Index.Loc)
+					p.printIdentifierUTF16(value.String)
+					return
+				}
+			}
+		}
+
+		isMultiLine := p.willPrintExprCommentsAtLoc(e.Index.Loc) || p.willPrintExprCommentsAtLoc(e.CloseBracketLoc)
+		p.print("[")
+		if isMultiLine {
+			p.printNewline()
+			p.options.Indent++
+			p.printIndent()
+		}
+		p.printExpr(e.Index, js_ast.LLowest, 0)
+		if isMultiLine {
+			p.printNewline()
+			p.printExprCommentsAfterCloseTokenAtLoc(e.CloseBracketLoc)
+			p.options.Indent--
+			p.printIndent()
+		}
+		if e.CloseBracketLoc.Start > expr.Loc.Start {
+			p.addSourceMapping(e.CloseBracketLoc)
+		}
+		p.print("]")
+
+	case *js_ast.EIf:
+		wrap := level >= js_ast.LConditional
+		if wrap {
+			p.print("(")
+			flags &= ^forbidIn
+		}
+		p.printExpr(e.Test, js_ast.LConditional, (flags&forbidIn)|parentWasUnaryOrBinaryOrIfTest)
+		p.printSpace()
+		p.print("?")
+		if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+			p.printSpace()
+		}
+		p.printExprWithoutLeadingNewline(e.Yes, js_ast.LYield, 0)
+		p.printSpace()
+		p.print(":")
+		if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+			p.printSpace()
+		}
+		p.printExprWithoutLeadingNewline(e.No, js_ast.LYield, flags&forbidIn)
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EArrow:
+		wrap := level >= js_ast.LAssign
+
+		if wrap {
+			p.print("(")
+		}
+		if !p.options.MinifyWhitespace && e.HasNoSideEffectsComment {
+			p.print("/* @__NO_SIDE_EFFECTS__ */ ")
+		}
+		if e.IsAsync {
+			p.addSourceMapping(expr.Loc)
+			p.printSpaceBeforeIdentifier()
+			p.print("async")
+			p.printSpace()
+		}
+
+		p.printFnArgs(e.Args, fnArgsOpts{
+			openParenLoc:              expr.Loc,
+			addMappingForOpenParenLoc: !e.IsAsync,
+			hasRestArg:                e.HasRestArg,
+			isArrow:                   true,
+		})
+		p.printSpace()
+		p.print("=>")
+		p.printSpace()
+
+		wasPrinted := false
+		if len(e.Body.Block.Stmts) == 1 && e.PreferExpr {
+			if s, ok := e.Body.Block.Stmts[0].Data.(*js_ast.SReturn); ok && s.ValueOrNil.Data != nil {
+				p.arrowExprStart = len(p.js)
+				p.printExprWithoutLeadingNewline(s.ValueOrNil, js_ast.LComma, flags&forbidIn)
+				wasPrinted = true
+			}
+		}
+		if !wasPrinted {
+			p.printBlock(e.Body.Loc, e.Body.Block)
+		}
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EFunction:
+		n := len(p.js)
+		wrap := p.stmtStart == n || p.exportDefaultStart == n ||
+			((flags&isPropertyAccessTarget) != 0 && p.options.UnsupportedFeatures.Has(compat.FunctionOrClassPropertyAccess))
+		if wrap {
+			p.print("(")
+		}
+		if !p.options.MinifyWhitespace && e.Fn.HasNoSideEffectsComment {
+			p.print("/* @__NO_SIDE_EFFECTS__ */ ")
+		}
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		if e.Fn.IsAsync {
+			p.print("async ")
+		}
+		p.print("function")
+		if e.Fn.IsGenerator {
+			p.print("*")
+			p.printSpace()
+		}
+		if e.Fn.Name != nil {
+			p.printSpaceBeforeIdentifier()
+			name := p.renamer.NameForSymbol(e.Fn.Name.Ref)
+			p.addSourceMappingForName(e.Fn.Name.Loc, name, e.Fn.Name.Ref)
+			p.printIdentifier(name)
+		}
+		p.printFn(e.Fn)
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EClass:
+		n := len(p.js)
+		wrap := p.stmtStart == n || p.exportDefaultStart == n ||
+			((flags&isPropertyAccessTarget) != 0 && p.options.UnsupportedFeatures.Has(compat.FunctionOrClassPropertyAccess))
+		if wrap {
+			p.print("(")
+		}
+		p.printDecorators(e.Class.Decorators, printSpaceAfterDecorator)
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("class")
+		if e.Class.Name != nil {
+			p.print(" ")
+			name := p.renamer.NameForSymbol(e.Class.Name.Ref)
+			p.addSourceMappingForName(e.Class.Name.Loc, name, e.Class.Name.Ref)
+			p.printIdentifier(name)
+		}
+		p.printClass(e.Class)
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EArray:
+		isMultiLine := (len(e.Items) > 0 && !e.IsSingleLine) || p.willPrintExprCommentsForAnyOf(e.Items) || p.willPrintExprCommentsAtLoc(e.CloseBracketLoc)
+		p.addSourceMapping(expr.Loc)
+		p.print("[")
+		if len(e.Items) > 0 || isMultiLine {
+			if isMultiLine {
+				p.options.Indent++
+			}
+
+			for i, item := range e.Items {
+				if i != 0 {
+					p.print(",")
+				}
+				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+					if isMultiLine {
+						p.printNewline()
+						p.printIndent()
+					} else if i != 0 {
+						p.printSpace()
+					}
+				}
+				p.printExpr(item, js_ast.LComma, 0)
+
+				// Make sure there's a comma after trailing missing items
+				_, ok := item.Data.(*js_ast.EMissing)
+				if ok && i == len(e.Items)-1 {
+					p.print(",")
+				}
+			}
+
+			if isMultiLine {
+				p.printNewline()
+				p.printExprCommentsAfterCloseTokenAtLoc(e.CloseBracketLoc)
+				p.options.Indent--
+				p.printIndent()
+			}
+		}
+		if e.CloseBracketLoc.Start > expr.Loc.Start {
+			p.addSourceMapping(e.CloseBracketLoc)
+		}
+		p.print("]")
+
+	case *js_ast.EObject:
+		isMultiLine := (len(e.Properties) > 0 && !e.IsSingleLine) || p.willPrintExprCommentsAtLoc(e.CloseBraceLoc)
+		if !p.options.MinifyWhitespace && !isMultiLine {
+			for _, property := range e.Properties {
+				if p.willPrintExprCommentsAtLoc(property.Loc) {
+					isMultiLine = true
+					break
+				}
+			}
+		}
+		n := len(p.js)
+		wrap := p.stmtStart == n || p.arrowExprStart == n
+		if wrap {
+			p.print("(")
+		}
+		p.addSourceMapping(expr.Loc)
+		p.print("{")
+		if len(e.Properties) > 0 || isMultiLine {
+			if isMultiLine {
+				p.options.Indent++
+			}
+
+			for i, item := range e.Properties {
+				if i != 0 {
+					p.print(",")
+				}
+				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+					if isMultiLine {
+						p.printNewline()
+						p.printIndent()
+					} else {
+						p.printSpace()
+					}
+				}
+				p.printProperty(item)
+			}
+
+			if isMultiLine {
+				p.printNewline()
+				p.printExprCommentsAfterCloseTokenAtLoc(e.CloseBraceLoc)
+				p.options.Indent--
+				p.printIndent()
+			} else if len(e.Properties) > 0 {
+				p.printSpace()
+			}
+		}
+		if e.CloseBraceLoc.Start > expr.Loc.Start {
+			p.addSourceMapping(e.CloseBraceLoc)
+		}
+		p.print("}")
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EBoolean:
+		p.addSourceMapping(expr.Loc)
+		if p.options.MinifySyntax {
+			if level >= js_ast.LPrefix {
+				if e.Value {
+					p.print("(!0)")
+				} else {
+					p.print("(!1)")
+				}
+			} else {
+				if e.Value {
+					p.print("!0")
+				} else {
+					p.print("!1")
+				}
+			}
+		} else {
+			p.printSpaceBeforeIdentifier()
+			if e.Value {
+				p.print("true")
+			} else {
+				p.print("false")
+			}
+		}
+
+	case *js_ast.EString:
+		var flags printQuotedFlags
+		if e.ContainsUniqueKey {
+			flags = printQuotedNoWrap
+		}
+		p.addSourceMapping(expr.Loc)
+
+		if !p.options.MinifyWhitespace && e.HasPropertyKeyComment {
+			p.print("/* @__KEY__ */ ")
+		}
+
+		// If this was originally a template literal, print it as one as long as we're not minifying
+		if e.PreferTemplate && !p.options.MinifySyntax && !p.options.UnsupportedFeatures.Has(compat.TemplateLiteral) {
+			p.print("`")
+			p.printUnquotedUTF16(e.Value, '`', flags)
+			p.print("`")
+			return
+		}
+
+		p.printQuotedUTF16(e.Value, flags|printQuotedAllowBacktick)
+
+	case *js_ast.ETemplate:
+		if e.TagOrNil.Data == nil && (p.options.MinifySyntax || p.wasLazyExport) {
+			// Inline enums and mangled properties when minifying
+			var replaced []js_ast.TemplatePart
+			for i, part := range e.Parts {
+				var inlinedValue js_ast.E
+				switch e2 := part.Value.Data.(type) {
+				case *js_ast.ENameOfSymbol:
+					inlinedValue = &js_ast.EString{Value: helpers.StringToUTF16(p.mangledPropName(e2.Ref))}
+				case *js_ast.EDot:
+					if value, ok := p.tryToGetImportedEnumValue(e2.Target, e2.Name); ok {
+						if value.String != nil {
+							inlinedValue = &js_ast.EString{Value: value.String}
+						} else {
+							inlinedValue = &js_ast.ENumber{Value: value.Number}
+						}
+					}
+				}
+				if inlinedValue != nil {
+					if replaced == nil {
+						replaced = make([]js_ast.TemplatePart, 0, len(e.Parts))
+						replaced = append(replaced, e.Parts[:i]...)
+					}
+					part.Value.Data = inlinedValue
+					replaced = append(replaced, part)
+				} else if replaced != nil {
+					replaced = append(replaced, part)
+				}
+			}
+			if replaced != nil {
+				copy := *e
+				copy.Parts = replaced
+				switch e2 := js_ast.InlinePrimitivesIntoTemplate(logger.Loc{}, &copy).Data.(type) {
+				case *js_ast.EString:
+					p.printQuotedUTF16(e2.Value, printQuotedAllowBacktick)
+					return
+				case *js_ast.ETemplate:
+					e = e2
+				}
+			}
+
+			// Convert no-substitution template literals into strings if it's smaller
+			if len(e.Parts) == 0 {
+				p.addSourceMapping(expr.Loc)
+				p.printQuotedUTF16(e.HeadCooked, printQuotedAllowBacktick)
+				return
+			}
+		}
+
+		if e.TagOrNil.Data != nil {
+			tagIsPropertyAccess := false
+			switch e.TagOrNil.Data.(type) {
+			case *js_ast.EDot, *js_ast.EIndex:
+				tagIsPropertyAccess = true
+			}
+			if !e.TagWasOriginallyPropertyAccess && tagIsPropertyAccess {
+				// Prevent "x``" from becoming "y.z``"
+				p.print("(0,")
+				p.printSpace()
+				p.printExpr(e.TagOrNil, js_ast.LLowest, isCallTargetOrTemplateTag)
+				p.print(")")
+			} else if js_ast.IsOptionalChain(e.TagOrNil) {
+				// Optional chains are forbidden in template tags
+				p.print("(")
+				p.printExpr(e.TagOrNil, js_ast.LLowest, isCallTargetOrTemplateTag)
+				p.print(")")
+			} else {
+				p.printExpr(e.TagOrNil, js_ast.LPostfix, isCallTargetOrTemplateTag)
+			}
+		} else {
+			p.addSourceMapping(expr.Loc)
+		}
+		p.print("`")
+		if e.TagOrNil.Data != nil {
+			p.print(e.HeadRaw)
+		} else {
+			p.printUnquotedUTF16(e.HeadCooked, '`', 0)
+		}
+		for _, part := range e.Parts {
+			p.print("${")
+			p.printExpr(part.Value, js_ast.LLowest, 0)
+			p.addSourceMapping(part.TailLoc)
+			p.print("}")
+			if e.TagOrNil.Data != nil {
+				p.print(part.TailRaw)
+			} else {
+				p.printUnquotedUTF16(part.TailCooked, '`', 0)
+			}
+		}
+		p.print("`")
+
+	case *js_ast.ERegExp:
+		buffer := p.js
+		n := len(buffer)
+
+		// Avoid forming a single-line comment or "</script" sequence
+		if !p.options.UnsupportedFeatures.Has(compat.InlineScript) && n > 0 {
+			if last := buffer[n-1]; last == '/' || (last == '<' && len(e.Value) >= 7 && strings.EqualFold(e.Value[:7], "/script")) {
+				p.print(" ")
+			}
+		}
+
+		p.addSourceMapping(expr.Loc)
+		p.print(e.Value)
+
+		// Need a space before the next identifier to avoid it turning into flags
+		p.prevRegExpEnd = len(p.js)
+
+	case *js_ast.EInlinedEnum:
+		p.printExpr(e.Value, level, flags)
+
+		if !p.options.MinifyWhitespace && !p.options.MinifyIdentifiers {
+			p.print(" /* ")
+			p.print(e.Comment)
+			p.print(" */")
+		}
+
+	case *js_ast.EBigInt:
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print(e.Value)
+		p.print("n")
+
+	case *js_ast.ENumber:
+		p.addSourceMapping(expr.Loc)
+		p.printNumber(e.Value, level)
+
+	case *js_ast.EIdentifier:
+		name := p.renamer.NameForSymbol(e.Ref)
+		wrap := len(p.js) == p.forOfInitStart && (name == "let" ||
+			((flags&isFollowedByOf) != 0 && (flags&isInsideForAwait) == 0 && name == "async"))
+
+		if wrap {
+			p.print("(")
+		}
+
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMappingForName(expr.Loc, name, e.Ref)
+		p.printIdentifier(name)
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EImportIdentifier:
+		// Potentially use a property access instead of an identifier
+		ref := ast.FollowSymbols(p.symbols, e.Ref)
+		symbol := p.symbols.Get(ref)
+
+		if symbol.ImportItemStatus == ast.ImportItemMissing {
+			p.printUndefined(expr.Loc, level)
+		} else if symbol.NamespaceAlias != nil {
+			wrap := p.callTarget == e && e.WasOriginallyIdentifier
+			if wrap {
+				p.print("(0,")
+				p.printSpace()
+			}
+			p.printSpaceBeforeIdentifier()
+			p.addSourceMapping(expr.Loc)
+			p.printIdentifier(p.renamer.NameForSymbol(symbol.NamespaceAlias.NamespaceRef))
+			alias := symbol.NamespaceAlias.Alias
+			if !e.PreferQuotedKey && p.canPrintIdentifier(alias) {
+				p.print(".")
+				p.addSourceMappingForName(expr.Loc, alias, ref)
+				p.printIdentifier(alias)
+			} else {
+				p.print("[")
+				p.addSourceMappingForName(expr.Loc, alias, ref)
+				p.printQuotedUTF8(alias, printQuotedAllowBacktick)
+				p.print("]")
+			}
+			if wrap {
+				p.print(")")
+			}
+		} else if value := p.options.ConstValues[ref]; value.Kind != js_ast.ConstValueNone {
+			// Handle inlined constants
+			p.printExpr(js_ast.ConstValueToExpr(expr.Loc, value), level, flags)
+		} else {
+			p.printSpaceBeforeIdentifier()
+			name := p.renamer.NameForSymbol(ref)
+			p.addSourceMappingForName(expr.Loc, name, ref)
+			p.printIdentifier(name)
+		}
+
+	case *js_ast.EAwait:
+		wrap := level >= js_ast.LPrefix
+
+		if wrap {
+			p.print("(")
+		}
+
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("await")
+		p.printSpace()
+		p.printExpr(e.Value, js_ast.LPrefix-1, 0)
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EYield:
+		wrap := level >= js_ast.LAssign
+
+		if wrap {
+			p.print("(")
+		}
+
+		p.printSpaceBeforeIdentifier()
+		p.addSourceMapping(expr.Loc)
+		p.print("yield")
+
+		if e.ValueOrNil.Data != nil {
+			if e.IsStar {
+				p.print("*")
+			}
+			p.printSpace()
+			p.printExprWithoutLeadingNewline(e.ValueOrNil, js_ast.LYield, 0)
+		}
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EUnary:
+		entry := js_ast.OpTable[e.Op]
+		wrap := level >= entry.Level
+
+		if wrap {
+			p.print("(")
+		}
+
+		if !e.Op.IsPrefix() {
+			p.printExpr(e.Value, js_ast.LPostfix-1, parentWasUnaryOrBinaryOrIfTest)
+		}
+
+		if entry.IsKeyword {
+			p.printSpaceBeforeIdentifier()
+			if e.Op.IsPrefix() {
+				p.addSourceMapping(expr.Loc)
+			}
+			p.print(entry.Text)
+			p.printSpace()
+		} else {
+			p.printSpaceBeforeOperator(e.Op)
+			if e.Op.IsPrefix() {
+				p.addSourceMapping(expr.Loc)
+			}
+			p.print(entry.Text)
+			p.prevOp = e.Op
+			p.prevOpEnd = len(p.js)
+		}
+
+		if e.Op.IsPrefix() {
+			valueFlags := parentWasUnaryOrBinaryOrIfTest
+			if e.Op == js_ast.UnOpDelete {
+				valueFlags |= isDeleteTarget
+			}
+
+			// Never turn "typeof (0, x)" into "typeof x" or "delete (0, x)" into "delete x"
+			if (e.Op == js_ast.UnOpTypeof && !e.WasOriginallyTypeofIdentifier && p.isUnboundIdentifier(e.Value)) ||
+				(e.Op == js_ast.UnOpDelete && !e.WasOriginallyDeleteOfIdentifierOrPropertyAccess && p.isIdentifierOrNumericConstantOrPropertyAccess(e.Value)) {
+				p.print("(0,")
+				p.printSpace()
+				p.printExpr(e.Value, js_ast.LPrefix-1, valueFlags)
+				p.print(")")
+			} else {
+				p.printExpr(e.Value, js_ast.LPrefix-1, valueFlags)
+			}
+		}
+
+		if wrap {
+			p.print(")")
+		}
+
+	case *js_ast.EBinary:
+		// The handling of binary expressions is convoluted because we're using
+		// iteration on the heap instead of recursion on the call stack to avoid
+		// stack overflow for deeply-nested ASTs. See the comments for the similar
+		// code in the JavaScript parser for details.
+		v := binaryExprVisitor{
+			e:     e,
+			level: level,
+			flags: flags,
+		}
+
+		// Use a single stack to reduce allocation overhead
+		stackBottom := len(p.binaryExprStack)
+
+		for {
+			// Check whether this node is a special case, and stop if it is
+			if !v.checkAndPrepare(p) {
+				break
+			}
+
+			left := v.e.Left
+			leftBinary, ok := left.Data.(*js_ast.EBinary)
+
+			// Stop iterating if iteration doesn't apply to the left node
+			if !ok {
+				p.printExpr(left, v.leftLevel, v.leftFlags)
+				v.visitRightAndFinish(p)
+				break
+			}
+
+			// Manually run the code at the start of "printExpr"
+			p.printExprCommentsAtLoc(left.Loc)
+
+			// Only allocate heap memory on the stack for nested binary expressions
+			p.binaryExprStack = append(p.binaryExprStack, v)
+			v = binaryExprVisitor{
+				e:     leftBinary,
+				level: v.leftLevel,
+				flags: v.leftFlags,
+			}
+		}
+
+		// Process all binary operations from the deepest-visited node back toward
+		// our original top-level binary operation
+		for {
+			n := len(p.binaryExprStack) - 1
+			if n < stackBottom {
+				break
+			}
+			v := p.binaryExprStack[n]
+			p.binaryExprStack = p.binaryExprStack[:n]
+			v.visitRightAndFinish(p)
+		}
+
+	default:
+		panic(fmt.Sprintf("Unexpected expression of type %T", expr.Data))
+	}
+}
+
// The handling of binary expressions is convoluted because we're using
// iteration on the heap instead of recursion on the call stack to avoid
// stack overflow for deeply-nested ASTs. See the comments for the similar
// code in the JavaScript parser for details.
type binaryExprVisitor struct {
	// Inputs
	e     *js_ast.EBinary // the binary expression being printed
	level js_ast.L        // precedence level of the surrounding context
	flags printExprFlags  // flags inherited from the surrounding context

	// Input for visiting the left child
	leftLevel js_ast.L
	leftFlags printExprFlags

	// "Local variables" passed from "checkAndPrepare" to "visitRightAndFinish"
	entry      js_ast.OpTableEntry // operator metadata cached from "js_ast.OpTable[e.Op]"
	wrap       bool                // whether the whole expression is wrapped in "(...)"
	rightLevel js_ast.L            // precedence level used to print the right operand
}
+
// checkAndPrepare prints everything that comes before this binary
// expression's left operand: comma-operator simplification, the opening
// parenthesis when wrapping is needed, and the special case of
// "#private in obj". It also computes the precedence levels and flags for
// both children. It returns false when the expression was fully handled
// here and iteration should stop for this node; it returns true when the
// caller should print the left operand and then call "visitRightAndFinish".
func (v *binaryExprVisitor) checkAndPrepare(p *printer) bool {
	e := v.e

	// If this is a comma operator then either the result is unused (and we
	// should have already simplified unused expressions), or the result is used
	// (and we can still simplify unused expressions inside the left operand)
	if e.Op == js_ast.BinOpComma {
		if (v.flags & didAlreadySimplifyUnusedExprs) == 0 {
			left := p.simplifyUnusedExpr(e.Left)
			right := e.Right
			if (v.flags & exprResultIsUnused) != 0 {
				right = p.simplifyUnusedExpr(right)
			}
			if left.Data != e.Left.Data || right.Data != e.Right.Data {
				// Pass a flag so we don't needlessly re-simplify the same expression
				p.printExpr(p.guardAgainstBehaviorChangeDueToSubstitution(js_ast.JoinWithComma(left, right), v.flags), v.level, v.flags|didAlreadySimplifyUnusedExprs)
				return false
			}
		} else {
			// Pass a flag so we don't needlessly re-simplify the same expression
			v.flags |= didAlreadySimplifyUnusedExprs
		}
	}

	v.entry = js_ast.OpTable[e.Op]

	// Wrap when the surrounding precedence is too high for this operator, or
	// when "in" is forbidden in this context (e.g. a "for" loop initializer)
	v.wrap = v.level >= v.entry.Level || (e.Op == js_ast.BinOpIn && (v.flags&forbidIn) != 0)

	// Destructuring assignments must be parenthesized
	if n := len(p.js); p.stmtStart == n || p.arrowExprStart == n {
		if _, ok := e.Left.Data.(*js_ast.EObject); ok {
			v.wrap = true
		}
	}

	if v.wrap {
		p.print("(")
		v.flags &= ^forbidIn
	}

	// Each child prints at one level below this operator by default; the
	// non-associative side is bumped back up so it gets parenthesized
	v.leftLevel = v.entry.Level - 1
	v.rightLevel = v.entry.Level - 1

	if e.Op.IsRightAssociative() {
		v.leftLevel = v.entry.Level
	}
	if e.Op.IsLeftAssociative() {
		v.rightLevel = v.entry.Level
	}

	switch e.Op {
	case js_ast.BinOpNullishCoalescing:
		// "??" can't directly contain "||" or "&&" without being wrapped in parentheses
		if left, ok := e.Left.Data.(*js_ast.EBinary); ok && (left.Op == js_ast.BinOpLogicalOr || left.Op == js_ast.BinOpLogicalAnd) {
			v.leftLevel = js_ast.LPrefix
		}
		if right, ok := e.Right.Data.(*js_ast.EBinary); ok && (right.Op == js_ast.BinOpLogicalOr || right.Op == js_ast.BinOpLogicalAnd) {
			v.rightLevel = js_ast.LPrefix
		}

	case js_ast.BinOpPow:
		// "**" can't contain certain unary expressions
		if left, ok := e.Left.Data.(*js_ast.EUnary); ok && left.Op.UnaryAssignTarget() == js_ast.AssignTargetNone {
			v.leftLevel = js_ast.LCall
		} else if _, ok := e.Left.Data.(*js_ast.EAwait); ok {
			v.leftLevel = js_ast.LCall
		} else if _, ok := e.Left.Data.(*js_ast.EUndefined); ok {
			// Undefined is printed as "void 0"
			v.leftLevel = js_ast.LCall
		} else if _, ok := e.Left.Data.(*js_ast.ENumber); ok {
			// Negative numbers are printed using a unary operator
			v.leftLevel = js_ast.LCall
		} else if p.options.MinifySyntax {
			// When minifying, booleans are printed as "!0" and "!1"
			if _, ok := e.Left.Data.(*js_ast.EBoolean); ok {
				v.leftLevel = js_ast.LCall
			}
		}
	}

	// Special-case "#foo in bar"
	if private, ok := e.Left.Data.(*js_ast.EPrivateIdentifier); ok && e.Op == js_ast.BinOpIn {
		name := p.renamer.NameForSymbol(private.Ref)
		p.addSourceMappingForName(e.Left.Loc, name, private.Ref)
		p.printIdentifier(name)
		v.visitRightAndFinish(p)
		return false
	}

	if e.Op == js_ast.BinOpComma {
		// The result of the left operand of the comma operator is unused
		v.leftFlags = (v.flags & forbidIn) | exprResultIsUnused | parentWasUnaryOrBinaryOrIfTest
	} else {
		v.leftFlags = (v.flags & forbidIn) | parentWasUnaryOrBinaryOrIfTest
	}
	return true
}
+
// visitRightAndFinish prints the operator token, then the right operand, and
// finally the closing parenthesis when "checkAndPrepare" decided to wrap.
// It must be called exactly once after the left operand has been printed.
func (v *binaryExprVisitor) visitRightAndFinish(p *printer) {
	e := v.e

	// No space is printed before a "," operator
	if e.Op != js_ast.BinOpComma {
		p.printSpace()
	}

	if v.entry.IsKeyword {
		p.printSpaceBeforeIdentifier()
		p.print(v.entry.Text)
	} else {
		p.printSpaceBeforeOperator(e.Op)
		p.print(v.entry.Text)
		// Remember the operator so a following token can avoid accidentally
		// merging with it (e.g. "+" followed by "+")
		p.prevOp = e.Op
		p.prevOpEnd = len(p.js)
	}

	if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
		p.printSpace()
	}

	if e.Op == js_ast.BinOpComma {
		// The result of the right operand of the comma operator is unused if the caller doesn't use it
		p.printExpr(e.Right, v.rightLevel, (v.flags&(forbidIn|exprResultIsUnused))|parentWasUnaryOrBinaryOrIfTest)
	} else {
		p.printExpr(e.Right, v.rightLevel, (v.flags&forbidIn)|parentWasUnaryOrBinaryOrIfTest)
	}

	if v.wrap {
		p.print(")")
	}
}
+
+func (p *printer) isUnboundEvalIdentifier(value js_ast.Expr) bool {
+	if id, ok := value.Data.(*js_ast.EIdentifier); ok {
+		// Using the original name here is ok since unbound symbols are not renamed
+		symbol := p.symbols.Get(ast.FollowSymbols(p.symbols, id.Ref))
+		return symbol.Kind == ast.SymbolUnbound && symbol.OriginalName == "eval"
+	}
+	return false
+}
+
+// Convert an integer to a byte slice without any allocations
+func (p *printer) smallIntToBytes(n int) []byte {
+	wasNegative := n < 0
+	if wasNegative {
+		// This assumes that -math.MinInt isn't a problem. This is fine because
+		// these integers are floating-point exponents which never go up that high.
+		n = -n
+	}
+
+	bytes := p.intToBytesBuffer[:]
+	start := len(bytes)
+
+	// Write out the number from the end to the front
+	for {
+		start--
+		bytes[start] = '0' + byte(n%10)
+		n /= 10
+		if n == 0 {
+			break
+		}
+	}
+
+	// Stick a negative sign on the front if needed
+	if wasNegative {
+		start--
+		bytes[start] = '-'
+	}
+
+	return bytes[start:]
+}
+
// parseSmallInt converts ASCII decimal digits (optionally preceded by "-")
// back into an int. There is deliberately no error checking and no overflow
// handling: the inputs are floating-point exponents produced by this
// printer, which never get large enough to overflow.
func parseSmallInt(bytes []byte) int {
	if bytes[0] == '-' {
		return -parseSmallInt(bytes[1:])
	}
	n := 0
	for _, digit := range bytes {
		n = 10*n + int(digit-'0')
	}
	return n
}
+
// printNonNegativeFloat prints a non-negative float64 using a short
// representation: small integers are printed directly, otherwise the output
// of strconv.FormatFloat is rewritten in place to shrink the exponent, drop
// a leading "0", trade a decimal point for an exponent (or vice versa), and
// — when minifying — switch to hexadecimal if that is shorter. It also
// records when the printed number could absorb a following "." so that a
// space can be inserted before a property access.
func (p *printer) printNonNegativeFloat(absValue float64) {
	// We can avoid the slow call to strconv.FormatFloat() for integers less than
	// 1000 because we know that exponential notation will always be longer than
	// the integer representation. This is not the case for 1000 which is "1e3".
	if absValue < 1000 {
		if asInt := int64(absValue); absValue == float64(asInt) {
			p.printBytes(p.smallIntToBytes(int(asInt)))

			// Integers always need a space before "." to avoid making a decimal point
			p.needSpaceBeforeDot = len(p.js)
			return
		}
	}

	// Format this number into a byte slice so we can mutate it in place without
	// further reallocation
	result := []byte(strconv.FormatFloat(absValue, 'g', -1, 64))

	// Simplify the exponent
	// "e+05" => "e5"
	// "e-05" => "e-5"
	if e := bytes.LastIndexByte(result, 'e'); e != -1 {
		from := e + 1
		to := from

		switch result[from] {
		case '+':
			// Strip off the leading "+"
			from++

		case '-':
			// Skip past the leading "-"
			to++
			from++
		}

		// Strip off leading zeros
		for from < len(result) && result[from] == '0' {
			from++
		}

		result = append(result[:to], result[from:]...)
	}

	dot := bytes.IndexByte(result, '.')

	if dot == 1 && result[0] == '0' {
		// Simplify numbers starting with "0."
		afterDot := 2

		// Strip off the leading zero when minifying
		// "0.5" => ".5"
		if p.options.MinifyWhitespace {
			result = result[1:]
			afterDot--
		}

		// Try using an exponent
		// "0.001" => "1e-3"
		if result[afterDot] == '0' {
			i := afterDot + 1
			for result[i] == '0' {
				i++
			}
			remaining := result[i:]
			exponent := p.smallIntToBytes(afterDot - i - len(remaining))

			// Only switch if it's actually shorter
			if len(result) > len(remaining)+1+len(exponent) {
				result = append(append(remaining, 'e'), exponent...)
			}
		}
	} else if dot != -1 {
		// Try to get rid of a "." and maybe also an "e"
		if e := bytes.LastIndexByte(result, 'e'); e != -1 {
			integer := result[:dot]
			fraction := result[dot+1 : e]
			exponent := parseSmallInt(result[e+1:]) - len(fraction)

			// Handle small exponents by appending zeros instead
			if exponent >= 0 && exponent <= 2 {
				// "1.2e1" => "12"
				// "1.2e2" => "120"
				// "1.2e3" => "1200"
				if len(result) >= len(integer)+len(fraction)+exponent {
					result = append(integer, fraction...)
					for i := 0; i < exponent; i++ {
						result = append(result, '0')
					}
				}
			} else {
				// "1.2e4" => "12e3"
				exponent := p.smallIntToBytes(exponent)
				if len(result) >= len(integer)+len(fraction)+1+len(exponent) {
					result = append(append(append(integer, fraction...), 'e'), exponent...)
				}
			}
		}
	} else if result[len(result)-1] == '0' {
		// Simplify numbers ending with "0" by trying to use an exponent
		// "1000" => "1e3"
		i := len(result) - 1
		for i > 0 && result[i-1] == '0' {
			i--
		}
		remaining := result[:i]
		exponent := p.smallIntToBytes(len(result) - i)

		// Only switch if it's actually shorter
		if len(result) > len(remaining)+1+len(exponent) {
			result = append(append(remaining, 'e'), exponent...)
		}
	}

	// Numbers in this range can potentially be printed with one fewer byte as
	// hex. This compares against 0xFFFF_FFFF_FFFF_F800 instead of comparing
	// against 0xFFFF_FFFF_FFFF_FFFF because 0xFFFF_FFFF_FFFF_FFFF when converted
	// to float64 rounds up to 0x1_0000_0000_0000_0180, which can no longer fit
	// into uint64. In Go, the result of converting float64 to uint64 outside of
	// the uint64 range is implementation-dependent and is different on amd64 vs.
	// arm64. The float64 value 0xFFFF_FFFF_FFFF_F800 is the biggest value that
	// is below the float64 value 0x1_0000_0000_0000_0180, so we use that instead.
	if p.options.MinifyWhitespace && absValue >= 1_000_000_000_000 && absValue <= 0xFFFF_FFFF_FFFF_F800 {
		if asInt := uint64(absValue); absValue == float64(asInt) {
			if hex := strconv.FormatUint(asInt, 16); 2+len(hex) < len(result) {
				result = append(append(result[:0], '0', 'x'), hex...)
			}
		}
	}

	p.printBytes(result)

	// We'll need a space before "." if it could be parsed as a decimal point
	if !bytes.ContainsAny(result, ".ex") {
		p.needSpaceBeforeDot = len(p.js)
	}
}
+
// printDeclStmt prints a whole declaration statement, e.g. "var x = 1;" or
// "export let y;", including indentation and the trailing semicolon.
func (p *printer) printDeclStmt(isExport bool, keyword string, decls []js_ast.Decl) {
	p.printIndent()
	p.printSpaceBeforeIdentifier()
	if isExport {
		p.print("export ")
	}
	p.printDecls(keyword, decls, 0)
	p.printSemicolonAfterStatement()
}
+
+func (p *printer) printForLoopInit(init js_ast.Stmt, flags printExprFlags) {
+	switch s := init.Data.(type) {
+	case *js_ast.SExpr:
+		p.printExpr(s.Value, js_ast.LLowest, flags|exprResultIsUnused)
+	case *js_ast.SLocal:
+		switch s.Kind {
+		case js_ast.LocalAwaitUsing:
+			p.printDecls("await using", s.Decls, flags)
+		case js_ast.LocalConst:
+			p.printDecls("const", s.Decls, flags)
+		case js_ast.LocalLet:
+			p.printDecls("let", s.Decls, flags)
+		case js_ast.LocalUsing:
+			p.printDecls("using", s.Decls, flags)
+		case js_ast.LocalVar:
+			p.printDecls("var", s.Decls, flags)
+		}
+	default:
+		panic("Internal error")
+	}
+}
+
+func (p *printer) printDecls(keyword string, decls []js_ast.Decl, flags printExprFlags) {
+	p.print(keyword)
+	p.printSpace()
+
+	for i, decl := range decls {
+		if i != 0 {
+			p.print(",")
+			if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
+				p.printSpace()
+			}
+		}
+		p.printBinding(decl.Binding)
+
+		if decl.ValueOrNil.Data != nil {
+			p.printSpace()
+			p.print("=")
+			p.printSpace()
+			p.printExprWithoutLeadingNewline(decl.ValueOrNil, js_ast.LComma, flags)
+		}
+	}
+}
+
+func (p *printer) printBody(body js_ast.Stmt, isSingleLine bool) {
+	if block, ok := body.Data.(*js_ast.SBlock); ok {
+		p.printSpace()
+		p.printBlock(body.Loc, *block)
+		p.printNewline()
+	} else if isSingleLine {
+		p.printNextIndentAsSpace = true
+		p.printStmt(body, 0)
+	} else {
+		p.printNewline()
+		p.options.Indent++
+		p.printStmt(body, 0)
+		p.options.Indent--
+	}
+}
+
// printBlock prints "{", the block's statements at one deeper indentation
// level, and "}", with source mappings for both braces. A pending semicolon
// is cleared because "}" already terminates the last statement.
func (p *printer) printBlock(loc logger.Loc, block js_ast.SBlock) {
	p.addSourceMapping(loc)
	p.print("{")
	p.printNewline()

	p.options.Indent++
	for _, stmt := range block.Stmts {
		p.printSemicolonIfNeeded()
		p.printStmt(stmt, canOmitStatement)
	}
	p.options.Indent--
	p.needsSemicolon = false

	p.printIndent()
	// Only map the close brace when it has its own distinct location
	if block.CloseBraceLoc.Start > loc.Start {
		p.addSourceMapping(block.CloseBraceLoc)
	}
	p.print("}")
}
+
+func wrapToAvoidAmbiguousElse(s js_ast.S) bool {
+	for {
+		switch current := s.(type) {
+		case *js_ast.SIf:
+			if current.NoOrNil.Data == nil {
+				return true
+			}
+			s = current.NoOrNil.Data
+
+		case *js_ast.SFor:
+			s = current.Body.Data
+
+		case *js_ast.SForIn:
+			s = current.Body.Data
+
+		case *js_ast.SForOf:
+			s = current.Body.Data
+
+		case *js_ast.SWhile:
+			s = current.Body.Data
+
+		case *js_ast.SWith:
+			s = current.Body.Data
+
+		case *js_ast.SLabel:
+			s = current.Stmt.Data
+
+		default:
+			return false
+		}
+	}
+}
+
// printIf prints an entire "if" statement including any "else"/"else if"
// chain. It drops an "else" branch whose expression simplifies to nothing,
// and wraps a bare "yes" branch in "{...}" when leaving it bare would attach
// the following "else" to the wrong "if" (the dangling-else ambiguity, see
// wrapToAvoidAmbiguousElse).
func (p *printer) printIf(s *js_ast.SIf) {
	p.printSpaceBeforeIdentifier()
	p.print("if")
	p.printSpace()
	p.print("(")
	// If comments must be printed inside "(...)", put the test expression on
	// its own indented line so the comments have somewhere to go
	if p.willPrintExprCommentsAtLoc(s.Test.Loc) {
		p.printNewline()
		p.options.Indent++
		p.printIndent()
		p.printExpr(s.Test, js_ast.LLowest, 0)
		p.printNewline()
		p.options.Indent--
		p.printIndent()
	} else {
		p.printExpr(s.Test, js_ast.LLowest, 0)
	}
	p.print(")")

	// Simplify the else branch, which may disappear entirely
	no := s.NoOrNil
	if expr, ok := no.Data.(*js_ast.SExpr); ok {
		if value := p.simplifyUnusedExpr(expr.Value); value.Data == nil {
			no.Data = nil
		} else if value.Data != expr.Value.Data {
			no.Data = &js_ast.SExpr{Value: value}
		}
	}

	if yes, ok := s.Yes.Data.(*js_ast.SBlock); ok {
		p.printSpace()
		p.printBlock(s.Yes.Loc, *yes)

		if no.Data != nil {
			p.printSpace()
		} else {
			p.printNewline()
		}
	} else if wrapToAvoidAmbiguousElse(s.Yes.Data) {
		// Wrap the bare "yes" branch in "{...}" so our "else" stays ours
		p.printSpace()
		p.print("{")
		p.printNewline()

		p.options.Indent++
		p.printStmt(s.Yes, canOmitStatement)
		p.options.Indent--
		p.needsSemicolon = false

		p.printIndent()
		p.print("}")

		if no.Data != nil {
			p.printSpace()
		} else {
			p.printNewline()
		}
	} else {
		p.printBody(s.Yes, s.IsSingleLineYes)

		if no.Data != nil {
			p.printIndent()
		}
	}

	if no.Data != nil {
		p.printSemicolonIfNeeded()
		p.printSpaceBeforeIdentifier()
		p.print("else")

		if block, ok := no.Data.(*js_ast.SBlock); ok {
			p.printSpace()
			p.printBlock(no.Loc, *block)
			p.printNewline()
		} else if ifStmt, ok := no.Data.(*js_ast.SIf); ok {
			// "else if" chains recurse instead of indenting another level
			p.printIf(ifStmt)
		} else {
			p.printBody(no, s.IsSingleLineNo)
		}
	}
}
+
+func (p *printer) printIndentedComment(text string) {
+	// Avoid generating a comment containing the character sequence "</script"
+	if !p.options.UnsupportedFeatures.Has(compat.InlineScript) {
+		text = helpers.EscapeClosingTag(text, "/script")
+	}
+
+	if strings.HasPrefix(text, "/*") {
+		// Re-indent multi-line comments
+		for {
+			newline := strings.IndexByte(text, '\n')
+			if newline == -1 {
+				break
+			}
+			p.print(text[:newline+1])
+			p.printIndent()
+			text = text[newline+1:]
+		}
+		p.print(text)
+		p.printNewline()
+	} else {
+		// Print a mandatory newline after single-line comments
+		p.print(text)
+		p.print("\n")
+	}
+}
+
// printPath prints the quoted module path for the given import record,
// records the import in the JSON metafile when one was requested, and
// appends any import assertions/attributes clause ("assert { ... }" or
// "with { ... }") that the target environment can represent.
func (p *printer) printPath(importRecordIndex uint32, importKind ast.ImportKind) {
	record := p.importRecords[importRecordIndex]
	p.addSourceMapping(record.Range.Loc)
	p.printQuotedUTF8(record.Path.Text, printQuotedNoWrap)

	if p.options.NeedsMetafile {
		// NOTE(review): based on the flag name, paths reaching the printer
		// are reported as external in the metafile unless explicitly flagged
		// otherwise — confirm against the metafile writer.
		external := ""
		if (record.Flags & ast.ShouldNotBeExternalInMetafile) == 0 {
			external = ",\n          \"external\": true"
		}
		p.jsonMetadataImports = append(p.jsonMetadataImports, fmt.Sprintf("\n        {\n          \"path\": %s,\n          \"kind\": %s%s\n        }",
			helpers.QuoteForJSON(record.Path.Text, p.options.ASCIIOnly),
			helpers.QuoteForJSON(importKind.StringForMetafile(), p.options.ASCIIOnly),
			external))
	}

	// Assertions/attributes are only printed on "import" statements here
	if record.AssertOrWith != nil && importKind == ast.ImportStmt {
		feature := compat.ImportAttributes
		if record.AssertOrWith.Keyword == ast.AssertKeyword {
			feature = compat.ImportAssertions
		}

		// Omit import assertions/attributes on this import statement if they would cause a syntax error
		if p.options.UnsupportedFeatures.Has(feature) {
			return
		}

		p.printSpace()
		p.addSourceMapping(record.AssertOrWith.KeywordLoc)
		p.print(record.AssertOrWith.Keyword.String())
		p.printSpace()
		p.printImportAssertOrWithClause(*record.AssertOrWith)
	}
}
+
// printImportCallAssertOrWith prints the optional second argument of an
// "import()" call (e.g. "{ with: { type: 'json' } }"). The surrounding
// object is split across multiple lines only when comments must be printed
// at one of its recorded brace/keyword locations.
func (p *printer) printImportCallAssertOrWith(assertOrWith *ast.ImportAssertOrWith, outerIsMultiLine bool) {
	// Omit import assertions/attributes if we know the "import()" syntax doesn't
	// support a second argument (i.e. both import assertions and import
	// attributes aren't supported) and doing so would cause a syntax error
	if assertOrWith == nil || (p.options.UnsupportedFeatures.Has(compat.ImportAssertions) && p.options.UnsupportedFeatures.Has(compat.ImportAttributes)) {
		return
	}

	// Comments attached to any of these locations force multi-line output
	isMultiLine := p.willPrintExprCommentsAtLoc(assertOrWith.KeywordLoc) ||
		p.willPrintExprCommentsAtLoc(assertOrWith.InnerOpenBraceLoc) ||
		p.willPrintExprCommentsAtLoc(assertOrWith.OuterCloseBraceLoc)

	p.print(",")
	if outerIsMultiLine {
		p.printNewline()
		p.printIndent()
	} else {
		p.printSpace()
	}
	p.printExprCommentsAtLoc(assertOrWith.OuterOpenBraceLoc)
	p.addSourceMapping(assertOrWith.OuterOpenBraceLoc)
	p.print("{")

	if isMultiLine {
		p.printNewline()
		p.options.Indent++
		p.printIndent()
	} else {
		p.printSpace()
	}

	// Print the "assert" or "with" keyword as the property key
	p.printExprCommentsAtLoc(assertOrWith.KeywordLoc)
	p.addSourceMapping(assertOrWith.KeywordLoc)
	p.print(assertOrWith.Keyword.String())
	p.print(":")

	if p.willPrintExprCommentsAtLoc(assertOrWith.InnerOpenBraceLoc) {
		p.printNewline()
		p.options.Indent++
		p.printIndent()
		p.printExprCommentsAtLoc(assertOrWith.InnerOpenBraceLoc)
		p.printImportAssertOrWithClause(*assertOrWith)
		p.options.Indent--
	} else {
		p.printSpace()
		p.printImportAssertOrWithClause(*assertOrWith)
	}

	if isMultiLine {
		p.printNewline()
		p.printExprCommentsAfterCloseTokenAtLoc(assertOrWith.OuterCloseBraceLoc)
		p.options.Indent--
		p.printIndent()
	} else {
		p.printSpace()
	}

	p.addSourceMapping(assertOrWith.OuterCloseBraceLoc)
	p.print("}")
}
+
// printImportAssertOrWithClause prints the inner "{ key: 'value', ... }"
// object of an import assertion/attribute clause. It goes multi-line only
// when a comment must be printed at the closing brace or at one of the
// entry key/value locations.
func (p *printer) printImportAssertOrWithClause(assertOrWith ast.ImportAssertOrWith) {
	isMultiLine := p.willPrintExprCommentsAtLoc(assertOrWith.InnerCloseBraceLoc)
	if !isMultiLine {
		for _, entry := range assertOrWith.Entries {
			if p.willPrintExprCommentsAtLoc(entry.KeyLoc) || p.willPrintExprCommentsAtLoc(entry.ValueLoc) {
				isMultiLine = true
				break
			}
		}
	}

	p.addSourceMapping(assertOrWith.InnerOpenBraceLoc)
	p.print("{")
	if isMultiLine {
		p.options.Indent++
	}

	for i, entry := range assertOrWith.Entries {
		if i > 0 {
			p.print(",")
		}
		if isMultiLine {
			p.printNewline()
			p.printIndent()
		} else {
			p.printSpace()
		}

		p.printExprCommentsAtLoc(entry.KeyLoc)
		p.addSourceMapping(entry.KeyLoc)
		// Keys print unquoted when they are valid identifiers and quoting
		// wasn't explicitly preferred in the source
		if !entry.PreferQuotedKey && p.canPrintIdentifierUTF16(entry.Key) {
			p.printSpaceBeforeIdentifier()
			p.printIdentifierUTF16(entry.Key)
		} else {
			p.printQuotedUTF16(entry.Key, 0)
		}

		p.print(":")

		if p.willPrintExprCommentsAtLoc(entry.ValueLoc) {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
			p.printExprCommentsAtLoc(entry.ValueLoc)
			p.addSourceMapping(entry.ValueLoc)
			p.printQuotedUTF16(entry.Value, 0)
			p.options.Indent--
		} else {
			p.printSpace()
			p.addSourceMapping(entry.ValueLoc)
			p.printQuotedUTF16(entry.Value, 0)
		}
	}

	if isMultiLine {
		p.printNewline()
		p.printExprCommentsAfterCloseTokenAtLoc(assertOrWith.InnerCloseBraceLoc)
		p.options.Indent--
		p.printIndent()
	} else if len(assertOrWith.Entries) > 0 {
		p.printSpace()
	}

	p.addSourceMapping(assertOrWith.InnerCloseBraceLoc)
	p.print("}")
}
+
// printStmtFlags holds bit flags that alter how printStmt emits a statement.
type printStmtFlags uint8

const (
	// canOmitStatement is set when the statement appears in a position
	// (e.g. inside a block) where it can be dropped entirely if it
	// minifies away to nothing, instead of printing a lone ";".
	canOmitStatement printStmtFlags = 1 << iota
)
+
// printStmt prints a single statement at the current indentation level.
// It dispatches on the concrete statement node type and handles the
// statement-level concerns shared by all of them: indentation, source
// mappings, trailing semicolons/newlines, and (when minifying) omitting
// statements that simplify away to nothing when "canOmitStatement" is set.
func (p *printer) printStmt(stmt js_ast.Stmt, flags printStmtFlags) {
	if p.options.LineLimit > 0 {
		p.printNewlinePastLineLimit()
	}

	switch s := stmt.Data.(type) {
	case *js_ast.SComment:
		text := s.Text

		if s.IsLegalComment {
			switch p.options.LegalComments {
			case config.LegalCommentsNone:
				return

			case config.LegalCommentsEndOfFile,
				config.LegalCommentsLinkedWithComment,
				config.LegalCommentsExternalWithoutComment:

				// Don't record the same legal comment more than once per file
				if p.hasLegalComment == nil {
					p.hasLegalComment = make(map[string]struct{})
				} else if _, ok := p.hasLegalComment[text]; ok {
					return
				}
				p.hasLegalComment[text] = struct{}{}
				p.extractedLegalComments = append(p.extractedLegalComments, text)
				return
			}
		}

		p.printIndent()
		p.addSourceMapping(stmt.Loc)
		p.printIndentedComment(text)

	case *js_ast.SFunction:
		if !p.options.MinifyWhitespace && s.Fn.HasNoSideEffectsComment {
			p.printIndent()
			p.print("// @__NO_SIDE_EFFECTS__\n")
		}
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		if s.IsExport {
			p.print("export ")
		}
		if s.Fn.IsAsync {
			p.print("async ")
		}
		p.print("function")
		if s.Fn.IsGenerator {
			p.print("*")
			p.printSpace()
		}
		p.printSpaceBeforeIdentifier()
		name := p.renamer.NameForSymbol(s.Fn.Name.Ref)
		p.addSourceMappingForName(s.Fn.Name.Loc, name, s.Fn.Name.Ref)
		p.printIdentifier(name)
		p.printFn(s.Fn)
		p.printNewline()

	case *js_ast.SClass:
		omitIndent := p.printDecorators(s.Class.Decorators, printNewlineAfterDecorator)
		if !omitIndent {
			p.printIndent()
		}
		p.printSpaceBeforeIdentifier()
		p.addSourceMapping(stmt.Loc)
		if s.IsExport {
			p.print("export ")
		}
		p.print("class ")
		name := p.renamer.NameForSymbol(s.Class.Name.Ref)
		p.addSourceMappingForName(s.Class.Name.Loc, name, s.Class.Name.Ref)
		p.printIdentifier(name)
		p.printClass(s.Class)
		p.printNewline()

	case *js_ast.SEmpty:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.print(";")
		p.printNewline()

	case *js_ast.SExportDefault:
		if !p.options.MinifyWhitespace {
			if s2, ok := s.Value.Data.(*js_ast.SFunction); ok && s2.Fn.HasNoSideEffectsComment {
				p.printIndent()
				p.print("// @__NO_SIDE_EFFECTS__\n")
			}
		}
		omitIndent := false
		if s2, ok := s.Value.Data.(*js_ast.SClass); ok {
			omitIndent = p.printDecorators(s2.Class.Decorators, printNewlineAfterDecorator)
		}
		p.addSourceMapping(stmt.Loc)
		if !omitIndent {
			p.printIndent()
		}
		p.printSpaceBeforeIdentifier()
		p.print("export default")
		p.printSpace()

		switch s2 := s.Value.Data.(type) {
		case *js_ast.SExpr:
			// Functions and classes must be wrapped to avoid confusion with their statement forms
			p.exportDefaultStart = len(p.js)

			p.printExprWithoutLeadingNewline(s2.Value, js_ast.LComma, 0)
			p.printSemicolonAfterStatement()
			return

		case *js_ast.SFunction:
			p.printSpaceBeforeIdentifier()
			if s2.Fn.IsAsync {
				p.print("async ")
			}
			p.print("function")
			if s2.Fn.IsGenerator {
				p.print("*")
				p.printSpace()
			}
			if s2.Fn.Name != nil {
				p.printSpaceBeforeIdentifier()
				name := p.renamer.NameForSymbol(s2.Fn.Name.Ref)
				p.addSourceMappingForName(s2.Fn.Name.Loc, name, s2.Fn.Name.Ref)
				p.printIdentifier(name)
			}
			p.printFn(s2.Fn)
			p.printNewline()

		case *js_ast.SClass:
			p.printSpaceBeforeIdentifier()
			p.print("class")
			if s2.Class.Name != nil {
				p.print(" ")
				name := p.renamer.NameForSymbol(s2.Class.Name.Ref)
				p.addSourceMappingForName(s2.Class.Name.Loc, name, s2.Class.Name.Ref)
				p.printIdentifier(name)
			}
			p.printClass(s2.Class)
			p.printNewline()

		default:
			panic("Internal error")
		}

	case *js_ast.SExportStar:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("export")
		p.printSpace()
		p.print("*")
		p.printSpace()
		if s.Alias != nil {
			p.print("as")
			p.printSpace()
			p.printClauseAlias(s.Alias.Loc, s.Alias.OriginalName)
			p.printSpace()
			p.printSpaceBeforeIdentifier()
		}
		p.print("from")
		p.printSpace()
		p.printPath(s.ImportRecordIndex, ast.ImportStmt)
		p.printSemicolonAfterStatement()

	case *js_ast.SExportClause:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("export")
		p.printSpace()
		p.print("{")

		if !s.IsSingleLine {
			p.options.Indent++
		}

		for i, item := range s.Items {
			if i != 0 {
				p.print(",")
			}

			if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
				if s.IsSingleLine {
					p.printSpace()
				} else {
					p.printNewline()
					p.printIndent()
				}
			}

			name := p.renamer.NameForSymbol(item.Name.Ref)
			p.addSourceMappingForName(item.Name.Loc, name, item.Name.Ref)
			p.printIdentifier(name)
			if name != item.Alias {
				p.print(" as")
				p.printSpace()
				p.printClauseAlias(item.AliasLoc, item.Alias)
			}
		}

		if !s.IsSingleLine {
			p.options.Indent--
			p.printNewline()
			p.printIndent()
		} else if len(s.Items) > 0 {
			p.printSpace()
		}

		p.print("}")
		p.printSemicolonAfterStatement()

	case *js_ast.SExportFrom:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("export")
		p.printSpace()
		p.print("{")

		if !s.IsSingleLine {
			p.options.Indent++
		}

		for i, item := range s.Items {
			if i != 0 {
				p.print(",")
			}

			if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
				if s.IsSingleLine {
					p.printSpace()
				} else {
					p.printNewline()
					p.printIndent()
				}
			}

			p.printClauseAlias(item.Name.Loc, item.OriginalName)
			if item.OriginalName != item.Alias {
				p.printSpace()
				p.printSpaceBeforeIdentifier()
				p.print("as")
				p.printSpace()
				p.printClauseAlias(item.AliasLoc, item.Alias)
			}
		}

		if !s.IsSingleLine {
			p.options.Indent--
			p.printNewline()
			p.printIndent()
		} else if len(s.Items) > 0 {
			p.printSpace()
		}

		p.print("}")
		p.printSpace()
		p.print("from")
		p.printSpace()
		p.printPath(s.ImportRecordIndex, ast.ImportStmt)
		p.printSemicolonAfterStatement()

	case *js_ast.SLocal:
		p.addSourceMapping(stmt.Loc)
		switch s.Kind {
		case js_ast.LocalAwaitUsing:
			p.printDeclStmt(s.IsExport, "await using", s.Decls)
		case js_ast.LocalConst:
			p.printDeclStmt(s.IsExport, "const", s.Decls)
		case js_ast.LocalLet:
			p.printDeclStmt(s.IsExport, "let", s.Decls)
		case js_ast.LocalUsing:
			p.printDeclStmt(s.IsExport, "using", s.Decls)
		case js_ast.LocalVar:
			p.printDeclStmt(s.IsExport, "var", s.Decls)
		}

	case *js_ast.SIf:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printIf(s)

	case *js_ast.SDoWhile:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("do")
		if block, ok := s.Body.Data.(*js_ast.SBlock); ok {
			p.printSpace()
			p.printBlock(s.Body.Loc, *block)
			p.printSpace()
		} else {
			p.printNewline()
			p.options.Indent++
			p.printStmt(s.Body, 0)
			p.printSemicolonIfNeeded()
			p.options.Indent--
			p.printIndent()
		}
		p.print("while")
		p.printSpace()
		p.print("(")
		if p.willPrintExprCommentsAtLoc(s.Test.Loc) {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
			p.printExpr(s.Test, js_ast.LLowest, 0)
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		} else {
			p.printExpr(s.Test, js_ast.LLowest, 0)
		}
		p.print(")")
		p.printSemicolonAfterStatement()

	case *js_ast.SForIn:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("for")
		p.printSpace()
		p.print("(")
		hasInitComment := p.willPrintExprCommentsAtLoc(s.Init.Loc)
		hasValueComment := p.willPrintExprCommentsAtLoc(s.Value.Loc)
		if hasInitComment || hasValueComment {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
		}
		p.printForLoopInit(s.Init, forbidIn)
		p.printSpace()
		p.printSpaceBeforeIdentifier()
		p.print("in")
		if hasValueComment {
			p.printNewline()
			p.printIndent()
		} else {
			p.printSpace()
		}
		p.printExpr(s.Value, js_ast.LLowest, 0)
		if hasInitComment || hasValueComment {
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		}
		p.print(")")
		p.printBody(s.Body, s.IsSingleLineBody)

	case *js_ast.SForOf:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("for")
		if s.Await.Len > 0 {
			p.print(" await")
		}
		p.printSpace()
		p.print("(")
		hasInitComment := p.willPrintExprCommentsAtLoc(s.Init.Loc)
		hasValueComment := p.willPrintExprCommentsAtLoc(s.Value.Loc)
		flags := forbidIn | isFollowedByOf
		if s.Await.Len > 0 {
			flags |= isInsideForAwait
		}
		if hasInitComment || hasValueComment {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
		}
		p.forOfInitStart = len(p.js)
		p.printForLoopInit(s.Init, flags)
		p.printSpace()
		p.printSpaceBeforeIdentifier()
		p.print("of")
		if hasValueComment {
			p.printNewline()
			p.printIndent()
		} else {
			p.printSpace()
		}
		p.printExpr(s.Value, js_ast.LComma, 0)
		if hasInitComment || hasValueComment {
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		}
		p.print(")")
		p.printBody(s.Body, s.IsSingleLineBody)

	case *js_ast.SWhile:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("while")
		p.printSpace()
		p.print("(")
		if p.willPrintExprCommentsAtLoc(s.Test.Loc) {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
			p.printExpr(s.Test, js_ast.LLowest, 0)
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		} else {
			p.printExpr(s.Test, js_ast.LLowest, 0)
		}
		p.print(")")
		p.printBody(s.Body, s.IsSingleLineBody)

	case *js_ast.SWith:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("with")
		p.printSpace()
		p.print("(")
		if p.willPrintExprCommentsAtLoc(s.Value.Loc) {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
			p.printExpr(s.Value, js_ast.LLowest, 0)
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		} else {
			p.printExpr(s.Value, js_ast.LLowest, 0)
		}
		p.print(")")
		p.withNesting++
		p.printBody(s.Body, s.IsSingleLineBody)
		p.withNesting--

	case *js_ast.SLabel:
		// Avoid printing a source mapping that masks the one from the label
		if !p.options.MinifyWhitespace && (p.options.Indent > 0 || p.printNextIndentAsSpace) {
			p.addSourceMapping(stmt.Loc)
			p.printIndent()
		}

		p.printSpaceBeforeIdentifier()
		name := p.renamer.NameForSymbol(s.Name.Ref)
		p.addSourceMappingForName(s.Name.Loc, name, s.Name.Ref)
		p.printIdentifier(name)
		p.print(":")
		p.printBody(s.Stmt, s.IsSingleLineStmt)

	case *js_ast.STry:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("try")
		p.printSpace()
		p.printBlock(s.BlockLoc, s.Block)

		if s.Catch != nil {
			p.printSpace()
			p.print("catch")
			if s.Catch.BindingOrNil.Data != nil {
				p.printSpace()
				p.print("(")
				p.printBinding(s.Catch.BindingOrNil)
				p.print(")")
			}
			p.printSpace()
			p.printBlock(s.Catch.BlockLoc, s.Catch.Block)
		}

		if s.Finally != nil {
			p.printSpace()
			p.print("finally")
			p.printSpace()
			p.printBlock(s.Finally.Loc, s.Finally.Block)
		}

		p.printNewline()

	case *js_ast.SFor:
		init := s.InitOrNil
		update := s.UpdateOrNil

		// Omit calls to empty functions from the output completely
		if p.options.MinifySyntax {
			if expr, ok := init.Data.(*js_ast.SExpr); ok {
				if value := p.simplifyUnusedExpr(expr.Value); value.Data == nil {
					init.Data = nil
				} else if value.Data != expr.Value.Data {
					init.Data = &js_ast.SExpr{Value: value}
				}
			}
			if update.Data != nil {
				update = p.simplifyUnusedExpr(update)
			}
		}

		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("for")
		p.printSpace()
		p.print("(")
		isMultiLine :=
			(init.Data != nil && p.willPrintExprCommentsAtLoc(init.Loc)) ||
				(s.TestOrNil.Data != nil && p.willPrintExprCommentsAtLoc(s.TestOrNil.Loc)) ||
				(update.Data != nil && p.willPrintExprCommentsAtLoc(update.Loc))
		if isMultiLine {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
		}
		if init.Data != nil {
			p.printForLoopInit(init, forbidIn)
		}
		p.print(";")
		if isMultiLine {
			p.printNewline()
			p.printIndent()
		} else {
			p.printSpace()
		}
		if s.TestOrNil.Data != nil {
			p.printExpr(s.TestOrNil, js_ast.LLowest, 0)
		}
		p.print(";")
		if !isMultiLine {
			p.printSpace()
		} else if update.Data != nil {
			p.printNewline()
			p.printIndent()
		}
		if update.Data != nil {
			p.printExpr(update, js_ast.LLowest, exprResultIsUnused)
		}
		if isMultiLine {
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		}
		p.print(")")
		p.printBody(s.Body, s.IsSingleLineBody)

	case *js_ast.SSwitch:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("switch")
		p.printSpace()
		p.print("(")
		if p.willPrintExprCommentsAtLoc(s.Test.Loc) {
			p.printNewline()
			p.options.Indent++
			p.printIndent()
			p.printExpr(s.Test, js_ast.LLowest, 0)
			p.printNewline()
			p.options.Indent--
			p.printIndent()
		} else {
			p.printExpr(s.Test, js_ast.LLowest, 0)
		}
		p.print(")")
		p.printSpace()
		p.addSourceMapping(s.BodyLoc)
		p.print("{")
		p.printNewline()
		p.options.Indent++

		for _, c := range s.Cases {
			p.printSemicolonIfNeeded()
			p.printIndent()
			p.printExprCommentsAtLoc(c.Loc)
			p.addSourceMapping(c.Loc)

			if c.ValueOrNil.Data != nil {
				p.print("case")
				p.printSpace()
				p.printExpr(c.ValueOrNil, js_ast.LLogicalAnd, 0)
			} else {
				p.print("default")
			}
			p.print(":")

			if len(c.Body) == 1 {
				if block, ok := c.Body[0].Data.(*js_ast.SBlock); ok {
					p.printSpace()
					p.printBlock(c.Body[0].Loc, *block)
					p.printNewline()
					continue
				}
			}

			p.printNewline()
			p.options.Indent++
			for _, stmt := range c.Body {
				p.printSemicolonIfNeeded()
				p.printStmt(stmt, canOmitStatement)
			}
			p.options.Indent--
		}

		p.options.Indent--
		p.printIndent()
		p.addSourceMapping(s.CloseBraceLoc)
		p.print("}")
		p.printNewline()
		p.needsSemicolon = false

	case *js_ast.SImport:
		itemCount := 0

		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("import")
		p.printSpace()

		if s.DefaultName != nil {
			p.printSpaceBeforeIdentifier()
			name := p.renamer.NameForSymbol(s.DefaultName.Ref)
			p.addSourceMappingForName(s.DefaultName.Loc, name, s.DefaultName.Ref)
			p.printIdentifier(name)
			itemCount++
		}

		if s.Items != nil {
			if itemCount > 0 {
				p.print(",")
				p.printSpace()
			}

			p.print("{")
			if !s.IsSingleLine {
				p.options.Indent++
			}

			for i, item := range *s.Items {
				if i != 0 {
					p.print(",")
				}

				if p.options.LineLimit <= 0 || !p.printNewlinePastLineLimit() {
					if s.IsSingleLine {
						p.printSpace()
					} else {
						p.printNewline()
						p.printIndent()
					}
				}

				p.printClauseAlias(item.AliasLoc, item.Alias)

				name := p.renamer.NameForSymbol(item.Name.Ref)
				if name != item.Alias {
					p.printSpace()
					p.printSpaceBeforeIdentifier()
					p.print("as ")
					p.addSourceMappingForName(item.Name.Loc, name, item.Name.Ref)
					p.printIdentifier(name)
				}
			}

			if !s.IsSingleLine {
				p.options.Indent--
				p.printNewline()
				p.printIndent()
			} else if len(*s.Items) > 0 {
				p.printSpace()
			}

			p.print("}")
			itemCount++
		}

		if s.StarNameLoc != nil {
			if itemCount > 0 {
				p.print(",")
				p.printSpace()
			}

			p.print("*")
			p.printSpace()
			p.print("as ")
			name := p.renamer.NameForSymbol(s.NamespaceRef)
			p.addSourceMappingForName(*s.StarNameLoc, name, s.NamespaceRef)
			p.printIdentifier(name)
			itemCount++
		}

		if itemCount > 0 {
			p.printSpace()
			p.printSpaceBeforeIdentifier()
			p.print("from")
			p.printSpace()
		}

		p.printPath(s.ImportRecordIndex, ast.ImportStmt)
		p.printSemicolonAfterStatement()

	case *js_ast.SBlock:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printBlock(stmt.Loc, *s)
		p.printNewline()

	case *js_ast.SDebugger:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("debugger")
		p.printSemicolonAfterStatement()

	case *js_ast.SDirective:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.printQuotedUTF16(s.Value, 0)
		p.printSemicolonAfterStatement()

	case *js_ast.SBreak:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("break")
		if s.Label != nil {
			p.print(" ")
			name := p.renamer.NameForSymbol(s.Label.Ref)
			p.addSourceMappingForName(s.Label.Loc, name, s.Label.Ref)
			p.printIdentifier(name)
		}
		p.printSemicolonAfterStatement()

	case *js_ast.SContinue:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("continue")
		if s.Label != nil {
			p.print(" ")
			name := p.renamer.NameForSymbol(s.Label.Ref)
			p.addSourceMappingForName(s.Label.Loc, name, s.Label.Ref)
			p.printIdentifier(name)
		}
		p.printSemicolonAfterStatement()

	case *js_ast.SReturn:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("return")
		if s.ValueOrNil.Data != nil {
			p.printSpace()
			p.printExprWithoutLeadingNewline(s.ValueOrNil, js_ast.LLowest, 0)
		}
		p.printSemicolonAfterStatement()

	case *js_ast.SThrow:
		p.addSourceMapping(stmt.Loc)
		p.printIndent()
		p.printSpaceBeforeIdentifier()
		p.print("throw")
		p.printSpace()
		p.printExprWithoutLeadingNewline(s.Value, js_ast.LLowest, 0)
		p.printSemicolonAfterStatement()

	case *js_ast.SExpr:
		value := s.Value

		// Omit calls to empty functions from the output completely
		if p.options.MinifySyntax {
			value = p.simplifyUnusedExpr(value)
			if value.Data == nil {
				// If this statement is not in a block, then we still need to emit something
				if (flags & canOmitStatement) == 0 {
					// "if (x) empty();" => "if (x) ;"
					p.addSourceMapping(stmt.Loc)
					p.printIndent()
					p.print(";")
					p.printNewline()
				} else {
					// "if (x) { empty(); }" => "if (x) {}"
				}
				break
			}
		}

		// Avoid printing a source mapping when the expression would print one in
		// the same spot. We don't want to accidentally mask the mapping it emits.
		if !p.options.MinifyWhitespace && (p.options.Indent > 0 || p.printNextIndentAsSpace) {
			p.addSourceMapping(stmt.Loc)
			p.printIndent()
		}

		p.stmtStart = len(p.js)
		p.printExpr(value, js_ast.LLowest, exprResultIsUnused)
		p.printSemicolonAfterStatement()

	default:
		panic(fmt.Sprintf("Unexpected statement of type %T", stmt.Data))
	}
}
+
// Options configures a single Print call. The zero value produces plain
// output; fields opt into minification, source maps, cross-module
// enum/constant inlining, and metafile generation.
type Options struct {
	RequireOrImportMetaForSource func(uint32) RequireOrImportMeta

	// Cross-module inlining of TypeScript enums is actually done during printing
	TSEnums map[ast.Ref]map[string]js_ast.TSEnumValue

	// Cross-module inlining of detected inlinable constants is also done during printing
	ConstValues map[ast.Ref]js_ast.ConstValue

	// Property mangling results go here
	MangledProps map[ast.Ref]string

	// This will be present if the input file had a source map. In that case we
	// want to map all the way back to the original input file(s).
	InputSourceMap *sourcemap.SourceMap

	// If we're writing out a source map, this table of line start indices lets
	// us do binary search on to figure out what line a given AST node came from
	LineOffsetTables []sourcemap.LineOffsetTable

	ToCommonJSRef       ast.Ref
	ToESMRef            ast.Ref
	RuntimeRequireRef   ast.Ref
	UnsupportedFeatures compat.JSFeature
	Indent              int
	LineLimit           int
	OutputFormat        config.Format
	MinifyWhitespace    bool
	MinifyIdentifiers   bool
	MinifySyntax        bool
	ASCIIOnly           bool
	LegalComments       config.LegalComments
	SourceMap           config.SourceMap
	AddSourceMappings   bool
	NeedsMetafile       bool
}
+
// RequireOrImportMeta describes how the printer should reference another
// module at runtime (its wrapper function and exports object).
type RequireOrImportMeta struct {
	// CommonJS files will return the "require_*" wrapper function and an invalid
	// exports object reference. Lazily-initialized ESM files will return the
	// "init_*" wrapper function and the exports object for that file.
	WrapperRef     ast.Ref
	ExportsRef     ast.Ref
	IsWrapperAsync bool
}
+
// PrintResult is the output of a single Print call: the generated code
// plus any legal comments and metafile fragments that were extracted.
type PrintResult struct {
	JS                     []byte
	ExtractedLegalComments []string
	JSONMetadataImports    []string

	// This source map chunk just contains the VLQ-encoded offsets for the "JS"
	// field above. It's not a full source map. The bundler will be joining many
	// source map chunks together to form the final source map.
	SourceMapChunk sourcemap.Chunk
}
+
// Print converts a JavaScript AST into printed source code. It creates a
// fresh printer, emits any top-level directives, prints every statement in
// every part, and only builds a source-map chunk when one was requested.
func Print(tree js_ast.AST, symbols ast.SymbolMap, r renamer.Renamer, options Options) PrintResult {
	p := &printer{
		symbols:       symbols,
		renamer:       r,
		importRecords: tree.ImportRecords,
		options:       options,
		moduleType:    tree.ModuleTypeData.Type,
		exprComments:  tree.ExprComments,
		wasLazyExport: tree.HasLazyExport,

		// These byte offsets into the output buffer use -1 as a sentinel
		// meaning "no such position has been printed yet"
		stmtStart:          -1,
		exportDefaultStart: -1,
		arrowExprStart:     -1,
		forOfInitStart:     -1,

		prevOpEnd:            -1,
		needSpaceBeforeDot:   -1,
		prevRegExpEnd:        -1,
		noLeadingNewlineHere: -1,
		builder:              sourcemap.MakeChunkBuilder(options.InputSourceMap, options.LineOffsetTables, options.ASCIIOnly),
	}

	if p.exprComments != nil {
		p.printedExprComments = make(map[logger.Loc]bool)
	}

	// A symbol is "unbound" if it resolves to nothing in this module
	p.astHelpers = js_ast.MakeHelperContext(func(ref ast.Ref) bool {
		ref = ast.FollowSymbols(symbols, ref)
		return symbols.Get(ref).Kind == ast.SymbolUnbound
	})

	// Add the top-level directive if present
	for _, directive := range tree.Directives {
		p.printIndent()
		p.printQuotedUTF8(directive, 0)
		p.print(";")
		p.printNewline()
	}

	for _, part := range tree.Parts {
		for _, stmt := range part.Stmts {
			p.printStmt(stmt, canOmitStatement)
			p.printSemicolonIfNeeded()
		}
	}

	result := PrintResult{
		JS:                     p.js,
		JSONMetadataImports:    p.jsonMetadataImports,
		ExtractedLegalComments: p.extractedLegalComments,
	}
	if options.SourceMap != config.SourceMapNone {
		// This is expensive. Only do this if it's necessary.
		result.SourceMapChunk = p.builder.GenerateChunk(p.js)
	}
	return result
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/linker/debug.go b/source/vendor/github.com/evanw/esbuild/internal/linker/debug.go
new file mode 100644
index 0000000..04d0a39
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/linker/debug.go
@@ -0,0 +1,148 @@
+package linker
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/graph"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+)
+
// Set this to true and then load the resulting metafile in "graph-debugger.html"
// to debug graph information.
//
// This is deliberately not exposed in the final binary. It is *very* internal
// and only exists to help debug esbuild itself. Make sure this is always set
// back to false before committing. When false, generateExtraDataForFileJS
// below compiles away to an empty-string return.
const debugVerboseMetafile = false
+
// generateExtraDataForFileJS builds a JSON fragment (appended to the
// metafile) describing each part of a JS file's AST: liveness, import
// records, declared symbols, symbol uses, dependencies, and the source
// slice the part came from. It is a no-op unless debugVerboseMetafile is
// turned on, and exists solely to feed "graph-debugger.html".
func (c *linkerContext) generateExtraDataForFileJS(sourceIndex uint32) string {
	if !debugVerboseMetafile {
		return ""
	}

	file := &c.graph.Files[sourceIndex]
	repr := file.InputFile.Repr.(*graph.JSRepr)
	sb := strings.Builder{}
	isFirstPartWithStmts := true

	// quoteSym renders a symbol as a JSON string like "src:inner [name]"
	quoteSym := func(ref ast.Ref) string {
		name := fmt.Sprintf("%d:%d [%s]", ref.SourceIndex, ref.InnerIndex, c.graph.Symbols.Get(ref).OriginalName)
		return string(helpers.QuoteForJSON(name, c.options.ASCIIOnly))
	}

	sb.WriteString(`,"parts":[`)
	for partIndex, part := range repr.AST.Parts {
		if partIndex > 0 {
			sb.WriteByte(',')
		}
		var isFirst bool
		code := ""

		sb.WriteString(fmt.Sprintf(`{"isLive":%v`, part.IsLive))
		sb.WriteString(fmt.Sprintf(`,"canBeRemovedIfUnused":%v`, part.CanBeRemovedIfUnused))

		if partIndex == int(js_ast.NSExportPartIndex) {
			sb.WriteString(`,"nsExportPartIndex":true`)
		} else if ast.MakeIndex32(uint32(partIndex)) == repr.Meta.WrapperPartIndex {
			sb.WriteString(`,"wrapperPartIndex":true`)
		} else if len(part.Stmts) > 0 {
			// Recover the source text slice spanned by this part: from this
			// part's first statement up to the next part's first statement
			contents := file.InputFile.Source.Contents
			start := int(part.Stmts[0].Loc.Start)
			if isFirstPartWithStmts {
				start = 0
				isFirstPartWithStmts = false
			}
			end := len(contents)
			if partIndex+1 < len(repr.AST.Parts) {
				if nextStmts := repr.AST.Parts[partIndex+1].Stmts; len(nextStmts) > 0 {
					if nextStart := int(nextStmts[0].Loc.Start); nextStart >= start {
						end = int(nextStart)
					}
				}
			}
			start = moveBeforeExport(contents, start)
			end = moveBeforeExport(contents, end)
			code = contents[start:end]
		}

		// importRecords
		sb.WriteString(`,"importRecords":[`)
		isFirst = true
		for _, importRecordIndex := range part.ImportRecordIndices {
			record := repr.AST.ImportRecords[importRecordIndex]
			if !record.SourceIndex.IsValid() {
				continue
			}
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			path := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Source.PrettyPath
			sb.WriteString(fmt.Sprintf(`{"source":%s}`, helpers.QuoteForJSON(path, c.options.ASCIIOnly)))
		}
		sb.WriteByte(']')

		// declaredSymbols
		sb.WriteString(`,"declaredSymbols":[`)
		isFirst = true
		for _, declSym := range part.DeclaredSymbols {
			if !declSym.IsTopLevel {
				continue
			}
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"name":%s}`, quoteSym(declSym.Ref)))
		}
		sb.WriteByte(']')

		// symbolUses
		// NOTE(review): map iteration order is random in Go, so this array's
		// order is nondeterministic between runs — acceptable for a debug-only
		// metafile, but don't diff its output.
		sb.WriteString(`,"symbolUses":[`)
		isFirst = true
		for ref, uses := range part.SymbolUses {
			if isFirst {
				isFirst = false
			} else {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"name":%s,"countEstimate":%d}`, quoteSym(ref), uses.CountEstimate))
		}
		sb.WriteByte(']')

		// dependencies
		sb.WriteString(`,"dependencies":[`)
		for i, dep := range part.Dependencies {
			if i > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(fmt.Sprintf(`{"source":%s,"partIndex":%d}`,
				helpers.QuoteForJSON(c.graph.Files[dep.SourceIndex].InputFile.Source.PrettyPath, c.options.ASCIIOnly),
				dep.PartIndex,
			))
		}
		sb.WriteByte(']')

		// code
		sb.WriteString(`,"code":`)
		sb.Write(helpers.QuoteForJSON(code, c.options.ASCIIOnly))

		sb.WriteByte('}')
	}
	sb.WriteString(`]`)

	return sb.String()
}
+
// moveBeforeExport backs the offset i up to the start of a trailing
// "export" keyword (ignoring any whitespace between the keyword and i),
// so that part boundaries keep "export" attached to the statement that
// follows it. If no trailing "export" is found, i is returned unchanged.
func moveBeforeExport(contents string, i int) int {
	head := strings.TrimRight(contents[:i], " \t\r\n")
	if body := strings.TrimSuffix(head, "export"); body != head {
		return len(body)
	}
	return i
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/linker/linker.go b/source/vendor/github.com/evanw/esbuild/internal/linker/linker.go
new file mode 100644
index 0000000..219fe8e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/linker/linker.go
@@ -0,0 +1,7154 @@
+package linker
+
+// This package implements the second phase of the bundling operation that
+// generates the output files when given a module graph. It has been split off
+// into separate package to allow two linkers to cleanly exist in the same code
+// base. This will be useful when rewriting the linker because the new one can
+// be off by default to minimize disruption, but can still be enabled by anyone
+// to assist in giving feedback on the rewrite.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/bundler"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/css_lexer"
+	"github.com/evanw/esbuild/internal/css_parser"
+	"github.com/evanw/esbuild/internal/css_printer"
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/graph"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/js_printer"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/renamer"
+	"github.com/evanw/esbuild/internal/resolver"
+	"github.com/evanw/esbuild/internal/runtime"
+	"github.com/evanw/esbuild/internal/sourcemap"
+	"github.com/evanw/esbuild/internal/xxhash"
+)
+
// linkerContext holds all of the state for one link operation. It is built
// by Link() from the bundler's output and discarded when linking finishes.
type linkerContext struct {
	options *config.Options
	timer   *helpers.Timer
	log     logger.Log
	fs      fs.FS
	res     *resolver.Resolver
	graph   graph.LinkerGraph
	chunks  []chunkInfo

	// This helps avoid an infinite loop when matching imports to exports
	cycleDetector []importTracker

	// This represents the parallel computation of source map related data.
	// Calling this will block until the computation is done. The resulting value
	// is shared between threads and must be treated as immutable.
	dataForSourceMaps func() []bundler.DataForSourceMap

	// This is passed to us from the bundling phase
	uniqueKeyPrefix      string
	uniqueKeyPrefixBytes []byte // This is just "uniqueKeyPrefix" in byte form

	// Property mangling results go here
	mangledProps map[ast.Ref]string

	// We may need to refer to the CommonJS "module" symbol for exports
	unboundModuleRef ast.Ref

	// We may need to refer to the "__esm" and/or "__commonJS" runtime symbols
	cjsRuntimeRef ast.Ref
	esmRuntimeRef ast.Ref
}
+
// partRange identifies a contiguous run of part indices within a single
// source file (from "partIndexBegin" up to "partIndexEnd").
type partRange struct {
	sourceIndex    uint32
	partIndexBegin uint32
	partIndexEnd   uint32
}
+
// chunkInfo describes one output chunk: which files and parts it contains,
// its eventual output path, and the intermediate data (hash, source map,
// metadata callback) needed to finalize it.
type chunkInfo struct {
	// This is a random string and is used to represent the output path of this
	// chunk before the final output path has been computed.
	uniqueKey string

	filesWithPartsInChunk map[uint32]bool
	entryBits             helpers.BitSet

	// For code splitting
	crossChunkImports []chunkImport

	// This is the representation-specific information
	chunkRepr chunkRepr

	// This is the final path of this chunk relative to the output directory, but
	// without the substitution of the final hash (since it hasn't been computed).
	finalTemplate []config.PathTemplate

	// This is the final path of this chunk relative to the output directory. It
	// is the substitution of the final hash into "finalTemplate".
	finalRelPath string

	// If non-empty, this chunk needs to generate an external legal comments file.
	externalLegalComments []byte

	// This contains the hash for just this chunk without including information
	// from the hashes of other chunks. Later on in the linking process, the
	// final hash for this chunk will be constructed by merging the isolated
	// hashes of all transitive dependencies of this chunk. This is separated
	// into two phases like this to handle cycles in the chunk import graph.
	waitForIsolatedHash func() []byte

	// Other fields relating to the output file for this chunk
	jsonMetadataChunkCallback func(finalOutputSize int) helpers.Joiner
	outputSourceMap           sourcemap.SourceMapPieces

	// When this chunk is initially generated in isolation, the output pieces
	// will contain slices of the output with the unique keys of other chunks
	// omitted.
	intermediateOutput intermediateOutput

	// This information is only useful if "isEntryPoint" is true
	entryPointBit uint   // An index into "c.graph.EntryPoints"
	sourceIndex   uint32 // An index into "c.sources"
	isEntryPoint  bool

	isExecutable bool
}
+
// chunkImport records an import from one chunk to another, together with the
// kind of import that caused it (dynamic imports are treated specially when
// validating the chunk graph).
type chunkImport struct {
	chunkIndex uint32
	importKind ast.ImportKind
}
+
// outputPieceIndexKind says how to interpret the "index" field of an
// outputPiece.
type outputPieceIndexKind uint8

const (
	// No associated index: the piece is data only
	outputPieceNone outputPieceIndexKind = iota
	// "index" refers to a file whose additional (asset) file path is inserted
	outputPieceAssetIndex
	// "index" refers to another chunk whose final path is inserted
	outputPieceChunkIndex
)
+
+// This is a chunk of source code followed by a reference to another chunk. For
+// example, the file "@import 'CHUNK0001'; body { color: black; }" would be
+// represented by two pieces, one with the data "@import '" and another with the
+// data "'; body { color: black; }". The first would have the chunk index 1 and
+// the second would have an invalid chunk index.
// outputPiece is one span of a chunk's output: raw bytes in "data" optionally
// followed by a reference to an asset or another chunk (see "kind").
type outputPiece struct {
	data []byte

	// Note: The "kind" may be "outputPieceNone" in which case there is one piece
	// with data and no chunk index. For example, the chunk may not contain any
	// imports.
	index uint32
	kind  outputPieceIndexKind
}
+
// intermediateOutput is a chunk's generated contents before the final paths
// of other chunks have been substituted in. Exactly one of "pieces" or
// "joiner" is populated (see the field comments below).
type intermediateOutput struct {
	// If the chunk has references to other chunks, then "pieces" contains the
	// contents of the chunk and "joiner" should not be used. Another joiner
	// will have to be constructed later when merging the pieces together.
	pieces []outputPiece

	// If the chunk doesn't have any references to other chunks, then "pieces" is
	// nil and "joiner" contains the contents of the chunk. This is more efficient
	// because it avoids doing a join operation twice.
	joiner helpers.Joiner
}
+
// chunkRepr is the representation-specific part of a chunk. It is implemented
// by "chunkReprJS" and "chunkReprCSS" (the "isChunk" marker method exists
// only to close the set of implementations).
type chunkRepr interface{ isChunk() }

func (*chunkReprJS) isChunk()  {}
func (*chunkReprCSS) isChunk() {}
+
// chunkReprJS is the JavaScript-specific half of a chunk: the ordered files
// and parts to render, plus the cross-chunk import/export plumbing used when
// code splitting is enabled.
type chunkReprJS struct {
	filesInChunkInOrder []uint32
	partsInChunkInOrder []partRange

	// For code splitting
	exportsToOtherChunks   map[ast.Ref]string
	importsFromOtherChunks map[uint32]crossChunkImportItemArray
	crossChunkPrefixStmts  []js_ast.Stmt
	crossChunkSuffixStmts  []js_ast.Stmt

	// "cssChunkIndex" is only meaningful when "hasCSSChunk" is true
	cssChunkIndex uint32
	hasCSSChunk   bool
}
+
// chunkReprCSS is the CSS-specific half of a chunk: the ordered list of CSS
// imports it contains.
type chunkReprCSS struct {
	importsInChunkInOrder []cssImportOrder
}
+
// externalImportCSS records an import of an external path from CSS, plus any
// import conditions attached to it and the import records those conditions
// reference.
type externalImportCSS struct {
	path                   logger.Path
	conditions             []css_ast.ImportConditions
	conditionImportRecords []ast.ImportRecord
}
+
+// Returns a log where "log.HasErrors()" only returns true if any errors have
+// been logged since this call. This is useful when there have already been
+// errors logged by other linkers that share the same log.
+func wrappedLog(log logger.Log) logger.Log {
+	var mutex sync.Mutex
+	var hasErrors bool
+	addMsg := log.AddMsg
+
+	log.AddMsg = func(msg logger.Msg) {
+		if msg.Kind == logger.Error {
+			mutex.Lock()
+			defer mutex.Unlock()
+			hasErrors = true
+		}
+		addMsg(msg)
+	}
+
+	log.HasErrors = func() bool {
+		mutex.Lock()
+		defer mutex.Unlock()
+		return hasErrors
+	}
+
+	return log
+}
+
// Link runs the second phase of bundling: given the parsed input files and
// entry points from the bundler, it clones the module graph, matches imports
// to exports, tree shakes, computes chunks and their cross-chunk
// dependencies, merges mangled names, and renders all chunks in parallel into
// the final output files. Returns an empty slice if errors were logged.
func Link(
	options *config.Options,
	timer *helpers.Timer,
	log logger.Log,
	fs fs.FS,
	res *resolver.Resolver,
	inputFiles []graph.InputFile,
	entryPoints []graph.EntryPoint,
	uniqueKeyPrefix string,
	reachableFiles []uint32,
	dataForSourceMaps func() []bundler.DataForSourceMap,
) []graph.OutputFile {
	timer.Begin("Link")
	defer timer.End("Link")

	// Scope "HasErrors" to only the errors logged during this link operation
	log = wrappedLog(log)

	timer.Begin("Clone linker graph")
	c := linkerContext{
		options:              options,
		timer:                timer,
		log:                  log,
		fs:                   fs,
		res:                  res,
		dataForSourceMaps:    dataForSourceMaps,
		uniqueKeyPrefix:      uniqueKeyPrefix,
		uniqueKeyPrefixBytes: []byte(uniqueKeyPrefix),
		graph: graph.CloneLinkerGraph(
			inputFiles,
			reachableFiles,
			entryPoints,
			options.CodeSplitting,
		),
	}
	timer.End("Clone linker graph")

	// Use a smaller version of these functions if we don't need profiler names
	runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
	if c.options.ProfilerNames {
		c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJS"].Ref
		c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esm"].Ref
	} else {
		c.cjsRuntimeRef = runtimeRepr.AST.NamedExports["__commonJSMin"].Ref
		c.esmRuntimeRef = runtimeRepr.AST.NamedExports["__esmMin"].Ref
	}

	var additionalFiles []graph.OutputFile
	for _, entryPoint := range entryPoints {
		file := &c.graph.Files[entryPoint.SourceIndex].InputFile
		switch repr := file.Repr.(type) {
		case *graph.JSRepr:
			// Loaders default to CommonJS when they are the entry point and the output
			// format is not ESM-compatible since that avoids generating the ESM-to-CJS
			// machinery.
			if repr.AST.HasLazyExport && (c.options.Mode == config.ModePassThrough ||
				(c.options.Mode == config.ModeConvertFormat && !c.options.OutputFormat.KeepESMImportExportSyntax())) {
				repr.AST.ExportsKind = js_ast.ExportsCommonJS
			}

			// Entry points with ES6 exports must generate an exports object when
			// targeting non-ES6 formats. Note that the IIFE format only needs this
			// when the global name is present, since that's the only way the exports
			// can actually be observed externally.
			if repr.AST.ExportKeyword.Len > 0 && (options.OutputFormat == config.FormatCommonJS ||
				(options.OutputFormat == config.FormatIIFE && len(options.GlobalName) > 0)) {
				repr.AST.UsesExportsRef = true
				repr.Meta.ForceIncludeExportsForEntryPoint = true
			}

		case *graph.CopyRepr:
			// If an entry point uses the copy loader, then copy the file manually
			// here. Other uses of the copy loader will automatically be included
			// along with the corresponding bundled chunk but that doesn't happen
			// for entry points.
			additionalFiles = append(additionalFiles, file.AdditionalFiles...)
		}
	}

	// Allocate a new unbound symbol called "module" in case we need it later
	if c.options.OutputFormat == config.FormatCommonJS {
		c.unboundModuleRef = c.graph.GenerateNewSymbol(runtime.SourceIndex, ast.SymbolUnbound, "module")
	} else {
		c.unboundModuleRef = ast.InvalidRef
	}

	c.scanImportsAndExports()

	// Stop now if there were errors
	if c.log.HasErrors() {
		c.options.ExclusiveMangleCacheUpdate(func(map[string]interface{}, map[string]bool) {
			// Always do this so that we don't cause other entry points when there are errors
		})
		return []graph.OutputFile{}
	}

	c.treeShakingAndCodeSplitting()

	if c.options.Mode == config.ModePassThrough {
		for _, entryPoint := range c.graph.EntryPoints() {
			c.preventExportsFromBeingRenamed(entryPoint.SourceIndex)
		}
	}

	c.computeChunks()
	c.computeCrossChunkDependencies()

	// Merge mangled properties before chunks are generated since the names must
	// be consistent across all chunks, or the generated code will break
	c.timer.Begin("Waiting for mangle cache")
	c.options.ExclusiveMangleCacheUpdate(func(
		mangleCache map[string]interface{},
		cssUsedLocalNames map[string]bool,
	) {
		c.timer.End("Waiting for mangle cache")
		c.mangleProps(mangleCache)
		c.mangleLocalCSS(cssUsedLocalNames)
	})

	// Make sure calls to "ast.FollowSymbols()" in parallel goroutines after this
	// won't hit concurrent map mutation hazards
	ast.FollowAllSymbols(c.graph.Symbols)

	return c.generateChunksInParallel(additionalFiles)
}
+
// mangleProps assigns short minified names to all mangled property symbols.
// Identically-named properties from different files are merged first so the
// same original name always maps to the same minified name across all chunks.
// Entries already present in "mangleCache" are honored; entries mapped to
// "false" are reserved and excluded from mangling. Results are written to
// "c.mangledProps" and new assignments are recorded back into "mangleCache".
func (c *linkerContext) mangleProps(mangleCache map[string]interface{}) {
	c.timer.Begin("Mangle props")
	defer c.timer.End("Mangle props")

	mangledProps := make(map[ast.Ref]string)
	c.mangledProps = mangledProps

	// Reserve all JS keywords
	reservedProps := make(map[string]bool)
	for keyword := range js_lexer.Keywords {
		reservedProps[keyword] = true
	}

	// Reserve all target properties in the cache
	for original, remapped := range mangleCache {
		if remapped == false {
			reservedProps[original] = true
		} else {
			reservedProps[remapped.(string)] = true
		}
	}

	// Merge all mangled property symbols together
	freq := ast.CharFreq{}
	mergedProps := make(map[string]ast.Ref)
	for _, sourceIndex := range c.graph.ReachableFiles {
		// Don't mangle anything in the runtime code
		if sourceIndex == runtime.SourceIndex {
			continue
		}

		// For each file
		if repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr); ok {
			// Reserve all non-mangled properties
			for prop := range repr.AST.ReservedProps {
				reservedProps[prop] = true
			}

			// Merge each mangled property with other ones of the same name
			for name, ref := range repr.AST.MangledProps {
				if existing, ok := mergedProps[name]; ok {
					ast.MergeSymbols(c.graph.Symbols, ref, existing)
				} else {
					mergedProps[name] = ref
				}
			}

			// Include this file's frequency histogram, which affects the mangled names
			if repr.AST.CharFreq != nil {
				freq.Include(repr.AST.CharFreq)
			}
		}
	}

	// Sort by use count (note: does not currently account for live vs. dead code)
	sorted := make(renamer.StableSymbolCountArray, 0, len(mergedProps))
	stableSourceIndices := c.graph.StableSourceIndices
	for _, ref := range mergedProps {
		sorted = append(sorted, renamer.StableSymbolCount{
			StableSourceIndex: stableSourceIndices[ref.SourceIndex],
			Ref:               ref,
			Count:             c.graph.Symbols.Get(ref).UseCountEstimate,
		})
	}
	sort.Sort(sorted)

	// Assign names in order of use count
	minifier := ast.DefaultNameMinifierJS.ShuffleByCharFreq(freq)
	nextName := 0
	for _, symbolCount := range sorted {
		symbol := c.graph.Symbols.Get(symbolCount.Ref)

		// Don't change existing mappings
		if existing, ok := mangleCache[symbol.OriginalName]; ok {
			if existing != false {
				mangledProps[symbolCount.Ref] = existing.(string)
			}
			continue
		}

		// Generate a new name
		name := minifier.NumberToMinifiedName(nextName)
		nextName++

		// Avoid reserved properties
		for reservedProps[name] {
			name = minifier.NumberToMinifiedName(nextName)
			nextName++
		}

		// Track the new mapping
		if mangleCache != nil {
			mangleCache[symbol.OriginalName] = name
		}
		mangledProps[symbolCount.Ref] = name
	}
}
+
// mangleLocalCSS renames local CSS names so they can't collide with global
// CSS names or with local names already claimed in "usedLocalNames". With
// identifier minification on, names come from a character-frequency-tuned
// minifier; otherwise each name becomes "<fileIdentifier>_<originalName>",
// with a numeric suffix appended to resolve collisions. Results go into
// "c.mangledProps" and claimed names are recorded in "usedLocalNames".
func (c *linkerContext) mangleLocalCSS(usedLocalNames map[string]bool) {
	c.timer.Begin("Mangle local CSS")
	defer c.timer.End("Mangle local CSS")

	mangledProps := c.mangledProps
	globalNames := make(map[string]bool)
	localNames := make(map[ast.Ref]struct{})

	// Collect all local and global CSS names
	freq := ast.CharFreq{}
	for _, sourceIndex := range c.graph.ReachableFiles {
		if repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.CSSRepr); ok {
			for innerIndex, symbol := range c.graph.Symbols.SymbolsForSource[sourceIndex] {
				if symbol.Kind == ast.SymbolGlobalCSS {
					globalNames[symbol.OriginalName] = true
				} else {
					ref := ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)}
					ref = ast.FollowSymbols(c.graph.Symbols, ref)
					localNames[ref] = struct{}{}
				}
			}

			// Include this file's frequency histogram, which affects the mangled names
			if repr.AST.CharFreq != nil {
				freq.Include(repr.AST.CharFreq)
			}
		}
	}

	// Sort by use count (note: does not currently account for live vs. dead code)
	sorted := make(renamer.StableSymbolCountArray, 0, len(localNames))
	stableSourceIndices := c.graph.StableSourceIndices
	for ref := range localNames {
		sorted = append(sorted, renamer.StableSymbolCount{
			StableSourceIndex: stableSourceIndices[ref.SourceIndex],
			Ref:               ref,
			Count:             c.graph.Symbols.Get(ref).UseCountEstimate,
		})
	}
	sort.Sort(sorted)

	// Rename all local names to avoid collisions
	if c.options.MinifyIdentifiers {
		minifier := ast.DefaultNameMinifierCSS.ShuffleByCharFreq(freq)
		nextName := 0

		for _, symbolCount := range sorted {
			name := minifier.NumberToMinifiedName(nextName)
			for globalNames[name] || usedLocalNames[name] {
				nextName++
				name = minifier.NumberToMinifiedName(nextName)
			}

			// Turn this local name into a global one
			mangledProps[symbolCount.Ref] = name
			usedLocalNames[name] = true
		}
	} else {
		nameCounts := make(map[string]uint32)

		for _, symbolCount := range sorted {
			symbol := c.graph.Symbols.Get(symbolCount.Ref)
			name := fmt.Sprintf("%s_%s", c.graph.Files[symbolCount.Ref.SourceIndex].InputFile.Source.IdentifierName, symbol.OriginalName)

			// If the name is already in use, generate a new name by appending a number
			if globalNames[name] || usedLocalNames[name] {
				// To avoid O(n^2) behavior, the number must start off being the number
				// that we used last time there was a collision with this name. Otherwise
				// if there are many collisions with the same name, each name collision
				// would have to increment the counter past all previous name collisions
				// which is a O(n^2) time algorithm.
				tries, ok := nameCounts[name]
				if !ok {
					tries = 1
				}
				prefix := name

				// Keep incrementing the number until the name is unused
				for {
					tries++
					name = prefix + strconv.Itoa(int(tries))

					// Make sure this new name is unused
					if !globalNames[name] && !usedLocalNames[name] {
						// Store the count so we can start here next time instead of starting
						// from 1. This means we avoid O(n^2) behavior.
						nameCounts[prefix] = tries
						break
					}
				}
			}

			// Turn this local name into a global one
			mangledProps[symbolCount.Ref] = name
			usedLocalNames[name] = true
		}
	}
}
+
+// Currently the automatic chunk generation algorithm should by construction
+// never generate chunks that import each other since files are allocated to
+// chunks based on which entry points they are reachable from.
+//
+// This will change in the future when we allow manual chunk labels. But before
+// we allow manual chunk labels, we'll need to rework module initialization to
+// allow code splitting chunks to be lazily-initialized.
+//
+// Since that work hasn't been finished yet, cycles in the chunk import graph
+// can cause initialization bugs. So let's forbid these cycles for now to guard
+// against code splitting bugs that could cause us to generate buggy chunks.
+func (c *linkerContext) enforceNoCyclicChunkImports() {
+	var validate func(int, map[int]int) bool
+
+	// DFS memoization with 3-colors, more space efficient
+	// 0: white (unvisited), 1: gray (visiting), 2: black (visited)
+	colors := make(map[int]int)
+	validate = func(chunkIndex int, colors map[int]int) bool {
+		if colors[chunkIndex] == 1 {
+			c.log.AddError(nil, logger.Range{}, "Internal error: generated chunks contain a circular import")
+			return true
+		}
+
+		if colors[chunkIndex] == 2 {
+			return false
+		}
+
+		colors[chunkIndex] = 1
+
+		for _, chunkImport := range c.chunks[chunkIndex].crossChunkImports {
+			// Ignore cycles caused by dynamic "import()" expressions. These are fine
+			// because they don't necessarily cause initialization order issues and
+			// they don't indicate a bug in our chunk generation algorithm. They arise
+			// normally in real code (e.g. two files that import each other).
+			if chunkImport.importKind != ast.ImportDynamic {
+
+				// Recursively validate otherChunkIndex
+				if validate(int(chunkImport.chunkIndex), colors) {
+					return true
+				}
+			}
+		}
+
+		colors[chunkIndex] = 2
+		return false
+	}
+
+	for i := range c.chunks {
+		if validate(i, colors) {
+			break
+		}
+	}
+}
+
// generateChunksInParallel renders every chunk on its own goroutine, computes
// the final chunk paths (substituting content hashes where the path template
// requires them), substitutes those paths into each chunk's contents, and
// returns the combined output files including "additionalFiles" plus any
// generated source map and legal comment files.
func (c *linkerContext) generateChunksInParallel(additionalFiles []graph.OutputFile) []graph.OutputFile {
	c.timer.Begin("Generate chunks")
	defer c.timer.End("Generate chunks")

	// Generate each chunk on a separate goroutine. When a chunk needs to
	// reference the path of another chunk, it will use a temporary path called
	// the "uniqueKey" since the final path hasn't been computed yet (and is
	// in general uncomputable at this point because paths have hashes that
	// include information about chunk dependencies, and chunk dependencies
	// can be cyclic due to dynamic imports).
	generateWaitGroup := sync.WaitGroup{}
	generateWaitGroup.Add(len(c.chunks))
	for chunkIndex := range c.chunks {
		switch c.chunks[chunkIndex].chunkRepr.(type) {
		case *chunkReprJS:
			go c.generateChunkJS(chunkIndex, &generateWaitGroup)
		case *chunkReprCSS:
			go c.generateChunkCSS(chunkIndex, &generateWaitGroup)
		}
	}
	// Validate the chunk import graph while the goroutines above are running
	c.enforceNoCyclicChunkImports()
	generateWaitGroup.Wait()

	// Compute the final hashes of each chunk, then use those to create the final
	// paths of each chunk. This can technically be done in parallel but it
	// probably doesn't matter so much because we're not hashing that much data.
	visited := make([]uint32, len(c.chunks))
	var finalBytes []byte
	for chunkIndex := range c.chunks {
		chunk := &c.chunks[chunkIndex]
		var hashSubstitution *string

		// Only wait for the hash if necessary
		if config.HasPlaceholder(chunk.finalTemplate, config.HashPlaceholder) {
			// Compute the final hash using the isolated hashes of the dependencies
			hash := xxhash.New()
			c.appendIsolatedHashesForImportedChunks(hash, uint32(chunkIndex), visited, ^uint32(chunkIndex))
			finalBytes = hash.Sum(finalBytes[:0])
			finalString := bundler.HashForFileName(finalBytes)
			hashSubstitution = &finalString
		}

		// Render the last remaining placeholder in the template
		chunk.finalRelPath = config.TemplateToString(config.SubstituteTemplate(chunk.finalTemplate, config.PathPlaceholders{
			Hash: hashSubstitution,
		}))
	}

	// Generate the final output files by joining file pieces together and
	// substituting the temporary paths for the final paths. This substitution
	// can be done in parallel for each chunk.
	c.timer.Begin("Generate final output files")
	var resultsWaitGroup sync.WaitGroup
	results := make([][]graph.OutputFile, len(c.chunks))
	resultsWaitGroup.Add(len(c.chunks))
	for chunkIndex, chunk := range c.chunks {
		go func(chunkIndex int, chunk chunkInfo) {
			var outputFiles []graph.OutputFile

			// Each file may optionally contain additional files to be copied to the
			// output directory. This is used by the "file" and "copy" loaders.
			var commentPrefix string
			var commentSuffix string
			switch chunkRepr := chunk.chunkRepr.(type) {
			case *chunkReprJS:
				for _, sourceIndex := range chunkRepr.filesInChunkInOrder {
					outputFiles = append(outputFiles, c.graph.Files[sourceIndex].InputFile.AdditionalFiles...)
				}
				commentPrefix = "//"

			case *chunkReprCSS:
				for _, entry := range chunkRepr.importsInChunkInOrder {
					if entry.kind == cssImportSourceIndex {
						outputFiles = append(outputFiles, c.graph.Files[entry.sourceIndex].InputFile.AdditionalFiles...)
					}
				}
				commentPrefix = "/*"
				commentSuffix = " */"
			}

			// Path substitution for the chunk itself
			finalRelDir := c.fs.Dir(chunk.finalRelPath)
			outputContentsJoiner, outputSourceMapShifts := c.substituteFinalPaths(chunk.intermediateOutput,
				func(finalRelPathForImport string) string {
					return c.pathBetweenChunks(finalRelDir, finalRelPathForImport)
				})

			// Generate the optional legal comments file for this chunk
			if chunk.externalLegalComments != nil {
				finalRelPathForLegalComments := chunk.finalRelPath + ".LEGAL.txt"

				// Link the file to the legal comments
				if c.options.LegalComments == config.LegalCommentsLinkedWithComment {
					importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForLegalComments)
					importPath = strings.TrimPrefix(importPath, "./")
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString("/*! For license information please see ")
					outputContentsJoiner.AddString(importPath)
					outputContentsJoiner.AddString(" */\n")
				}

				// Write the external legal comments file
				outputFiles = append(outputFiles, graph.OutputFile{
					AbsPath:  c.fs.Join(c.options.AbsOutputDir, finalRelPathForLegalComments),
					Contents: chunk.externalLegalComments,
					JSONMetadataChunk: fmt.Sprintf(
						"{\n      \"imports\": [],\n      \"exports\": [],\n      \"inputs\": {},\n      \"bytes\": %d\n    }", len(chunk.externalLegalComments)),
				})
			}

			// Generate the optional source map for this chunk
			if c.options.SourceMap != config.SourceMapNone && chunk.outputSourceMap.HasContent() {
				outputSourceMap := chunk.outputSourceMap.Finalize(outputSourceMapShifts)
				finalRelPathForSourceMap := chunk.finalRelPath + ".map"

				// Potentially write a trailing source map comment
				switch c.options.SourceMap {
				case config.SourceMapLinkedWithComment:
					importPath := c.pathBetweenChunks(finalRelDir, finalRelPathForSourceMap)
					importPath = strings.TrimPrefix(importPath, "./")
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString(commentPrefix)
					outputContentsJoiner.AddString("# sourceMappingURL=")
					outputContentsJoiner.AddString(importPath)
					outputContentsJoiner.AddString(commentSuffix)
					outputContentsJoiner.AddString("\n")

				case config.SourceMapInline, config.SourceMapInlineAndExternal:
					outputContentsJoiner.EnsureNewlineAtEnd()
					outputContentsJoiner.AddString(commentPrefix)
					outputContentsJoiner.AddString("# sourceMappingURL=data:application/json;base64,")
					outputContentsJoiner.AddString(base64.StdEncoding.EncodeToString(outputSourceMap))
					outputContentsJoiner.AddString(commentSuffix)
					outputContentsJoiner.AddString("\n")
				}

				// Potentially write the external source map file
				switch c.options.SourceMap {
				case config.SourceMapLinkedWithComment, config.SourceMapInlineAndExternal, config.SourceMapExternalWithoutComment:
					outputFiles = append(outputFiles, graph.OutputFile{
						AbsPath:  c.fs.Join(c.options.AbsOutputDir, finalRelPathForSourceMap),
						Contents: outputSourceMap,
						JSONMetadataChunk: fmt.Sprintf(
							"{\n      \"imports\": [],\n      \"exports\": [],\n      \"inputs\": {},\n      \"bytes\": %d\n    }", len(outputSourceMap)),
					})
				}
			}

			// Finalize the output contents
			outputContents := outputContentsJoiner.Done()

			// Path substitution for the JSON metadata
			var jsonMetadataChunk string
			if c.options.NeedsMetafile {
				jsonMetadataChunkPieces := c.breakJoinerIntoPieces(chunk.jsonMetadataChunkCallback(len(outputContents)))
				jsonMetadataChunkBytes, _ := c.substituteFinalPaths(jsonMetadataChunkPieces, func(finalRelPathForImport string) string {
					return resolver.PrettyPath(c.fs, logger.Path{Text: c.fs.Join(c.options.AbsOutputDir, finalRelPathForImport), Namespace: "file"})
				})
				jsonMetadataChunk = string(jsonMetadataChunkBytes.Done())
			}

			// Generate the output file for this chunk
			outputFiles = append(outputFiles, graph.OutputFile{
				AbsPath:           c.fs.Join(c.options.AbsOutputDir, chunk.finalRelPath),
				Contents:          outputContents,
				JSONMetadataChunk: jsonMetadataChunk,
				IsExecutable:      chunk.isExecutable,
			})

			results[chunkIndex] = outputFiles
			resultsWaitGroup.Done()
		}(chunkIndex, chunk)
	}
	resultsWaitGroup.Wait()
	c.timer.End("Generate final output files")

	// Merge the output files from the different goroutines together in order
	outputFilesLen := len(additionalFiles)
	for _, result := range results {
		outputFilesLen += len(result)
	}
	outputFiles := make([]graph.OutputFile, 0, outputFilesLen)
	outputFiles = append(outputFiles, additionalFiles...)
	for _, result := range results {
		outputFiles = append(outputFiles, result...)
	}
	return outputFiles
}
+
+// Given a set of output pieces (i.e. a buffer already divided into the spans
+// between import paths), substitute the final import paths in and then join
+// everything into a single byte buffer.
// substituteFinalPaths joins "intermediateOutput" into a single buffer,
// replacing each asset/chunk reference with the path produced by
// "modifyPath". It also returns the source map shifts caused by each
// substitution changing the output length.
func (c *linkerContext) substituteFinalPaths(
	intermediateOutput intermediateOutput,
	modifyPath func(string) string,
) (j helpers.Joiner, shifts []sourcemap.SourceMapShift) {
	// Optimization: If there can be no substitutions, just reuse the initial
	// joiner that was used when generating the intermediate chunk output
	// instead of creating another one and copying the whole file into it.
	if intermediateOutput.pieces == nil {
		return intermediateOutput.joiner, []sourcemap.SourceMapShift{{}}
	}

	// Each substitution can change the output length, so track cumulative
	// "before" (placeholder) and "after" (substituted) offsets for source maps
	var shift sourcemap.SourceMapShift
	shifts = make([]sourcemap.SourceMapShift, 0, len(intermediateOutput.pieces))
	shifts = append(shifts, shift)

	for _, piece := range intermediateOutput.pieces {
		var dataOffset sourcemap.LineColumnOffset
		j.AddBytes(piece.data)
		dataOffset.AdvanceBytes(piece.data)
		shift.Before.Add(dataOffset)
		shift.After.Add(dataOffset)

		switch piece.kind {
		case outputPieceAssetIndex:
			file := c.graph.Files[piece.index]
			if len(file.InputFile.AdditionalFiles) != 1 {
				panic("Internal error")
			}
			relPath, _ := c.fs.Rel(c.options.AbsOutputDir, file.InputFile.AdditionalFiles[0].AbsPath)

			// Make sure to always use forward slashes, even on Windows
			relPath = strings.ReplaceAll(relPath, "\\", "/")

			importPath := modifyPath(relPath)
			j.AddString(importPath)
			shift.Before.AdvanceString(file.InputFile.UniqueKeyForAdditionalFile)
			shift.After.AdvanceString(importPath)
			shifts = append(shifts, shift)

		case outputPieceChunkIndex:
			chunk := c.chunks[piece.index]
			importPath := modifyPath(chunk.finalRelPath)
			j.AddString(importPath)
			shift.Before.AdvanceString(chunk.uniqueKey)
			shift.After.AdvanceString(importPath)
			shifts = append(shifts, shift)
		}
	}

	return
}
+
// accurateFinalByteCount returns the exact byte length that "output" will
// have after final path substitution, without building the joined buffer.
func (c *linkerContext) accurateFinalByteCount(output intermediateOutput, chunkFinalRelDir string) int {
	count := 0

	// Note: The paths generated here must match "substituteFinalPaths" above
	for _, piece := range output.pieces {
		count += len(piece.data)

		switch piece.kind {
		case outputPieceAssetIndex:
			file := c.graph.Files[piece.index]
			if len(file.InputFile.AdditionalFiles) != 1 {
				panic("Internal error")
			}
			relPath, _ := c.fs.Rel(c.options.AbsOutputDir, file.InputFile.AdditionalFiles[0].AbsPath)

			// Make sure to always use forward slashes, even on Windows
			relPath = strings.ReplaceAll(relPath, "\\", "/")

			importPath := c.pathBetweenChunks(chunkFinalRelDir, relPath)
			count += len(importPath)

		case outputPieceChunkIndex:
			chunk := c.chunks[piece.index]
			importPath := c.pathBetweenChunks(chunkFinalRelDir, chunk.finalRelPath)
			count += len(importPath)
		}
	}

	return count
}
+
+// pathBetweenChunks computes the import specifier used to reference the
+// output file "toRelPath" from a chunk located in "fromRelDir". If a public
+// path is configured it is joined with the target path; otherwise a relative,
+// slash-separated path is returned (prefixed with "./" so it cannot be
+// mistaken for a package import). Returns "" and logs an error if no relative
+// path can be computed.
+func (c *linkerContext) pathBetweenChunks(fromRelDir string, toRelPath string) string {
+	// Join with the public path if it has been configured
+	if c.options.PublicPath != "" {
+		return joinWithPublicPath(c.options.PublicPath, toRelPath)
+	}
+
+	// Otherwise, return a relative path
+	rel, ok := c.fs.Rel(fromRelDir, toRelPath)
+	if !ok {
+		c.log.AddError(nil, logger.Range{},
+			fmt.Sprintf("Cannot traverse from directory %q to chunk %q", fromRelDir, toRelPath))
+		return ""
+	}
+
+	// Make sure to always use forward slashes, even on Windows
+	rel = strings.ReplaceAll(rel, "\\", "/")
+
+	// Make sure the relative path doesn't start with a name, since that could
+	// be interpreted as a package path instead of a relative path
+	if strings.HasPrefix(rel, "./") || strings.HasPrefix(rel, "../") {
+		return rel
+	}
+	return "./" + rel
+}
+
+// computeCrossChunkDependencies wires the generated chunks together when code
+// splitting is enabled. It records which top-level symbols each chunk uses
+// from other chunks, marks those symbols as exports of the chunk that
+// declares them, and then synthesizes the cross-chunk export and import
+// statements (ESM output format only — other formats panic here).
+func (c *linkerContext) computeCrossChunkDependencies() {
+	c.timer.Begin("Compute cross-chunk dependencies")
+	defer c.timer.End("Compute cross-chunk dependencies")
+
+	if !c.options.CodeSplitting {
+		// No need to compute cross-chunk dependencies if there can't be any
+		return
+	}
+
+	type chunkMeta struct {
+		imports        map[ast.Ref]bool // top-level symbols this chunk uses but another chunk declares
+		exports        map[ast.Ref]bool // symbols this chunk must export for use by other chunks
+		dynamicImports map[int]bool     // indices of other chunks reached via "import()" from this chunk
+	}
+
+	chunkMetas := make([]chunkMeta, len(c.chunks))
+
+	// For each chunk, see what symbols it uses from other chunks. Do this in
+	// parallel because it's the most expensive part of this function.
+	waitGroup := sync.WaitGroup{}
+	waitGroup.Add(len(c.chunks))
+	for chunkIndex, chunk := range c.chunks {
+		go func(chunkIndex int, chunk chunkInfo) {
+			chunkMeta := &chunkMetas[chunkIndex]
+			imports := make(map[ast.Ref]bool)
+			chunkMeta.imports = imports
+			chunkMeta.exports = make(map[ast.Ref]bool)
+
+			// Go over each file in this chunk
+			for sourceIndex := range chunk.filesWithPartsInChunk {
+				// Go over each part in this file that's marked for inclusion in this chunk
+				switch repr := c.graph.Files[sourceIndex].InputFile.Repr.(type) {
+				case *graph.JSRepr:
+					for partIndex, partMeta := range repr.AST.Parts {
+						if !partMeta.IsLive {
+							continue
+						}
+						part := &repr.AST.Parts[partIndex]
+
+						// Rewrite external dynamic imports to point to the chunk for that entry point
+						for _, importRecordIndex := range part.ImportRecordIndices {
+							record := &repr.AST.ImportRecords[importRecordIndex]
+							if record.SourceIndex.IsValid() && c.isExternalDynamicImport(record, sourceIndex) {
+								otherChunkIndex := c.graph.Files[record.SourceIndex.GetIndex()].EntryPointChunkIndex
+								record.Path.Text = c.chunks[otherChunkIndex].uniqueKey
+								record.SourceIndex = ast.Index32{}
+								record.Flags |= ast.ShouldNotBeExternalInMetafile | ast.ContainsUniqueKey
+
+								// Track this cross-chunk dynamic import so we make sure to
+								// include its hash when we're calculating the hashes of all
+								// dependencies of this chunk.
+								if int(otherChunkIndex) != chunkIndex {
+									if chunkMeta.dynamicImports == nil {
+										chunkMeta.dynamicImports = make(map[int]bool)
+									}
+									chunkMeta.dynamicImports[int(otherChunkIndex)] = true
+								}
+							}
+						}
+
+						// Remember what chunk each top-level symbol is declared in. Symbols
+						// with multiple declarations such as repeated "var" statements with
+						// the same name should already be marked as all being in a single
+						// chunk. In that case this will overwrite the same value below which
+						// is fine.
+						for _, declared := range part.DeclaredSymbols {
+							if declared.IsTopLevel {
+								c.graph.Symbols.Get(declared.Ref).ChunkIndex = ast.MakeIndex32(uint32(chunkIndex))
+							}
+						}
+
+						// Record each symbol used in this part. This will later be matched up
+						// with our map of which chunk a given symbol is declared in to
+						// determine if the symbol needs to be imported from another chunk.
+						for ref := range part.SymbolUses {
+							symbol := c.graph.Symbols.Get(ref)
+
+							// Ignore unbound symbols, which don't have declarations
+							if symbol.Kind == ast.SymbolUnbound {
+								continue
+							}
+
+							// Ignore symbols that are going to be replaced by undefined
+							if symbol.ImportItemStatus == ast.ImportItemMissing {
+								continue
+							}
+
+							// If this is imported from another file, follow the import
+							// reference and reference the symbol in that file instead
+							if importData, ok := repr.Meta.ImportsToBind[ref]; ok {
+								ref = importData.Ref
+								symbol = c.graph.Symbols.Get(ref)
+							} else if repr.Meta.Wrap == graph.WrapCJS && ref != repr.AST.WrapperRef {
+								// The only internal symbol that wrapped CommonJS files export
+								// is the wrapper itself.
+								continue
+							}
+
+							// If this is an ES6 import from a CommonJS file, it will become a
+							// property access off the namespace symbol instead of a bare
+							// identifier. In that case we want to pull in the namespace symbol
+							// instead. The namespace symbol stores the result of "require()".
+							if symbol.NamespaceAlias != nil {
+								ref = symbol.NamespaceAlias.NamespaceRef
+							}
+
+							// We must record this relationship even for symbols that are not
+							// imports. Due to code splitting, the definition of a symbol may
+							// be moved to a separate chunk than the use of a symbol even if
+							// the definition and use of that symbol are originally from the
+							// same source file.
+							imports[ref] = true
+						}
+					}
+				}
+			}
+
+			// Include the exports if this is an entry point chunk
+			if chunk.isEntryPoint {
+				if repr, ok := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); ok {
+					if repr.Meta.Wrap != graph.WrapCJS {
+						for _, alias := range repr.Meta.SortedAndFilteredExportAliases {
+							export := repr.Meta.ResolvedExports[alias]
+							targetRef := export.Ref
+
+							// If this is an import, then target what the import points to
+							if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[targetRef]; ok {
+								targetRef = importData.Ref
+							}
+
+							// If this is an ES6 import from a CommonJS file, it will become a
+							// property access off the namespace symbol instead of a bare
+							// identifier. In that case we want to pull in the namespace symbol
+							// instead. The namespace symbol stores the result of "require()".
+							if symbol := c.graph.Symbols.Get(targetRef); symbol.NamespaceAlias != nil {
+								targetRef = symbol.NamespaceAlias.NamespaceRef
+							}
+
+							imports[targetRef] = true
+						}
+					}
+
+					// Ensure "exports" is included if the current output format needs it
+					if repr.Meta.ForceIncludeExportsForEntryPoint {
+						imports[repr.AST.ExportsRef] = true
+					}
+
+					// Include the wrapper if present
+					if repr.Meta.Wrap != graph.WrapNone {
+						imports[repr.AST.WrapperRef] = true
+					}
+				}
+			}
+
+			waitGroup.Done()
+		}(chunkIndex, chunk)
+	}
+	waitGroup.Wait()
+
+	// Mark imported symbols as exported in the chunk from which they are declared
+	for chunkIndex := range c.chunks {
+		chunk := &c.chunks[chunkIndex]
+		chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS)
+		if !ok {
+			continue
+		}
+		chunkMeta := chunkMetas[chunkIndex]
+
+		// Find all uses in this chunk of symbols from other chunks
+		chunkRepr.importsFromOtherChunks = make(map[uint32]crossChunkImportItemArray)
+		for importRef := range chunkMeta.imports {
+			// Ignore uses that aren't top-level symbols
+			if otherChunkIndex := c.graph.Symbols.Get(importRef).ChunkIndex; otherChunkIndex.IsValid() {
+				if otherChunkIndex := otherChunkIndex.GetIndex(); otherChunkIndex != uint32(chunkIndex) {
+					chunkRepr.importsFromOtherChunks[otherChunkIndex] =
+						append(chunkRepr.importsFromOtherChunks[otherChunkIndex], crossChunkImportItem{ref: importRef})
+					chunkMetas[otherChunkIndex].exports[importRef] = true
+				}
+			}
+		}
+
+		// If this is an entry point, make sure we import all chunks belonging to
+		// this entry point, even if there are no imports. We need to make sure
+		// these chunks are evaluated for their side effects too.
+		if chunk.isEntryPoint {
+			for otherChunkIndex, otherChunk := range c.chunks {
+				if _, ok := otherChunk.chunkRepr.(*chunkReprJS); ok && chunkIndex != otherChunkIndex && otherChunk.entryBits.HasBit(chunk.entryPointBit) {
+					// Note: this read-then-write is NOT a no-op. Reading a missing key
+					// yields nil, and storing that nil back creates the map entry, which
+					// makes the import-generation loop below emit a bare side-effect
+					// import ("import './chunk.js'") for this chunk.
+					imports := chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)]
+					chunkRepr.importsFromOtherChunks[uint32(otherChunkIndex)] = imports
+				}
+			}
+		}
+
+		// Make sure we also track dynamic cross-chunk imports. These need to be
+		// tracked so we count them as dependencies of this chunk for the purpose
+		// of hash calculation.
+		if chunkMeta.dynamicImports != nil {
+			sortedDynamicImports := make([]int, 0, len(chunkMeta.dynamicImports))
+			for chunkIndex := range chunkMeta.dynamicImports {
+				sortedDynamicImports = append(sortedDynamicImports, chunkIndex)
+			}
+			sort.Ints(sortedDynamicImports)
+			for _, chunkIndex := range sortedDynamicImports {
+				chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{
+					importKind: ast.ImportDynamic,
+					chunkIndex: uint32(chunkIndex),
+				})
+			}
+		}
+	}
+
+	// Generate cross-chunk exports. These must be computed before cross-chunk
+	// imports because of export alias renaming, which must consider all export
+	// aliases simultaneously to avoid collisions.
+	for chunkIndex := range c.chunks {
+		chunk := &c.chunks[chunkIndex]
+		chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS)
+		if !ok {
+			continue
+		}
+
+		chunkRepr.exportsToOtherChunks = make(map[ast.Ref]string)
+		switch c.options.OutputFormat {
+		case config.FormatESModule:
+			r := renamer.ExportRenamer{}
+			var items []js_ast.ClauseItem
+			for _, export := range c.sortedCrossChunkExportItems(chunkMetas[chunkIndex].exports) {
+				var alias string
+				if c.options.MinifyIdentifiers {
+					alias = r.NextMinifiedName()
+				} else {
+					alias = r.NextRenamedName(c.graph.Symbols.Get(export.Ref).OriginalName)
+				}
+				items = append(items, js_ast.ClauseItem{Name: ast.LocRef{Ref: export.Ref}, Alias: alias})
+				chunkRepr.exportsToOtherChunks[export.Ref] = alias
+			}
+			if len(items) > 0 {
+				chunkRepr.crossChunkSuffixStmts = []js_ast.Stmt{{Data: &js_ast.SExportClause{
+					Items: items,
+				}}}
+			}
+
+		default:
+			// Only the ESM output format is supported here
+			panic("Internal error")
+		}
+	}
+
+	// Generate cross-chunk imports. These must be computed after cross-chunk
+	// exports because the export aliases must already be finalized so they can
+	// be embedded in the generated import statements.
+	for chunkIndex := range c.chunks {
+		chunk := &c.chunks[chunkIndex]
+		chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS)
+		if !ok {
+			continue
+		}
+
+		var crossChunkPrefixStmts []js_ast.Stmt
+
+		for _, crossChunkImport := range c.sortedCrossChunkImports(chunkRepr.importsFromOtherChunks) {
+			switch c.options.OutputFormat {
+			case config.FormatESModule:
+				var items []js_ast.ClauseItem
+				for _, item := range crossChunkImport.sortedImportItems {
+					items = append(items, js_ast.ClauseItem{Name: ast.LocRef{Ref: item.ref}, Alias: item.exportAlias})
+				}
+				importRecordIndex := uint32(len(chunk.crossChunkImports))
+				chunk.crossChunkImports = append(chunk.crossChunkImports, chunkImport{
+					importKind: ast.ImportStmt,
+					chunkIndex: crossChunkImport.chunkIndex,
+				})
+				if len(items) > 0 {
+					// "import {a, b} from './chunk.js'"
+					crossChunkPrefixStmts = append(crossChunkPrefixStmts, js_ast.Stmt{Data: &js_ast.SImport{
+						Items:             &items,
+						ImportRecordIndex: importRecordIndex,
+					}})
+				} else {
+					// "import './chunk.js'"
+					crossChunkPrefixStmts = append(crossChunkPrefixStmts, js_ast.Stmt{Data: &js_ast.SImport{
+						ImportRecordIndex: importRecordIndex,
+					}})
+				}
+
+			default:
+				// Only the ESM output format is supported here
+				panic("Internal error")
+			}
+		}
+
+		chunkRepr.crossChunkPrefixStmts = crossChunkPrefixStmts
+	}
+}
+
+// crossChunkImport describes everything one chunk imports from a single other
+// chunk: the target chunk and the alias-sorted list of imported items.
+type crossChunkImport struct {
+	sortedImportItems crossChunkImportItemArray
+	chunkIndex        uint32
+}
+
+// This type is just so we can use Go's native sort function
+type crossChunkImportArray []crossChunkImport
+
+func (a crossChunkImportArray) Len() int          { return len(a) }
+func (a crossChunkImportArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less orders cross-chunk imports by the index of the imported chunk so the
+// generated import statements are deterministic between builds.
+func (a crossChunkImportArray) Less(i int, j int) bool {
+	return a[i].chunkIndex < a[j].chunkIndex
+}
+
+// sortedCrossChunkImports converts the per-chunk import map into a slice
+// ordered by chunk index, resolving each item's export alias from the
+// declaring chunk and sorting the items of each chunk by that alias. Both
+// sorts exist purely to make the output deterministic between builds.
+func (c *linkerContext) sortedCrossChunkImports(importsFromOtherChunks map[uint32]crossChunkImportItemArray) crossChunkImportArray {
+	sorted := make(crossChunkImportArray, 0, len(importsFromOtherChunks))
+
+	for otherChunkIndex, items := range importsFromOtherChunks {
+		// Fill in the alias each item was exported under, then sort the items
+		// from this one chunk by alias for determinism
+		aliases := c.chunks[otherChunkIndex].chunkRepr.(*chunkReprJS).exportsToOtherChunks
+		for i := range items {
+			items[i].exportAlias = aliases[items[i].ref]
+		}
+		sort.Sort(items)
+
+		sorted = append(sorted, crossChunkImport{
+			chunkIndex:        otherChunkIndex,
+			sortedImportItems: items,
+		})
+	}
+
+	sort.Sort(sorted)
+	return sorted
+}
+
+// crossChunkImportItem is one symbol imported from another chunk, paired with
+// the alias it was exported under in that chunk.
+type crossChunkImportItem struct {
+	exportAlias string
+	ref         ast.Ref
+}
+
+// This type is just so we can use Go's native sort function
+type crossChunkImportItemArray []crossChunkImportItem
+
+func (a crossChunkImportItemArray) Len() int          { return len(a) }
+func (a crossChunkImportItemArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less orders items by export alias so generated import clauses are stable.
+func (a crossChunkImportItemArray) Less(i int, j int) bool {
+	return a[i].exportAlias < a[j].exportAlias
+}
+
+// stableRef pairs a symbol reference with a build-stable index for its source
+// file. The sort order is arbitrary but must be reproducible between builds:
+// InnerIndex is already stable because the parser for a single file is
+// single-threaded and hands out InnerIndex values sequentially, but
+// SourceIndex is not, because the main thread assigns source indices to
+// newly-discovered dependencies in a multi-threaded producer/consumer
+// relationship. So the index of the source in the DFS order over all entry
+// points is used instead of SourceIndex for sorting.
+type stableRef struct {
+	StableSourceIndex uint32
+	Ref               ast.Ref
+}
+
+// This type is just so we can use Go's native sort function
+type stableRefArray []stableRef
+
+func (a stableRefArray) Len() int          { return len(a) }
+func (a stableRefArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less orders by stable source index first, then by inner index within the
+// same source file.
+func (a stableRefArray) Less(i int, j int) bool {
+	if a[i].StableSourceIndex != a[j].StableSourceIndex {
+		return a[i].StableSourceIndex < a[j].StableSourceIndex
+	}
+	return a[i].Ref.InnerIndex < a[j].Ref.InnerIndex
+}
+
+// sortedCrossChunkExportItems flattens the set of exported refs into a slice
+// sorted by (stable source index, inner index) so that cross-chunk export
+// generation is deterministic between builds.
+func (c *linkerContext) sortedCrossChunkExportItems(exportRefs map[ast.Ref]bool) stableRefArray {
+	items := make(stableRefArray, 0, len(exportRefs))
+	for ref := range exportRefs {
+		items = append(items, stableRef{
+			Ref:               ref,
+			StableSourceIndex: c.graph.StableSourceIndices[ref.SourceIndex],
+		})
+	}
+	sort.Sort(items)
+	return items
+}
+
+func (c *linkerContext) scanImportsAndExports() {
+	c.timer.Begin("Scan imports and exports")
+	defer c.timer.End("Scan imports and exports")
+
+	// Step 1: Figure out what modules must be CommonJS
+	c.timer.Begin("Step 1")
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		file := &c.graph.Files[sourceIndex]
+		additionalFiles := file.InputFile.AdditionalFiles
+
+		switch repr := file.InputFile.Repr.(type) {
+		case *graph.CSSRepr:
+			// Inline URLs for non-CSS files into the CSS file
+			for importRecordIndex := range repr.AST.ImportRecords {
+				if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() {
+					otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+					if otherRepr, ok := otherFile.InputFile.Repr.(*graph.JSRepr); ok {
+						record.Path.Text = otherRepr.AST.URLForCSS
+						record.Path.Namespace = ""
+						record.SourceIndex = ast.Index32{}
+						if otherFile.InputFile.Loader == config.LoaderEmpty {
+							record.Flags |= ast.WasLoadedWithEmptyLoader
+						} else {
+							record.Flags |= ast.ShouldNotBeExternalInMetafile
+						}
+						if strings.Contains(otherRepr.AST.URLForCSS, c.uniqueKeyPrefix) {
+							record.Flags |= ast.ContainsUniqueKey
+						}
+
+						// Copy the additional files to the output directory
+						additionalFiles = append(additionalFiles, otherFile.InputFile.AdditionalFiles...)
+					}
+				} else if record.CopySourceIndex.IsValid() {
+					otherFile := &c.graph.Files[record.CopySourceIndex.GetIndex()]
+					if otherRepr, ok := otherFile.InputFile.Repr.(*graph.CopyRepr); ok {
+						record.Path.Text = otherRepr.URLForCode
+						record.Path.Namespace = ""
+						record.CopySourceIndex = ast.Index32{}
+						record.Flags |= ast.ShouldNotBeExternalInMetafile | ast.ContainsUniqueKey
+
+						// Copy the additional files to the output directory
+						additionalFiles = append(additionalFiles, otherFile.InputFile.AdditionalFiles...)
+					}
+				}
+			}
+
+			// Validate cross-file "composes: ... from" named imports
+			for _, composes := range repr.AST.Composes {
+				for _, name := range composes.ImportedNames {
+					if record := repr.AST.ImportRecords[name.ImportRecordIndex]; record.SourceIndex.IsValid() {
+						otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+						if otherRepr, ok := otherFile.InputFile.Repr.(*graph.CSSRepr); ok {
+							if _, ok := otherRepr.AST.LocalScope[name.Alias]; !ok {
+								if global, ok := otherRepr.AST.GlobalScope[name.Alias]; ok {
+									var hint string
+									if otherFile.InputFile.Loader == config.LoaderCSS {
+										hint = fmt.Sprintf("Use the \"local-css\" loader for %q to enable local names.", otherFile.InputFile.Source.PrettyPath)
+									} else {
+										hint = fmt.Sprintf("Use the \":local\" selector to change %q into a local name.", name.Alias)
+									}
+									c.log.AddErrorWithNotes(file.LineColumnTracker(),
+										css_lexer.RangeOfIdentifier(file.InputFile.Source, name.AliasLoc),
+										fmt.Sprintf("Cannot use global name %q with \"composes\"", name.Alias),
+										[]logger.MsgData{
+											otherFile.LineColumnTracker().MsgData(
+												css_lexer.RangeOfIdentifier(otherFile.InputFile.Source, global.Loc),
+												fmt.Sprintf("The global name %q is defined here:", name.Alias),
+											),
+											{Text: hint},
+										})
+								} else {
+									c.log.AddError(file.LineColumnTracker(),
+										css_lexer.RangeOfIdentifier(file.InputFile.Source, name.AliasLoc),
+										fmt.Sprintf("The name %q never appears in %q",
+											name.Alias, otherFile.InputFile.Source.PrettyPath))
+								}
+							}
+						}
+					}
+				}
+			}
+
+			c.validateComposesFromProperties(file, repr)
+
+		case *graph.JSRepr:
+			for importRecordIndex := range repr.AST.ImportRecords {
+				record := &repr.AST.ImportRecords[importRecordIndex]
+				if !record.SourceIndex.IsValid() {
+					if record.CopySourceIndex.IsValid() {
+						otherFile := &c.graph.Files[record.CopySourceIndex.GetIndex()]
+						if otherRepr, ok := otherFile.InputFile.Repr.(*graph.CopyRepr); ok {
+							record.Path.Text = otherRepr.URLForCode
+							record.Path.Namespace = ""
+							record.CopySourceIndex = ast.Index32{}
+							record.Flags |= ast.ShouldNotBeExternalInMetafile | ast.ContainsUniqueKey
+
+							// Copy the additional files to the output directory
+							additionalFiles = append(additionalFiles, otherFile.InputFile.AdditionalFiles...)
+						}
+					}
+					continue
+				}
+
+				otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+				otherRepr := otherFile.InputFile.Repr.(*graph.JSRepr)
+
+				switch record.Kind {
+				case ast.ImportStmt:
+					// Importing using ES6 syntax from a file without any ES6 syntax
+					// causes that module to be considered CommonJS-style, even if it
+					// doesn't have any CommonJS exports.
+					//
+					// That means the ES6 imports will become undefined instead of
+					// causing errors. This is for compatibility with older CommonJS-
+					// style bundlers.
+					//
+					// We emit a warning in this case but try to avoid turning the module
+					// into a CommonJS module if possible. This is possible with named
+					// imports (the module stays an ECMAScript module but the imports are
+					// rewritten with undefined) but is not possible with star or default
+					// imports:
+					//
+					//   import * as ns from './empty-file'
+					//   import defVal from './empty-file'
+					//   console.log(ns, defVal)
+					//
+					// In that case the module *is* considered a CommonJS module because
+					// the namespace object must be created.
+					if (record.Flags.Has(ast.ContainsImportStar) || record.Flags.Has(ast.ContainsDefaultAlias)) &&
+						otherRepr.AST.ExportsKind == js_ast.ExportsNone && !otherRepr.AST.HasLazyExport {
+						otherRepr.Meta.Wrap = graph.WrapCJS
+						otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS
+					}
+
+				case ast.ImportRequire:
+					// Files that are imported with require() must be wrapped so that
+					// they can be lazily-evaluated
+					if otherRepr.AST.ExportsKind == js_ast.ExportsESM {
+						otherRepr.Meta.Wrap = graph.WrapESM
+					} else {
+						otherRepr.Meta.Wrap = graph.WrapCJS
+						otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS
+					}
+
+				case ast.ImportDynamic:
+					if !c.options.CodeSplitting {
+						// If we're not splitting, then import() is just a require() that
+						// returns a promise, so the imported file must also be wrapped
+						if otherRepr.AST.ExportsKind == js_ast.ExportsESM {
+							otherRepr.Meta.Wrap = graph.WrapESM
+						} else {
+							otherRepr.Meta.Wrap = graph.WrapCJS
+							otherRepr.AST.ExportsKind = js_ast.ExportsCommonJS
+						}
+					}
+				}
+			}
+
+			// If the output format doesn't have an implicit CommonJS wrapper, any file
+			// that uses CommonJS features will need to be wrapped, even though the
+			// resulting wrapper won't be invoked by other files. An exception is made
+			// for entry point files in CommonJS format (or when in pass-through mode).
+			if repr.AST.ExportsKind == js_ast.ExportsCommonJS && (!file.IsEntryPoint() ||
+				c.options.OutputFormat == config.FormatIIFE || c.options.OutputFormat == config.FormatESModule) {
+				repr.Meta.Wrap = graph.WrapCJS
+			}
+		}
+
+		file.InputFile.AdditionalFiles = additionalFiles
+	}
+	c.timer.End("Step 1")
+
+	// Step 2: Propagate dynamic export status for export star statements that
+	// are re-exports from a module whose exports are not statically analyzable.
+	// In this case the export star must be evaluated at run time instead of at
+	// bundle time.
+	c.timer.Begin("Step 2")
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+		if !ok {
+			continue
+		}
+
+		if repr.Meta.Wrap != graph.WrapNone {
+			c.recursivelyWrapDependencies(sourceIndex)
+		}
+
+		if len(repr.AST.ExportStarImportRecords) > 0 {
+			visited := make(map[uint32]bool)
+			c.hasDynamicExportsDueToExportStar(sourceIndex, visited)
+		}
+
+		// Even if the output file is CommonJS-like, we may still need to wrap
+		// CommonJS-style files. Any file that imports a CommonJS-style file will
+		// cause that file to need to be wrapped. This is because the import
+		// method, whatever it is, will need to invoke the wrapper. Note that
+		// this can include entry points (e.g. an entry point that imports a file
+		// that imports that entry point).
+		for _, record := range repr.AST.ImportRecords {
+			if record.SourceIndex.IsValid() {
+				otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr)
+				if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
+					c.recursivelyWrapDependencies(record.SourceIndex.GetIndex())
+				}
+			}
+		}
+	}
+	c.timer.End("Step 2")
+
+	// Step 3: Resolve "export * from" statements. This must be done after we
+	// discover all modules that can have dynamic exports because export stars
+	// are ignored for those modules.
+	c.timer.Begin("Step 3")
+	exportStarStack := make([]uint32, 0, 32)
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+		if !ok {
+			continue
+		}
+
+		// Expression-style loaders defer code generation until linking. Code
+		// generation is done here because at this point we know that the
+		// "ExportsKind" field has its final value and will not be changed.
+		if repr.AST.HasLazyExport {
+			c.generateCodeForLazyExport(sourceIndex)
+		}
+
+		// Propagate exports for export star statements
+		if len(repr.AST.ExportStarImportRecords) > 0 {
+			c.addExportsForExportStar(repr.Meta.ResolvedExports, sourceIndex, exportStarStack)
+		}
+
+		// Also add a special export so import stars can bind to it. This must be
+		// done in this step because it must come after CommonJS module discovery
+		// but before matching imports with exports.
+		repr.Meta.ResolvedExportStar = &graph.ExportData{
+			Ref:         repr.AST.ExportsRef,
+			SourceIndex: sourceIndex,
+		}
+	}
+	c.timer.End("Step 3")
+
+	// Step 4: Match imports with exports. This must be done after we process all
+	// export stars because imports can bind to export star re-exports.
+	c.timer.Begin("Step 4")
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		file := &c.graph.Files[sourceIndex]
+		repr, ok := file.InputFile.Repr.(*graph.JSRepr)
+		if !ok {
+			continue
+		}
+
+		if len(repr.AST.NamedImports) > 0 {
+			c.matchImportsWithExportsForFile(uint32(sourceIndex))
+		}
+
+		// If we're exporting as CommonJS and this file was originally CommonJS,
+		// then we'll be using the actual CommonJS "exports" and/or "module"
+		// symbols. In that case make sure to mark them as such so they don't
+		// get minified.
+		if file.IsEntryPoint() && repr.AST.ExportsKind == js_ast.ExportsCommonJS && repr.Meta.Wrap == graph.WrapNone &&
+			(c.options.OutputFormat == config.FormatPreserve || c.options.OutputFormat == config.FormatCommonJS) {
+			exportsRef := ast.FollowSymbols(c.graph.Symbols, repr.AST.ExportsRef)
+			moduleRef := ast.FollowSymbols(c.graph.Symbols, repr.AST.ModuleRef)
+			c.graph.Symbols.Get(exportsRef).Kind = ast.SymbolUnbound
+			c.graph.Symbols.Get(moduleRef).Kind = ast.SymbolUnbound
+		} else if repr.Meta.ForceIncludeExportsForEntryPoint || repr.AST.ExportsKind != js_ast.ExportsCommonJS {
+			repr.Meta.NeedsExportsVariable = true
+		}
+
+		// Create the wrapper part for wrapped files. This is needed by a later step.
+		c.createWrapperForFile(uint32(sourceIndex))
+	}
+	c.timer.End("Step 4")
+
+	// Step 5: Create namespace exports for every file. This is always necessary
+	// for CommonJS files, and is also necessary for other files if they are
+	// imported using an import star statement.
+	c.timer.Begin("Step 5")
+	waitGroup := sync.WaitGroup{}
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+		if !ok {
+			continue
+		}
+
+		// This is the slowest step and is also parallelizable, so do this in parallel.
+		waitGroup.Add(1)
+		go func(sourceIndex uint32, repr *graph.JSRepr) {
+			// Now that all exports have been resolved, sort and filter them to create
+			// something we can iterate over later.
+			aliases := make([]string, 0, len(repr.Meta.ResolvedExports))
+		nextAlias:
+			for alias, export := range repr.Meta.ResolvedExports {
+				otherFile := &c.graph.Files[export.SourceIndex].InputFile
+				otherRepr := otherFile.Repr.(*graph.JSRepr)
+
+				// Re-exporting multiple symbols with the same name causes an ambiguous
+				// export. These names cannot be used and should not end up in generated code.
+				if len(export.PotentiallyAmbiguousExportStarRefs) > 0 {
+					mainRef := export.Ref
+					mainLoc := export.NameLoc
+					if imported, ok := otherRepr.Meta.ImportsToBind[export.Ref]; ok {
+						mainRef = imported.Ref
+						mainLoc = imported.NameLoc
+					}
+
+					for _, ambiguousExport := range export.PotentiallyAmbiguousExportStarRefs {
+						ambiguousFile := &c.graph.Files[ambiguousExport.SourceIndex].InputFile
+						ambiguousRepr := ambiguousFile.Repr.(*graph.JSRepr)
+						ambiguousRef := ambiguousExport.Ref
+						ambiguousLoc := ambiguousExport.NameLoc
+						if imported, ok := ambiguousRepr.Meta.ImportsToBind[ambiguousExport.Ref]; ok {
+							ambiguousRef = imported.Ref
+							ambiguousLoc = imported.NameLoc
+						}
+
+						if mainRef != ambiguousRef {
+							file := &c.graph.Files[sourceIndex].InputFile
+							otherTracker := logger.MakeLineColumnTracker(&otherFile.Source)
+							ambiguousTracker := logger.MakeLineColumnTracker(&ambiguousFile.Source)
+							c.log.AddIDWithNotes(logger.MsgID_Bundler_AmbiguousReexport, logger.Debug, nil, logger.Range{},
+								fmt.Sprintf("Re-export of %q in %q is ambiguous and has been removed", alias, file.Source.PrettyPath),
+								[]logger.MsgData{
+									otherTracker.MsgData(js_lexer.RangeOfIdentifier(otherFile.Source, mainLoc),
+										fmt.Sprintf("One definition of %q comes from %q here:", alias, otherFile.Source.PrettyPath)),
+									ambiguousTracker.MsgData(js_lexer.RangeOfIdentifier(ambiguousFile.Source, ambiguousLoc),
+										fmt.Sprintf("Another definition of %q comes from %q here:", alias, ambiguousFile.Source.PrettyPath)),
+								},
+							)
+							continue nextAlias
+						}
+					}
+				}
+
+				// Ignore re-exported imports in TypeScript files that failed to be
+				// resolved. These are probably just type-only imports so the best thing to
+				// do is to silently omit them from the export list.
+				if otherRepr.Meta.IsProbablyTypeScriptType[export.Ref] {
+					continue
+				}
+
+				if c.options.OutputFormat == config.FormatESModule && c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames) && c.graph.Files[sourceIndex].IsEntryPoint() {
+					c.maybeForbidArbitraryModuleNamespaceIdentifier("export", export.SourceIndex, export.NameLoc, alias)
+				}
+
+				aliases = append(aliases, alias)
+			}
+			sort.Strings(aliases)
+			repr.Meta.SortedAndFilteredExportAliases = aliases
+
+			// Export creation uses "sortedAndFilteredExportAliases" so this must
+			// come second after we fill in that array
+			c.createExportsForFile(uint32(sourceIndex))
+
+			// Each part tracks the other parts it depends on within this file
+			localDependencies := make(map[uint32]uint32)
+			parts := repr.AST.Parts
+			namedImports := repr.AST.NamedImports
+			graph := c.graph
+			for partIndex := range parts {
+				part := &parts[partIndex]
+
+				// Now that all files have been parsed, determine which property
+				// accesses off of imported symbols are inlined enum values and
+				// which ones aren't
+				for ref, properties := range part.ImportSymbolPropertyUses {
+					use := part.SymbolUses[ref]
+
+					// Rare path: this import is a TypeScript enum
+					if importData, ok := repr.Meta.ImportsToBind[ref]; ok {
+						if symbol := graph.Symbols.Get(importData.Ref); symbol.Kind == ast.SymbolTSEnum {
+							if enum, ok := graph.TSEnums[importData.Ref]; ok {
+								foundNonInlinedEnum := false
+								for name, propertyUse := range properties {
+									if _, ok := enum[name]; !ok {
+										foundNonInlinedEnum = true
+										use.CountEstimate += propertyUse.CountEstimate
+									}
+								}
+								if foundNonInlinedEnum {
+									part.SymbolUses[ref] = use
+								}
+							}
+							continue
+						}
+					}
+
+					// Common path: this import isn't a TypeScript enum
+					for _, propertyUse := range properties {
+						use.CountEstimate += propertyUse.CountEstimate
+					}
+					part.SymbolUses[ref] = use
+				}
+
+				// Also determine which function calls will be inlined (and so should
+				// not count as uses), and which ones will not be (and so should count
+				// as uses)
+				for ref, callUse := range part.SymbolCallUses {
+					use := part.SymbolUses[ref]
+
+					// Find the symbol that was called
+					symbol := graph.Symbols.Get(ref)
+					if symbol.Kind == ast.SymbolImport {
+						if importData, ok := repr.Meta.ImportsToBind[ref]; ok {
+							symbol = graph.Symbols.Get(importData.Ref)
+						}
+					}
+					flags := symbol.Flags
+
+					// Rare path: this is a function that will be inlined
+					if (flags & (ast.IsEmptyFunction | ast.CouldPotentiallyBeMutated)) == ast.IsEmptyFunction {
+						// Every call will be inlined
+						continue
+					} else if (flags & (ast.IsIdentityFunction | ast.CouldPotentiallyBeMutated)) == ast.IsIdentityFunction {
+						// Every single-argument call will be inlined as long as it's not a spread
+						callUse.CallCountEstimate -= callUse.SingleArgNonSpreadCallCountEstimate
+						if callUse.CallCountEstimate == 0 {
+							continue
+						}
+					}
+
+					// Common path: this isn't a function that will be inlined
+					use.CountEstimate += callUse.CallCountEstimate
+					part.SymbolUses[ref] = use
+				}
+
+				// Now that we know this, we can determine cross-part dependencies
+				for ref := range part.SymbolUses {
+
+					// Rare path: this import is an inlined const value
+					if graph.ConstValues != nil {
+						if importData, ok := repr.Meta.ImportsToBind[ref]; ok {
+							if _, isConstValue := graph.ConstValues[importData.Ref]; isConstValue {
+								delete(part.SymbolUses, importData.Ref)
+								continue
+							}
+						}
+					}
+
+					for _, otherPartIndex := range repr.TopLevelSymbolToParts(ref) {
+						if oldPartIndex, ok := localDependencies[otherPartIndex]; !ok || oldPartIndex != uint32(partIndex) {
+							localDependencies[otherPartIndex] = uint32(partIndex)
+							part.Dependencies = append(part.Dependencies, js_ast.Dependency{
+								SourceIndex: sourceIndex,
+								PartIndex:   otherPartIndex,
+							})
+						}
+					}
+
+					// Also map from imports to parts that use them
+					if namedImport, ok := namedImports[ref]; ok {
+						namedImport.LocalPartsWithUses = append(namedImport.LocalPartsWithUses, uint32(partIndex))
+						namedImports[ref] = namedImport
+					}
+				}
+			}
+
+			waitGroup.Done()
+		}(sourceIndex, repr)
+	}
+	waitGroup.Wait()
+	c.timer.End("Step 5")
+
+	// Step 6: Bind imports to exports. This adds non-local dependencies on the
+	// parts that declare the export to all parts that use the import. Also
+	// generate wrapper parts for wrapped files.
+	c.timer.Begin("Step 6")
+	for _, sourceIndex := range c.graph.ReachableFiles {
+		file := &c.graph.Files[sourceIndex]
+		repr, ok := file.InputFile.Repr.(*graph.JSRepr)
+		if !ok {
+			continue
+		}
+
+		// Pre-generate symbols for re-exporting CommonJS symbols in case they
+		// are needed later. This is done now because the symbols map cannot be
+		// mutated later due to parallelism.
+		if file.IsEntryPoint() && c.options.OutputFormat == config.FormatESModule {
+			copies := make([]ast.Ref, len(repr.Meta.SortedAndFilteredExportAliases))
+			for i, alias := range repr.Meta.SortedAndFilteredExportAliases {
+				copies[i] = c.graph.GenerateNewSymbol(sourceIndex, ast.SymbolOther, "export_"+alias)
+			}
+			repr.Meta.CJSExportCopies = copies
+		}
+
+		// Use "init_*" for ESM wrappers instead of "require_*"
+		if repr.Meta.Wrap == graph.WrapESM {
+			c.graph.Symbols.Get(repr.AST.WrapperRef).OriginalName = "init_" + file.InputFile.Source.IdentifierName
+		}
+
+		// If this isn't CommonJS, then rename the unused "exports" and "module"
+		// variables to avoid them causing the identically-named variables in
+		// actual CommonJS files from being renamed. This is purely about
+		// aesthetics and is not about correctness. This is done here because by
+		// this point, we know the CommonJS status will not change further.
+		if repr.Meta.Wrap != graph.WrapCJS && repr.AST.ExportsKind != js_ast.ExportsCommonJS {
+			name := file.InputFile.Source.IdentifierName
+			c.graph.Symbols.Get(repr.AST.ExportsRef).OriginalName = name + "_exports"
+			c.graph.Symbols.Get(repr.AST.ModuleRef).OriginalName = name + "_module"
+		}
+
+		// Include the "__export" symbol from the runtime if it was used in the
+		// previous step. The previous step can't do this because it's running in
+		// parallel and can't safely mutate the "importsToBind" map of another file.
+		if repr.Meta.NeedsExportSymbolFromRuntime {
+			runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+			exportRef := runtimeRepr.AST.ModuleScope.Members["__export"].Ref
+			c.graph.GenerateSymbolImportAndUse(sourceIndex, js_ast.NSExportPartIndex, exportRef, 1, runtime.SourceIndex)
+		}
+
+		for importRef, importData := range repr.Meta.ImportsToBind {
+			resolvedRepr := c.graph.Files[importData.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+			partsDeclaringSymbol := resolvedRepr.TopLevelSymbolToParts(importData.Ref)
+
+			for _, partIndex := range repr.AST.NamedImports[importRef].LocalPartsWithUses {
+				part := &repr.AST.Parts[partIndex]
+
+				// Depend on the file containing the imported symbol
+				for _, resolvedPartIndex := range partsDeclaringSymbol {
+					part.Dependencies = append(part.Dependencies, js_ast.Dependency{
+						SourceIndex: importData.SourceIndex,
+						PartIndex:   resolvedPartIndex,
+					})
+				}
+
+				// Also depend on any files that re-exported this symbol in between the
+				// file containing the import and the file containing the imported symbol
+				part.Dependencies = append(part.Dependencies, importData.ReExports...)
+			}
+
+			// Merge these symbols so they will share the same name
+			ast.MergeSymbols(c.graph.Symbols, importRef, importData.Ref)
+		}
+
+		// If this is an entry point, depend on all exports so they are included
+		if file.IsEntryPoint() {
+			var dependencies []js_ast.Dependency
+
+			for _, alias := range repr.Meta.SortedAndFilteredExportAliases {
+				export := repr.Meta.ResolvedExports[alias]
+				targetSourceIndex := export.SourceIndex
+				targetRef := export.Ref
+
+				// If this is an import, then target what the import points to
+				targetRepr := c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr)
+				if importData, ok := targetRepr.Meta.ImportsToBind[targetRef]; ok {
+					targetSourceIndex = importData.SourceIndex
+					targetRef = importData.Ref
+					targetRepr = c.graph.Files[targetSourceIndex].InputFile.Repr.(*graph.JSRepr)
+					dependencies = append(dependencies, importData.ReExports...)
+				}
+
+				// Pull in all declarations of this symbol
+				for _, partIndex := range targetRepr.TopLevelSymbolToParts(targetRef) {
+					dependencies = append(dependencies, js_ast.Dependency{
+						SourceIndex: targetSourceIndex,
+						PartIndex:   partIndex,
+					})
+				}
+			}
+
+			// Ensure "exports" is included if the current output format needs it
+			if repr.Meta.ForceIncludeExportsForEntryPoint {
+				dependencies = append(dependencies, js_ast.Dependency{
+					SourceIndex: sourceIndex,
+					PartIndex:   js_ast.NSExportPartIndex,
+				})
+			}
+
+			// Include the wrapper if present
+			if repr.Meta.Wrap != graph.WrapNone {
+				dependencies = append(dependencies, js_ast.Dependency{
+					SourceIndex: sourceIndex,
+					PartIndex:   repr.Meta.WrapperPartIndex.GetIndex(),
+				})
+			}
+
+			// Represent these constraints with a dummy part
+			entryPointPartIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
+				Dependencies:         dependencies,
+				CanBeRemovedIfUnused: false,
+			})
+			repr.Meta.EntryPointPartIndex = ast.MakeIndex32(entryPointPartIndex)
+
+			// Pull in the "__toCommonJS" symbol if we need it due to being an entry point
+			if repr.Meta.ForceIncludeExportsForEntryPoint {
+				c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, entryPointPartIndex, "__toCommonJS", 1)
+			}
+		}
+
+		// Encode import-specific constraints in the dependency graph
+		for partIndex, part := range repr.AST.Parts {
+			toESMUses := uint32(0)
+			toCommonJSUses := uint32(0)
+			runtimeRequireUses := uint32(0)
+
+			// Imports of wrapped files must depend on the wrapper
+			for _, importRecordIndex := range part.ImportRecordIndices {
+				record := &repr.AST.ImportRecords[importRecordIndex]
+
+				// Don't follow external imports (this includes import() expressions)
+				if !record.SourceIndex.IsValid() || c.isExternalDynamicImport(record, sourceIndex) {
+					// This is an external import. Check if it will be a "require()" call.
+					if record.Kind == ast.ImportRequire || !c.options.OutputFormat.KeepESMImportExportSyntax() ||
+						(record.Kind == ast.ImportDynamic && c.options.UnsupportedJSFeatures.Has(compat.DynamicImport)) {
+						// We should use "__require" instead of "require" if we're not
+						// generating a CommonJS output file, since it won't exist otherwise
+						if config.ShouldCallRuntimeRequire(c.options.Mode, c.options.OutputFormat) {
+							record.Flags |= ast.CallRuntimeRequire
+							runtimeRequireUses++
+						}
+
+						// If this wasn't originally a "require()" call, then we may need
+						// to wrap this in a call to the "__toESM" wrapper to convert from
+						// CommonJS semantics to ESM semantics.
+						//
+						// Unfortunately this adds some additional code since the conversion
+						// is somewhat complex. As an optimization, we can avoid this if the
+						// following things are true:
+						//
+						// - The import is an ES module statement (e.g. not an "import()" expression)
+						// - The ES module namespace object must not be captured
+						// - The "default" and "__esModule" exports must not be accessed
+						//
+						if record.Kind != ast.ImportRequire &&
+							(record.Kind != ast.ImportStmt ||
+								record.Flags.Has(ast.ContainsImportStar) ||
+								record.Flags.Has(ast.ContainsDefaultAlias) ||
+								record.Flags.Has(ast.ContainsESModuleAlias)) {
+							record.Flags |= ast.WrapWithToESM
+							toESMUses++
+						}
+					}
+					continue
+				}
+
+				otherSourceIndex := record.SourceIndex.GetIndex()
+				otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
+
+				if otherRepr.Meta.Wrap != graph.WrapNone {
+					// Depend on the automatically-generated require wrapper symbol
+					wrapperRef := otherRepr.AST.WrapperRef
+					c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), wrapperRef, 1, otherSourceIndex)
+
+					// This is an ES6 import of a CommonJS module, so it needs the
+					// "__toESM" wrapper as long as it's not a bare "require()"
+					if record.Kind != ast.ImportRequire && otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
+						record.Flags |= ast.WrapWithToESM
+						toESMUses++
+					}
+
+					// If this is an ESM wrapper, also depend on the exports object
+					// since the final code will contain an inline reference to it.
+					// This must be done for "require()" and "import()" expressions
+					// but does not need to be done for "import" statements since
+					// those just cause us to reference the exports directly.
+					if otherRepr.Meta.Wrap == graph.WrapESM && record.Kind != ast.ImportStmt {
+						c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex)
+
+						// If this is a "require()" call, then we should add the
+						// "__esModule" marker to behave as if the module was converted
+						// from ESM to CommonJS. This is done via a wrapper instead of
+						// by modifying the exports object itself because the same ES
+						// module may be simultaneously imported and required, and the
+						// importing code should not see "__esModule" while the requiring
+						// code should see "__esModule". This is an extremely complex
+						// and subtle set of bundler interop issues. See for example
+						// https://github.com/evanw/esbuild/issues/1591.
+						if record.Kind == ast.ImportRequire {
+							record.Flags |= ast.WrapWithToCJS
+							toCommonJSUses++
+						}
+					}
+				} else if record.Kind == ast.ImportStmt && otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
+					// This is an import of a module that has a dynamic export fallback
+					// object. In that case we need to depend on that object in case
+					// something ends up needing to use it later. This could potentially
+					// be omitted in some cases with more advanced analysis if this
+					// dynamic export fallback object doesn't end up being needed.
+					c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex)
+				}
+			}
+
+			// If there's an ES6 import of a non-ES6 module, then we're going to need the
+			// "__toESM" symbol from the runtime to wrap the result of "require()"
+			c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__toESM", toESMUses)
+
+			// If there's a CommonJS require of an ES6 module, then we're going to need the
+			// "__toCommonJS" symbol from the runtime to wrap the exports object
+			c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__toCommonJS", toCommonJSUses)
+
+			// If there are unbundled calls to "require()" and we're not generating
+			// code for node, then substitute a "__require" wrapper for "require".
+			c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__require", runtimeRequireUses)
+
+			// If there's an ES6 export star statement of a non-ES6 module, then we're
+			// going to need the "__reExport" symbol from the runtime
+			reExportUses := uint32(0)
+			for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
+				record := &repr.AST.ImportRecords[importRecordIndex]
+
+				// Is this export star evaluated at run time?
+				happensAtRunTime := !record.SourceIndex.IsValid() && (!file.IsEntryPoint() || !c.options.OutputFormat.KeepESMImportExportSyntax())
+				if record.SourceIndex.IsValid() {
+					otherSourceIndex := record.SourceIndex.GetIndex()
+					otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
+					if otherSourceIndex != sourceIndex && otherRepr.AST.ExportsKind.IsDynamic() {
+						happensAtRunTime = true
+					}
+					if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
+						// This looks like "__reExport(exports_a, exports_b)". Make sure to
+						// pull in the "exports_b" symbol into this export star. This matters
+						// in code splitting situations where the "exports_b" symbol might live
+						// in a different chunk than this export star.
+						c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), otherRepr.AST.ExportsRef, 1, otherSourceIndex)
+					}
+				}
+				if happensAtRunTime {
+					// Depend on this file's "exports" object for the first argument to "__reExport"
+					c.graph.GenerateSymbolImportAndUse(sourceIndex, uint32(partIndex), repr.AST.ExportsRef, 1, sourceIndex)
+					record.Flags |= ast.CallsRunTimeReExportFn
+					repr.AST.UsesExportsRef = true
+					reExportUses++
+				}
+			}
+			c.graph.GenerateRuntimeSymbolImportAndUse(sourceIndex, uint32(partIndex), "__reExport", reExportUses)
+		}
+	}
+	c.timer.End("Step 6")
+}
+
+// validateComposesFromProperties checks every local CSS class of the given
+// file for "composes" chains that declare the same CSS property in two
+// different files. The composed order of declarations from separate files is
+// undefined by the "composes" specification, so such a property's final value
+// is unpredictable; a warning is emitted for each offending property.
+func (c *linkerContext) validateComposesFromProperties(rootFile *graph.LinkerFile, rootRepr *graph.CSSRepr) {
+	for _, local := range rootRepr.AST.LocalSymbols {
+		type propertyInFile struct {
+			file *graph.LinkerFile
+			loc  logger.Loc
+		}
+
+		// "visited" guards against cycles in the "composes" graph, while
+		// "properties" remembers, per CSS property name, the first file and
+		// location that declared it along this chain
+		visited := make(map[ast.Ref]bool)
+		properties := make(map[string]propertyInFile)
+		var visit func(*graph.LinkerFile, *graph.CSSRepr, ast.Ref)
+
+		visit = func(file *graph.LinkerFile, repr *graph.CSSRepr, ref ast.Ref) {
+			if visited[ref] {
+				return
+			}
+			visited[ref] = true
+
+			composes, ok := repr.AST.Composes[ref]
+			if !ok {
+				return
+			}
+
+			// First recurse into names composed from other files (only when the
+			// import record resolves to a CSS file that declares the alias)
+			for _, name := range composes.ImportedNames {
+				if record := repr.AST.ImportRecords[name.ImportRecordIndex]; record.SourceIndex.IsValid() {
+					otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+					if otherRepr, ok := otherFile.InputFile.Repr.(*graph.CSSRepr); ok {
+						if otherName, ok := otherRepr.AST.LocalScope[name.Alias]; ok {
+							visit(otherFile, otherRepr, otherName.Ref)
+						}
+					}
+				}
+			}
+
+			// Then recurse into names composed from within the same file
+			for _, name := range composes.Names {
+				visit(file, repr, name.Ref)
+			}
+
+			// Warn about cross-file composition with the same CSS properties
+			for keyText, keyLoc := range composes.Properties {
+				property, ok := properties[keyText]
+				if !ok {
+					// First time this property has been seen along this chain
+					properties[keyText] = propertyInFile{file, keyLoc}
+					continue
+				}
+				if property.file == file || property.file == nil {
+					// Same file (fine) or already warned about ("nil" marker)
+					continue
+				}
+
+				localOriginalName := c.graph.Symbols.Get(local.Ref).OriginalName
+				c.log.AddMsgID(logger.MsgID_CSS_UndefinedComposesFrom, logger.Msg{
+					Kind: logger.Warning,
+					Data: rootFile.LineColumnTracker().MsgData(
+						css_lexer.RangeOfIdentifier(rootFile.InputFile.Source, local.Loc),
+						fmt.Sprintf("The value of %q in the %q class is undefined", keyText, localOriginalName),
+					),
+					Notes: []logger.MsgData{
+						property.file.LineColumnTracker().MsgData(
+							css_lexer.RangeOfIdentifier(property.file.InputFile.Source, property.loc),
+							fmt.Sprintf("The first definition of %q is here:", keyText),
+						),
+						file.LineColumnTracker().MsgData(
+							css_lexer.RangeOfIdentifier(file.InputFile.Source, keyLoc),
+							fmt.Sprintf("The second definition of %q is here:", keyText),
+						),
+						{Text: fmt.Sprintf("The specification of \"composes\" does not define an order when class declarations from separate files are composed together. "+
+							"The value of the %q property for %q may change unpredictably as the code is edited. "+
+							"Make sure that all definitions of %q for %q are in a single file.", keyText, localOriginalName, keyText, localOriginalName)},
+					},
+				})
+
+				// Don't warn more than once
+				property.file = nil
+				properties[keyText] = property
+			}
+		}
+
+		visit(rootFile, rootRepr, local.Ref)
+	}
+}
+
+// generateCodeForLazyExport expands the single "SLazyExport" statement that
+// the parser produces for lazily-exported files into real export code: a
+// "module.exports = value" assignment for CommonJS-style modules, or
+// individual ES6 export parts that can each be tree-shaken separately.
+func (c *linkerContext) generateCodeForLazyExport(sourceIndex uint32) {
+	file := &c.graph.Files[sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+
+	// Grab the lazy expression. The last part is expected to hold exactly one
+	// "SLazyExport" statement; anything else indicates a bug upstream.
+	if len(repr.AST.Parts) < 1 {
+		panic("Internal error")
+	}
+	part := &repr.AST.Parts[len(repr.AST.Parts)-1]
+	if len(part.Stmts) != 1 {
+		panic("Internal error")
+	}
+	lazyValue := part.Stmts[0].Data.(*js_ast.SLazyExport).Value
+
+	// If this JavaScript file is a stub from a CSS file, populate the exports of
+	// this JavaScript stub with the local names from that CSS file. This is done
+	// now instead of earlier because we need the whole bundle to be present.
+	if repr.CSSSourceIndex.IsValid() {
+		cssSourceIndex := repr.CSSSourceIndex.GetIndex()
+		if css, ok := c.graph.Files[cssSourceIndex].InputFile.Repr.(*graph.CSSRepr); ok {
+			exports := js_ast.EObject{}
+
+			// Each CSS local name becomes one property whose value is the
+			// space-separated sequence of the class names it composes,
+			// followed by the name itself
+			for _, local := range css.AST.LocalSymbols {
+				value := js_ast.Expr{Loc: local.Loc, Data: &js_ast.ENameOfSymbol{Ref: local.Ref}}
+				visited := map[ast.Ref]bool{local.Ref: true}
+				var parts []js_ast.TemplatePart
+				var visitName func(*graph.CSSRepr, ast.Ref)
+				var visitComposes func(*graph.CSSRepr, ast.Ref)
+
+				// Emit each composed name at most once, followed by a space
+				visitName = func(repr *graph.CSSRepr, ref ast.Ref) {
+					if !visited[ref] {
+						visited[ref] = true
+						visitComposes(repr, ref)
+						parts = append(parts, js_ast.TemplatePart{
+							Value:      js_ast.Expr{Data: &js_ast.ENameOfSymbol{Ref: ref}},
+							TailCooked: []uint16{' '},
+						})
+					}
+				}
+
+				// Recursively visit everything this name composes, both names
+				// imported from other CSS files and names from the same file
+				visitComposes = func(repr *graph.CSSRepr, ref ast.Ref) {
+					if composes, ok := repr.AST.Composes[ref]; ok {
+						for _, name := range composes.ImportedNames {
+							if record := repr.AST.ImportRecords[name.ImportRecordIndex]; record.SourceIndex.IsValid() {
+								otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+								if otherRepr, ok := otherFile.InputFile.Repr.(*graph.CSSRepr); ok {
+									if otherName, ok := otherRepr.AST.LocalScope[name.Alias]; ok {
+										visitName(otherRepr, otherName.Ref)
+									}
+								}
+							}
+						}
+
+						for _, name := range composes.Names {
+							visitName(repr, name.Ref)
+						}
+					}
+				}
+
+				visitComposes(css, local.Ref)
+
+				// If anything was composed, wrap the value in a template literal
+				// that concatenates the composed names before the name itself
+				if len(parts) > 0 {
+					value.Data = &js_ast.ETemplate{Parts: append(parts, js_ast.TemplatePart{Value: value})}
+				}
+
+				exports.Properties = append(exports.Properties, js_ast.Property{
+					Key:        js_ast.Expr{Loc: local.Loc, Data: &js_ast.EString{Value: helpers.StringToUTF16(c.graph.Symbols.Get(local.Ref).OriginalName)}},
+					ValueOrNil: value,
+				})
+			}
+
+			lazyValue.Data = &exports
+		}
+	}
+
+	// Use "module.exports = value" for CommonJS-style modules
+	if repr.AST.ExportsKind == js_ast.ExportsCommonJS {
+		part.Stmts = []js_ast.Stmt{js_ast.AssignStmt(
+			js_ast.Expr{Loc: lazyValue.Loc, Data: &js_ast.EDot{
+				Target:  js_ast.Expr{Loc: lazyValue.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ModuleRef}},
+				Name:    "exports",
+				NameLoc: lazyValue.Loc,
+			}},
+			lazyValue,
+		)}
+		c.graph.GenerateSymbolImportAndUse(sourceIndex, 0, repr.AST.ModuleRef, 1, sourceIndex)
+		return
+	}
+
+	// Otherwise, generate ES6 export statements. These are added as additional
+	// parts so they can be tree shaken individually.
+	part.Stmts = nil
+
+	// Generate a new symbol and link the export into the graph for tree shaking
+	generateExport := func(loc logger.Loc, name string, alias string) (ast.Ref, uint32) {
+		ref := c.graph.GenerateNewSymbol(sourceIndex, ast.SymbolOther, name)
+		partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
+			DeclaredSymbols:      []js_ast.DeclaredSymbol{{Ref: ref, IsTopLevel: true}},
+			CanBeRemovedIfUnused: true,
+		})
+		c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, repr.AST.ModuleRef, 1, sourceIndex)
+		repr.Meta.TopLevelSymbolToPartsOverlay[ref] = []uint32{partIndex}
+		repr.Meta.ResolvedExports[alias] = graph.ExportData{
+			Ref:         ref,
+			NameLoc:     loc,
+			SourceIndex: sourceIndex,
+		}
+		return ref, partIndex
+	}
+
+	// Unwrap JSON objects into separate top-level variables. This improves tree-
+	// shaking by letting you only import part of a JSON file.
+	//
+	// But don't do this for files loaded via "with { type: 'json' }" as that
+	// behavior is specified to not export anything except for the "default"
+	// export: https://github.com/tc39/proposal-json-modules
+	if object, ok := lazyValue.Data.(*js_ast.EObject); ok && file.InputFile.Loader != config.LoaderWithTypeJSON {
+		for _, property := range object.Properties {
+			// For entry points, skip keys that aren't identifiers when the
+			// target doesn't support arbitrary module namespace names
+			if str, ok := property.Key.Data.(*js_ast.EString); ok &&
+				(!file.IsEntryPoint() || js_ast.IsIdentifierUTF16(str.Value) ||
+					!c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames)) {
+				if name := helpers.UTF16ToString(str.Value); name != "default" {
+					ref, partIndex := generateExport(property.Key.Loc, name, name)
+
+					// This initializes the generated variable with a copy of the property
+					// value, which is INCORRECT for values that are objects/arrays because
+					// they will have separate object identity. This is fixed up later in
+					// "generateCodeForFileInChunkJS" by changing the object literal to
+					// reference this generated variable instead.
+					//
+					// Changing the object literal is deferred until that point instead of
+					// doing it now because we only want to do this for top-level variables
+					// that actually end up being used, and we don't know which ones will
+					// end up actually being used at this point (since import binding hasn't
+					// happened yet). So we need to wait until after tree shaking happens.
+					repr.AST.Parts[partIndex].Stmts = []js_ast.Stmt{{Loc: property.Key.Loc, Data: &js_ast.SLocal{
+						IsExport: true,
+						Decls: []js_ast.Decl{{
+							Binding:    js_ast.Binding{Loc: property.Key.Loc, Data: &js_ast.BIdentifier{Ref: ref}},
+							ValueOrNil: property.ValueOrNil,
+						}},
+					}}}
+				}
+			}
+		}
+	}
+
+	// Generate the default export
+	ref, partIndex := generateExport(lazyValue.Loc, file.InputFile.Source.IdentifierName+"_default", "default")
+	repr.AST.Parts[partIndex].Stmts = []js_ast.Stmt{{Loc: lazyValue.Loc, Data: &js_ast.SExportDefault{
+		DefaultName: ast.LocRef{Loc: lazyValue.Loc, Ref: ref},
+		Value:       js_ast.Stmt{Loc: lazyValue.Loc, Data: &js_ast.SExpr{Value: lazyValue}},
+	}}}
+}
+
+// createExportsForFile initializes the reserved "NSExportPartIndex" part of a
+// file with the statements that create its exports object: an optional
+// "var exports = {}" declaration, an "__export(exports, { ... })" call with
+// one getter per export alias, and, for CommonJS-format entry points, a
+// "module.exports = __toCommonJS(exports)" assignment.
+func (c *linkerContext) createExportsForFile(sourceIndex uint32) {
+	////////////////////////////////////////////////////////////////////////////////
+	// WARNING: This method is run in parallel over all files. Do not mutate data
+	// for other files within this method or you will create a data race.
+	////////////////////////////////////////////////////////////////////////////////
+
+	file := &c.graph.Files[sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+
+	// Generate a getter per export
+	properties := []js_ast.Property{}
+	nsExportDependencies := []js_ast.Dependency{}
+	nsExportSymbolUses := make(map[ast.Ref]js_ast.SymbolUse)
+	for _, alias := range repr.Meta.SortedAndFilteredExportAliases {
+		export := repr.Meta.ResolvedExports[alias]
+
+		// If this is an export of an import, reference the symbol that the import
+		// was eventually resolved to. We need to do this because imports have
+		// already been resolved by this point, so we can't generate a new import
+		// and have that be resolved later.
+		if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[export.Ref]; ok {
+			export.Ref = importData.Ref
+			export.SourceIndex = importData.SourceIndex
+			nsExportDependencies = append(nsExportDependencies, importData.ReExports...)
+		}
+
+		// Exports of imports need EImportIdentifier in case they need to be re-
+		// written to a property access later on
+		var value js_ast.Expr
+		if c.graph.Symbols.Get(export.Ref).NamespaceAlias != nil {
+			value = js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.Ref}}
+		} else {
+			value = js_ast.Expr{Data: &js_ast.EIdentifier{Ref: export.Ref}}
+		}
+
+		// Add a getter property. Use an arrow function when the target supports
+		// it and fall back to a function expression otherwise.
+		var getter js_ast.Expr
+		body := js_ast.FnBody{Block: js_ast.SBlock{Stmts: []js_ast.Stmt{{Loc: value.Loc, Data: &js_ast.SReturn{ValueOrNil: value}}}}}
+		if c.options.UnsupportedJSFeatures.Has(compat.Arrow) {
+			getter = js_ast.Expr{Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: body}}}
+		} else {
+			getter = js_ast.Expr{Data: &js_ast.EArrow{PreferExpr: true, Body: body}}
+		}
+		properties = append(properties, js_ast.Property{
+			Key:        js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(alias)}},
+			ValueOrNil: getter,
+		})
+		nsExportSymbolUses[export.Ref] = js_ast.SymbolUse{CountEstimate: 1}
+
+		// Make sure the part that declares the export is included
+		for _, partIndex := range c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).TopLevelSymbolToParts(export.Ref) {
+			// Use a non-local dependency since this is likely from a different
+			// file if it came in through an export star
+			nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{
+				SourceIndex: export.SourceIndex,
+				PartIndex:   partIndex,
+			})
+		}
+	}
+
+	declaredSymbols := []js_ast.DeclaredSymbol{}
+	var nsExportStmts []js_ast.Stmt
+
+	// Prefix this part with "var exports = {}" if this isn't a CommonJS entry point
+	if repr.Meta.NeedsExportsVariable {
+		nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
+			Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ExportsRef}},
+			ValueOrNil: js_ast.Expr{Data: &js_ast.EObject{}},
+		}}}})
+		declaredSymbols = append(declaredSymbols, js_ast.DeclaredSymbol{
+			Ref:        repr.AST.ExportsRef,
+			IsTopLevel: true,
+		})
+	}
+
+	// "__export(exports, { foo: () => foo })"
+	exportRef := ast.InvalidRef
+	if len(properties) > 0 {
+		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+		exportRef = runtimeRepr.AST.ModuleScope.Members["__export"].Ref
+		nsExportStmts = append(nsExportStmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
+			Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: exportRef}},
+			Args: []js_ast.Expr{
+				{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}},
+				{Data: &js_ast.EObject{
+					Properties: properties,
+				}},
+			},
+		}}}})
+
+		// Make sure this file depends on the "__export" symbol
+		for _, partIndex := range runtimeRepr.TopLevelSymbolToParts(exportRef) {
+			nsExportDependencies = append(nsExportDependencies, js_ast.Dependency{
+				SourceIndex: runtime.SourceIndex,
+				PartIndex:   partIndex,
+			})
+		}
+
+		// Make sure the CommonJS closure, if there is one, includes "exports"
+		repr.AST.UsesExportsRef = true
+	}
+
+	// Decorate "module.exports" with the "__esModule" flag to indicate that
+	// we used to be an ES module. This is done by wrapping the exports object
+	// instead of by mutating the exports object because other modules in the
+	// bundle (including the entry point module) may do "import * as" to get
+	// access to the exports object and should NOT see the "__esModule" flag.
+	if repr.Meta.ForceIncludeExportsForEntryPoint &&
+		c.options.OutputFormat == config.FormatCommonJS {
+
+		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+		toCommonJSRef := runtimeRepr.AST.NamedExports["__toCommonJS"].Ref
+
+		// "module.exports = __toCommonJS(exports);"
+		nsExportStmts = append(nsExportStmts, js_ast.AssignStmt(
+			js_ast.Expr{Data: &js_ast.EDot{
+				Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}},
+				Name:   "exports",
+			}},
+
+			js_ast.Expr{Data: &js_ast.ECall{
+				Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: toCommonJSRef}},
+				Args:   []js_ast.Expr{{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}},
+			}},
+		))
+	}
+
+	// No need to generate a part if it'll be empty
+	if len(nsExportStmts) > 0 {
+		// Initialize the part that was allocated for us earlier. The information
+		// here will be used after this during tree shaking.
+		repr.AST.Parts[js_ast.NSExportPartIndex] = js_ast.Part{
+			Stmts:           nsExportStmts,
+			SymbolUses:      nsExportSymbolUses,
+			Dependencies:    nsExportDependencies,
+			DeclaredSymbols: declaredSymbols,
+
+			// This can be removed if nothing uses it
+			CanBeRemovedIfUnused: true,
+
+			// Make sure this is trimmed if unused even if tree shaking is disabled
+			ForceTreeShaking: true,
+		}
+
+		// Pull in the "__export" symbol if it was used. Only a flag is set here
+		// because the actual import must be registered later by the caller: the
+		// runtime file's data cannot be safely mutated from this parallel pass.
+		if exportRef != ast.InvalidRef {
+			repr.Meta.NeedsExportSymbolFromRuntime = true
+		}
+	}
+}
+
+// createWrapperForFile appends a dummy part that declares this file's wrapper
+// symbol ("require_*" for CommonJS files, "init_*" for lazily-evaluated ESM
+// files) and that depends on the matching runtime helper ("__commonJS" or
+// "__esm"). Files that don't need wrapping are left unchanged.
+func (c *linkerContext) createWrapperForFile(sourceIndex uint32) {
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+
+	switch repr.Meta.Wrap {
+	// If this is a CommonJS file, we're going to need to generate a wrapper
+	// for the CommonJS closure. That will end up looking something like this:
+	//
+	//   var require_foo = __commonJS((exports, module) => {
+	//     ...
+	//   });
+	//
+	// However, that generation is special-cased for various reasons and is
+	// done later on. Still, we're going to need to ensure that this file
+	// both depends on the "__commonJS" symbol and declares the "require_foo"
+	// symbol. Instead of special-casing this during the reachability analysis
+	// below, we just append a dummy part to the end of the file with these
+	// dependencies and let the general-purpose reachability analysis take care
+	// of it.
+	case graph.WrapCJS:
+		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+		commonJSParts := runtimeRepr.TopLevelSymbolToParts(c.cjsRuntimeRef)
+
+		// Generate the dummy part: depend on every runtime part declaring "__commonJS"
+		dependencies := make([]js_ast.Dependency, len(commonJSParts))
+		for i, partIndex := range commonJSParts {
+			dependencies[i] = js_ast.Dependency{
+				SourceIndex: runtime.SourceIndex,
+				PartIndex:   partIndex,
+			}
+		}
+		partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
+			SymbolUses: map[ast.Ref]js_ast.SymbolUse{
+				repr.AST.WrapperRef: {CountEstimate: 1},
+			},
+			DeclaredSymbols: []js_ast.DeclaredSymbol{
+				{Ref: repr.AST.ExportsRef, IsTopLevel: true},
+				{Ref: repr.AST.ModuleRef, IsTopLevel: true},
+				{Ref: repr.AST.WrapperRef, IsTopLevel: true},
+			},
+			Dependencies: dependencies,
+		})
+		repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex)
+		c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, c.cjsRuntimeRef, 1, runtime.SourceIndex)
+
+	// If this is a lazily-initialized ESM file, we're going to need to
+	// generate a wrapper for the ESM closure. That will end up looking
+	// something like this:
+	//
+	//   var init_foo = __esm(() => {
+	//     ...
+	//   });
+	//
+	// This depends on the "__esm" symbol and declares the "init_foo" symbol
+	// for similar reasons to the CommonJS closure above.
+	case graph.WrapESM:
+		runtimeRepr := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr)
+		esmParts := runtimeRepr.TopLevelSymbolToParts(c.esmRuntimeRef)
+
+		// Generate the dummy part: depend on every runtime part declaring "__esm"
+		dependencies := make([]js_ast.Dependency, len(esmParts))
+		for i, partIndex := range esmParts {
+			dependencies[i] = js_ast.Dependency{
+				SourceIndex: runtime.SourceIndex,
+				PartIndex:   partIndex,
+			}
+		}
+		partIndex := c.graph.AddPartToFile(sourceIndex, js_ast.Part{
+			SymbolUses: map[ast.Ref]js_ast.SymbolUse{
+				repr.AST.WrapperRef: {CountEstimate: 1},
+			},
+			DeclaredSymbols: []js_ast.DeclaredSymbol{
+				{Ref: repr.AST.WrapperRef, IsTopLevel: true},
+			},
+			Dependencies: dependencies,
+		})
+		repr.Meta.WrapperPartIndex = ast.MakeIndex32(partIndex)
+		c.graph.GenerateSymbolImportAndUse(sourceIndex, partIndex, c.esmRuntimeRef, 1, runtime.SourceIndex)
+	}
+}
+
+// matchImportsWithExportsForFile resolves every named import in this file
+// against the module graph and records the outcome on the file's metadata:
+// a direct binding (ImportsToBind), a namespace property access
+// (NamespaceAlias), a probable TypeScript type, or a cycle/ambiguity
+// diagnostic. Imports are visited in sorted order so diagnostics are
+// deterministic.
+func (c *linkerContext) matchImportsWithExportsForFile(sourceIndex uint32) {
+	file := &c.graph.Files[sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+
+	// Sort imports for determinism. Otherwise our unit tests will randomly
+	// fail sometimes when error messages are reordered.
+	sortedImportRefs := make([]int, 0, len(repr.AST.NamedImports))
+	for ref := range repr.AST.NamedImports {
+		sortedImportRefs = append(sortedImportRefs, int(ref.InnerIndex))
+	}
+	sort.Ints(sortedImportRefs)
+
+	// Pair imports with their matching exports
+	for _, innerIndex := range sortedImportRefs {
+		// Re-use memory for the cycle detector
+		c.cycleDetector = c.cycleDetector[:0]
+
+		importRef := ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)}
+		result, reExports := c.matchImportWithExport(importTracker{sourceIndex: sourceIndex, importRef: importRef}, nil)
+		switch result.kind {
+		case matchImportIgnore:
+
+		case matchImportNormal:
+			repr.Meta.ImportsToBind[importRef] = graph.ImportData{
+				ReExports:   reExports,
+				SourceIndex: result.sourceIndex,
+				Ref:         result.ref,
+			}
+
+		case matchImportNamespace:
+			c.graph.Symbols.Get(importRef).NamespaceAlias = &ast.NamespaceAlias{
+				NamespaceRef: result.namespaceRef,
+				Alias:        result.alias,
+			}
+
+		case matchImportNormalAndNamespace:
+			repr.Meta.ImportsToBind[importRef] = graph.ImportData{
+				ReExports:   reExports,
+				SourceIndex: result.sourceIndex,
+				Ref:         result.ref,
+			}
+
+			c.graph.Symbols.Get(importRef).NamespaceAlias = &ast.NamespaceAlias{
+				NamespaceRef: result.namespaceRef,
+				Alias:        result.alias,
+			}
+
+		case matchImportCycle:
+			namedImport := repr.AST.NamedImports[importRef]
+			c.log.AddError(file.LineColumnTracker(), js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc),
+				fmt.Sprintf("Detected cycle while resolving import %q", namedImport.Alias))
+
+		case matchImportProbablyTypeScriptType:
+			repr.Meta.IsProbablyTypeScriptType[importRef] = true
+
+		case matchImportAmbiguous:
+			namedImport := repr.AST.NamedImports[importRef]
+			r := js_lexer.RangeOfIdentifier(file.InputFile.Source, namedImport.AliasLoc)
+			var notes []logger.MsgData
+
+			// Provide the locations of both ambiguous exports if possible
+			if result.nameLoc.Start != 0 && result.otherNameLoc.Start != 0 {
+				a := c.graph.Files[result.sourceIndex]
+				b := c.graph.Files[result.otherSourceIndex]
+				ra := js_lexer.RangeOfIdentifier(a.InputFile.Source, result.nameLoc)
+				rb := js_lexer.RangeOfIdentifier(b.InputFile.Source, result.otherNameLoc)
+				notes = []logger.MsgData{
+					a.LineColumnTracker().MsgData(ra, "One matching export is here:"),
+					b.LineColumnTracker().MsgData(rb, "Another matching export is here:"),
+				}
+			}
+
+			symbol := c.graph.Symbols.Get(importRef)
+			if symbol.ImportItemStatus == ast.ImportItemGenerated {
+				// This is a warning instead of an error because although it appears
+				// to be a named import, it's actually an automatically-generated
+				// named import that was originally a property access on an import
+				// star namespace object. Normally this property access would just
+				// resolve to undefined at run-time instead of failing at binding-
+				// time, so we emit a warning and rewrite the value to the literal
+				// "undefined" instead of emitting an error.
+				symbol.ImportItemStatus = ast.ImportItemMissing
+				msg := fmt.Sprintf("Import %q will always be undefined because there are multiple matching exports", namedImport.Alias)
+				c.log.AddIDWithNotes(logger.MsgID_Bundler_ImportIsUndefined, logger.Warning, file.LineColumnTracker(), r, msg, notes)
+			} else {
+				msg := fmt.Sprintf("Ambiguous import %q has multiple matching exports", namedImport.Alias)
+				c.log.AddErrorWithNotes(file.LineColumnTracker(), r, msg, notes)
+			}
+		}
+	}
+}
+
+// matchImportKind describes the outcome of resolving a single named import
+// against the exports visible through the module graph.
+type matchImportKind uint8
+
+const (
+	// The import is either external or undefined
+	matchImportIgnore matchImportKind = iota
+
+	// "sourceIndex" and "ref" are in use
+	matchImportNormal
+
+	// "namespaceRef" and "alias" are in use
+	matchImportNamespace
+
+	// Both "matchImportNormal" and "matchImportNamespace"
+	matchImportNormalAndNamespace
+
+	// The import could not be evaluated due to a cycle
+	matchImportCycle
+
+	// The import is missing but came from a TypeScript file
+	matchImportProbablyTypeScriptType
+
+	// The import resolved to multiple symbols via "export * from"
+	matchImportAmbiguous
+)
+
+// matchImportResult describes how a named import should be bound. Which
+// fields are meaningful depends on "kind" (see matchImportKind); the
+// "other*" fields are only populated for ambiguous matches.
+type matchImportResult struct {
+	alias            string
+	kind             matchImportKind
+	namespaceRef     ast.Ref
+	sourceIndex      uint32
+	nameLoc          logger.Loc // Optional, goes with sourceIndex, ignore if zero
+	otherSourceIndex uint32
+	otherNameLoc     logger.Loc // Optional, goes with otherSourceIndex, ignore if zero
+	ref              ast.Ref
+}
+
+// matchImportWithExport follows one named import through the module graph
+// until it lands on a concrete export, a CommonJS/external/dynamic-fallback
+// module, a cycle, or no match at all. It returns how the import should be
+// bound plus the dependencies ("reExports") accumulated while traversing
+// chains of re-exports. Ambiguous "export * from" candidates are resolved
+// recursively and must all agree with the final result.
+func (c *linkerContext) matchImportWithExport(
+	tracker importTracker, reExportsIn []js_ast.Dependency,
+) (result matchImportResult, reExports []js_ast.Dependency) {
+	var ambiguousResults []matchImportResult
+	reExports = reExportsIn
+
+loop:
+	for {
+		// Make sure we avoid infinite loops trying to resolve cycles:
+		//
+		//   // foo.js
+		//   export {a as b} from './foo.js'
+		//   export {b as c} from './foo.js'
+		//   export {c as a} from './foo.js'
+		//
+		// This uses a O(n^2) array scan instead of a O(n) map because the vast
+		// majority of cases have one or two elements and Go arrays are cheap to
+		// reuse without allocating.
+		for _, previousTracker := range c.cycleDetector {
+			if tracker == previousTracker {
+				result = matchImportResult{kind: matchImportCycle}
+				break loop
+			}
+		}
+		c.cycleDetector = append(c.cycleDetector, tracker)
+
+		// Resolve the import by one step
+		nextTracker, status, potentiallyAmbiguousExportStarRefs := c.advanceImportTracker(tracker)
+		switch status {
+		case importCommonJS, importCommonJSWithoutExports, importExternal, importDisabled:
+			if status == importExternal && c.options.OutputFormat.KeepESMImportExportSyntax() {
+				// Imports from external modules should not be converted to CommonJS
+				// if the output format preserves the original ES6 import statements
+				break
+			}
+
+			// If it's a CommonJS or external file, rewrite the import to a
+			// property access. Don't do this if the namespace reference is invalid
+			// though. This is the case for star imports, where the import is the
+			// namespace.
+			trackerFile := &c.graph.Files[tracker.sourceIndex]
+			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
+			if namedImport.NamespaceRef != ast.InvalidRef {
+				if result.kind == matchImportNormal {
+					result.kind = matchImportNormalAndNamespace
+					result.namespaceRef = namedImport.NamespaceRef
+					result.alias = namedImport.Alias
+				} else {
+					result = matchImportResult{
+						kind:         matchImportNamespace,
+						namespaceRef: namedImport.NamespaceRef,
+						alias:        namedImport.Alias,
+					}
+				}
+			}
+
+			// Warn about importing from a file that is known to not have any exports
+			if status == importCommonJSWithoutExports {
+				symbol := c.graph.Symbols.Get(tracker.importRef)
+				symbol.ImportItemStatus = ast.ImportItemMissing
+				kind := logger.Warning
+				if helpers.IsInsideNodeModules(trackerFile.InputFile.Source.KeyPath.Text) {
+					kind = logger.Debug
+				}
+				c.log.AddID(logger.MsgID_Bundler_ImportIsUndefined, kind,
+					trackerFile.LineColumnTracker(),
+					js_lexer.RangeOfIdentifier(trackerFile.InputFile.Source, namedImport.AliasLoc),
+					fmt.Sprintf("Import %q will always be undefined because the file %q has no exports",
+						namedImport.Alias, c.graph.Files[nextTracker.sourceIndex].InputFile.Source.PrettyPath))
+			}
+
+		case importDynamicFallback:
+			// If it's a file with dynamic export fallback, rewrite the import to a property access
+			trackerFile := &c.graph.Files[tracker.sourceIndex]
+			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
+			if result.kind == matchImportNormal {
+				result.kind = matchImportNormalAndNamespace
+				result.namespaceRef = nextTracker.importRef
+				result.alias = namedImport.Alias
+			} else {
+				result = matchImportResult{
+					kind:         matchImportNamespace,
+					namespaceRef: nextTracker.importRef,
+					alias:        namedImport.Alias,
+				}
+			}
+
+		case importNoMatch:
+			symbol := c.graph.Symbols.Get(tracker.importRef)
+			trackerFile := &c.graph.Files[tracker.sourceIndex]
+			namedImport := trackerFile.InputFile.Repr.(*graph.JSRepr).AST.NamedImports[tracker.importRef]
+			r := js_lexer.RangeOfIdentifier(trackerFile.InputFile.Source, namedImport.AliasLoc)
+
+			// Report mismatched imports and exports
+			if symbol.ImportItemStatus == ast.ImportItemGenerated {
+				// This is not an error because although it appears to be a named
+				// import, it's actually an automatically-generated named import
+				// that was originally a property access on an import star
+				// namespace object:
+				//
+				//   import * as ns from 'foo'
+				//   const undefinedValue = ns.notAnExport
+				//
+				// If this code wasn't bundled, this property access would just resolve
+				// to undefined at run-time instead of failing at binding-time, so we
+				// rewrite the value to the literal "undefined" instead of emitting an
+				// error.
+				symbol.ImportItemStatus = ast.ImportItemMissing
+
+				// Don't emit a log message if this symbol isn't used, since then the
+				// log message isn't helpful. This can happen with "import" assignment
+				// statements in TypeScript code since they are ambiguously either a
+				// type or a value. We consider them to be a type if they aren't used.
+				//
+				//   import * as ns from 'foo'
+				//
+				//   // There's no warning here because this is dead code
+				//   if (false) ns.notAnExport
+				//
+				//   // There's no warning here because this is never used
+				//   import unused = ns.notAnExport
+				//
+				if symbol.UseCountEstimate > 0 {
+					nextFile := &c.graph.Files[nextTracker.sourceIndex].InputFile
+					msg := logger.Msg{
+						Kind: logger.Warning,
+						Data: trackerFile.LineColumnTracker().MsgData(r, fmt.Sprintf(
+							"Import %q will always be undefined because there is no matching export in %q",
+							namedImport.Alias, nextFile.Source.PrettyPath)),
+					}
+					if helpers.IsInsideNodeModules(trackerFile.InputFile.Source.KeyPath.Text) {
+						msg.Kind = logger.Debug
+					}
+					c.maybeCorrectObviousTypo(nextFile.Repr.(*graph.JSRepr), namedImport.Alias, &msg)
+					c.log.AddMsgID(logger.MsgID_Bundler_ImportIsUndefined, msg)
+				}
+			} else {
+				nextFile := &c.graph.Files[nextTracker.sourceIndex].InputFile
+				msg := logger.Msg{
+					Kind: logger.Error,
+					Data: trackerFile.LineColumnTracker().MsgData(r, fmt.Sprintf(
+						"No matching export in %q for import %q",
+						nextFile.Source.PrettyPath, namedImport.Alias)),
+				}
+				c.maybeCorrectObviousTypo(nextFile.Repr.(*graph.JSRepr), namedImport.Alias, &msg)
+				c.log.AddMsg(msg)
+			}
+
+		case importProbablyTypeScriptType:
+			// Omit this import from any namespace export code we generate for
+			// import star statements (i.e. "import * as ns from 'path'")
+			result = matchImportResult{kind: matchImportProbablyTypeScriptType}
+
+		case importFound:
+			// If there are multiple ambiguous results due to use of "export * from"
+			// statements, trace them all to see if they point to different things.
+			for _, ambiguousTracker := range potentiallyAmbiguousExportStarRefs {
+				// If this is a re-export of another import, follow the import
+				if _, ok := c.graph.Files[ambiguousTracker.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[ambiguousTracker.Ref]; ok {
+					// Save and restore the cycle detector to avoid mixing information
+					oldCycleDetector := c.cycleDetector
+					ambiguousResult, newReExportFiles := c.matchImportWithExport(importTracker{
+						sourceIndex: ambiguousTracker.SourceIndex,
+						importRef:   ambiguousTracker.Ref,
+					}, reExports)
+					c.cycleDetector = oldCycleDetector
+					ambiguousResults = append(ambiguousResults, ambiguousResult)
+					reExports = newReExportFiles
+				} else {
+					ambiguousResults = append(ambiguousResults, matchImportResult{
+						kind:        matchImportNormal,
+						sourceIndex: ambiguousTracker.SourceIndex,
+						ref:         ambiguousTracker.Ref,
+						nameLoc:     ambiguousTracker.NameLoc,
+					})
+				}
+			}
+
+			// Defer the actual binding of this import until after we generate
+			// namespace export code for all files. This has to be done for all
+			// import-to-export matches, not just the initial import to the final
+			// export, since all imports and re-exports must be merged together
+			// for correctness.
+			result = matchImportResult{
+				kind:        matchImportNormal,
+				sourceIndex: nextTracker.sourceIndex,
+				ref:         nextTracker.importRef,
+				nameLoc:     nextTracker.nameLoc,
+			}
+
+			// Depend on the statement(s) that declared this import symbol in the
+			// original file
+			for _, resolvedPartIndex := range c.graph.Files[tracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).TopLevelSymbolToParts(tracker.importRef) {
+				reExports = append(reExports, js_ast.Dependency{
+					SourceIndex: tracker.sourceIndex,
+					PartIndex:   resolvedPartIndex,
+				})
+			}
+
+			// If this is a re-export of another import, continue for another
+			// iteration of the loop to resolve that import as well
+			if _, ok := c.graph.Files[nextTracker.sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NamedImports[nextTracker.importRef]; ok {
+				tracker = nextTracker
+				continue
+			}
+
+		default:
+			panic("Internal error")
+		}
+
+		// Stop now if we didn't explicitly "continue" above
+		break
+	}
+
+	// If there is a potential ambiguity, all results must be the same
+	for _, ambiguousResult := range ambiguousResults {
+		if ambiguousResult != result {
+			if result.kind == matchImportNormal && ambiguousResult.kind == matchImportNormal &&
+				result.nameLoc.Start != 0 && ambiguousResult.nameLoc.Start != 0 {
+				return matchImportResult{
+					kind:             matchImportAmbiguous,
+					sourceIndex:      result.sourceIndex,
+					nameLoc:          result.nameLoc,
+					otherSourceIndex: ambiguousResult.sourceIndex,
+					otherNameLoc:     ambiguousResult.nameLoc,
+				}, nil
+			}
+			return matchImportResult{kind: matchImportAmbiguous}, nil
+		}
+	}
+
+	return
+}
+
+// maybeForbidArbitraryModuleNamespaceIdentifier reports an error if "alias"
+// is a string-valued module namespace name that isn't a valid JavaScript
+// identifier, since the configured target environment doesn't support
+// arbitrary module namespace identifiers. "kind" names the construct (e.g.
+// import or export) for the error message.
+func (c *linkerContext) maybeForbidArbitraryModuleNamespaceIdentifier(kind string, sourceIndex uint32, loc logger.Loc, alias string) {
+	if js_ast.IsIdentifier(alias) {
+		return
+	}
+	file := &c.graph.Files[sourceIndex]
+	where := config.PrettyPrintTargetEnvironment(c.options.OriginalTargetEnv, c.options.UnsupportedJSFeatureOverridesMask)
+	text := fmt.Sprintf("Using the string %q as an %s name is not supported in %s", alias, kind, where)
+	c.log.AddError(file.LineColumnTracker(), file.InputFile.Source.RangeOfString(loc), text)
+}
+
+// Attempt to correct an import name with a typo. When the missing name is a
+// close match for one of the file's resolved exports, a "Did you mean ...?"
+// suggestion and a note pointing at the matching export are attached to the
+// given log message. The typo detector over the export names is built lazily
+// and cached on the file's metadata so repeated lookups are cheap.
+func (c *linkerContext) maybeCorrectObviousTypo(repr *graph.JSRepr, name string, msg *logger.Msg) {
+	if repr.Meta.ResolvedExportTypos == nil {
+		// Build the detector from the sorted export names (sorted for
+		// deterministic suggestions) and cache it for later calls
+		valid := make([]string, 0, len(repr.Meta.ResolvedExports))
+		for alias := range repr.Meta.ResolvedExports {
+			valid = append(valid, alias)
+		}
+		sort.Strings(valid)
+		typos := helpers.MakeTypoDetector(valid)
+		repr.Meta.ResolvedExportTypos = &typos
+	}
+
+	if corrected, ok := repr.Meta.ResolvedExportTypos.MaybeCorrectTypo(name); ok {
+		msg.Data.Location.Suggestion = corrected
+		export := repr.Meta.ResolvedExports[corrected]
+		importedFile := &c.graph.Files[export.SourceIndex]
+		text := fmt.Sprintf("Did you mean to import %q instead?", corrected)
+		var note logger.MsgData
+		if export.NameLoc.Start == 0 {
+			// Don't report a source location for definitions without one. This can
+			// happen with automatically-generated exports from non-JavaScript files.
+			note.Text = text
+		} else {
+			// Pick the identifier-range helper matching the exporting file's type
+			var r logger.Range
+			if importedFile.InputFile.Loader.IsCSS() {
+				r = css_lexer.RangeOfIdentifier(importedFile.InputFile.Source, export.NameLoc)
+			} else {
+				r = js_lexer.RangeOfIdentifier(importedFile.InputFile.Source, export.NameLoc)
+			}
+			note = importedFile.LineColumnTracker().MsgData(r, text)
+		}
+		msg.Notes = append(msg.Notes, note)
+	}
+}
+
+// recursivelyWrapDependencies forces this file and everything it imports to
+// be wrapped in a closure (CommonJS or lazy ESM). The DidWrapDependencies
+// flag memoizes the traversal so each file is processed at most once, and
+// the runtime file is exempt because it always comes first in the output.
+func (c *linkerContext) recursivelyWrapDependencies(sourceIndex uint32) {
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+
+	// Only process each file once
+	if repr.Meta.DidWrapDependencies {
+		return
+	}
+	repr.Meta.DidWrapDependencies = true
+
+	// Never wrap the runtime file since it always comes first
+	if sourceIndex == runtime.SourceIndex {
+		return
+	}
+
+	// Pick the wrapper kind from the module's export style if one hasn't
+	// already been assigned
+	if repr.Meta.Wrap == graph.WrapNone {
+		switch repr.AST.ExportsKind {
+		case js_ast.ExportsCommonJS:
+			repr.Meta.Wrap = graph.WrapCJS
+		default:
+			repr.Meta.Wrap = graph.WrapESM
+		}
+	}
+
+	// Wrapping is contagious: every file this one imports must be wrapped too
+	for _, importRecord := range repr.AST.ImportRecords {
+		if importRecord.SourceIndex.IsValid() {
+			c.recursivelyWrapDependencies(importRecord.SourceIndex.GetIndex())
+		}
+	}
+}
+
+// hasDynamicExportsDueToExportStar reports whether this file's exports
+// cannot be fully enumerated at compile time because an "export * from"
+// chain reaches a CommonJS module or (for non-ESM-preserving output) an
+// unresolved path. As a side effect, such files have their ExportsKind
+// demoted to ExportsESMWithDynamicFallback. The "visited" set guards
+// against cycles in the export star graph.
+func (c *linkerContext) hasDynamicExportsDueToExportStar(sourceIndex uint32, visited map[uint32]bool) bool {
+	// Terminate the traversal now if this file already has dynamic exports
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+	if repr.AST.ExportsKind == js_ast.ExportsCommonJS || repr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
+		return true
+	}
+
+	// Avoid infinite loops due to cycles in the export star graph
+	if visited[sourceIndex] {
+		return false
+	}
+	visited[sourceIndex] = true
+
+	// Scan over the export star graph
+	for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
+		record := &repr.AST.ImportRecords[importRecordIndex]
+
+		// This file has dynamic exports if the exported imports are from a file
+		// that either has dynamic exports directly or transitively by itself
+		// having an export star from a file with dynamic exports.
+		if (!record.SourceIndex.IsValid() && (!c.graph.Files[sourceIndex].IsEntryPoint() || !c.options.OutputFormat.KeepESMImportExportSyntax())) ||
+			(record.SourceIndex.IsValid() && record.SourceIndex.GetIndex() != sourceIndex && c.hasDynamicExportsDueToExportStar(record.SourceIndex.GetIndex(), visited)) {
+			repr.AST.ExportsKind = js_ast.ExportsESMWithDynamicFallback
+			return true
+		}
+	}
+
+	return false
+}
+
+// addExportsForExportStar flattens "export * from" statements by copying
+// the named exports of each star-imported file into "resolvedExports",
+// recursing through nested export stars. Exports named "default", exports
+// shadowed by a real named export anywhere in the current stack, and
+// exports from CommonJS files are skipped; two different files exporting
+// the same alias are recorded as potentially ambiguous. "sourceIndexStack"
+// tracks the chain of files being visited to break cycles and to detect
+// shadowing.
+func (c *linkerContext) addExportsForExportStar(
+	resolvedExports map[string]graph.ExportData,
+	sourceIndex uint32,
+	sourceIndexStack []uint32,
+) {
+	// Avoid infinite loops due to cycles in the export star graph
+	for _, prevSourceIndex := range sourceIndexStack {
+		if prevSourceIndex == sourceIndex {
+			return
+		}
+	}
+	sourceIndexStack = append(sourceIndexStack, sourceIndex)
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+
+	for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
+		record := &repr.AST.ImportRecords[importRecordIndex]
+		if !record.SourceIndex.IsValid() {
+			// This will be resolved at run time instead
+			continue
+		}
+		otherSourceIndex := record.SourceIndex.GetIndex()
+
+		// Export stars from a CommonJS module don't work because they can't be
+		// statically discovered. Just silently ignore them in this case.
+		//
+		// We could attempt to check whether the imported file still has ES6
+		// exports even though it still uses CommonJS features. However, when
+		// doing this we'd also have to rewrite any imports of these export star
+		// re-exports as property accesses off of a generated require() call.
+		otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
+		if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
+			// All exports will be resolved at run time instead
+			continue
+		}
+
+		// Accumulate this file's exports
+	nextExport:
+		for alias, name := range otherRepr.AST.NamedExports {
+			// ES6 export star statements ignore exports named "default"
+			if alias == "default" {
+				continue
+			}
+
+			// This export star is shadowed if any file in the stack has a matching real named export
+			for _, prevSourceIndex := range sourceIndexStack {
+				prevRepr := c.graph.Files[prevSourceIndex].InputFile.Repr.(*graph.JSRepr)
+				if _, ok := prevRepr.AST.NamedExports[alias]; ok {
+					continue nextExport
+				}
+			}
+
+			if existing, ok := resolvedExports[alias]; !ok {
+				// Initialize the re-export
+				resolvedExports[alias] = graph.ExportData{
+					Ref:         name.Ref,
+					SourceIndex: otherSourceIndex,
+					NameLoc:     name.AliasLoc,
+				}
+
+				// Make sure the symbol is marked as imported so that code splitting
+				// imports it correctly if it ends up being shared with another chunk
+				repr.Meta.ImportsToBind[name.Ref] = graph.ImportData{
+					Ref:         name.Ref,
+					SourceIndex: otherSourceIndex,
+				}
+			} else if existing.SourceIndex != otherSourceIndex {
+				// Two different re-exports colliding makes it potentially ambiguous
+				existing.PotentiallyAmbiguousExportStarRefs =
+					append(existing.PotentiallyAmbiguousExportStarRefs, graph.ImportData{
+						SourceIndex: otherSourceIndex,
+						Ref:         name.Ref,
+						NameLoc:     name.AliasLoc,
+					})
+				resolvedExports[alias] = existing
+			}
+		}
+
+		// Search further through this file's export stars
+		c.addExportsForExportStar(resolvedExports, otherSourceIndex, sourceIndexStack)
+	}
+}
+
+// importTracker identifies one named import while it is being resolved: the
+// file it appears in plus the symbol reference of the imported name.
+type importTracker struct {
+	sourceIndex uint32
+	nameLoc     logger.Loc // Optional, goes with sourceIndex, ignore if zero
+	importRef   ast.Ref
+}
+
+// importStatus is the result of advancing an import tracker by one step
+// (see advanceImportTracker).
+type importStatus uint8
+
+const (
+	// The imported file has no matching export
+	importNoMatch importStatus = iota
+
+	// The imported file has a matching export
+	importFound
+
+	// The imported file is CommonJS and has unknown exports
+	importCommonJS
+
+	// The import is missing but there is a dynamic fallback object
+	importDynamicFallback
+
+	// The import was treated as a CommonJS import but the file is known to have no exports
+	importCommonJSWithoutExports
+
+	// The imported file was disabled by mapping it to false in the "browser"
+	// field of package.json
+	importDisabled
+
+	// The imported file is external and has unknown exports
+	importExternal
+
+	// This is a missing re-export in a TypeScript file, so it's probably a type
+	importProbablyTypeScriptType
+)
+
+// advanceImportTracker resolves an import by a single step: it looks at the
+// file the import points to and classifies the outcome (export found,
+// CommonJS/external access, dynamic fallback, probable TypeScript type, or
+// no match). For "importFound" the returned tracker points at the matched
+// export, which may itself be a re-exported import that the caller must
+// follow further. The third return value lists potentially ambiguous
+// "export * from" candidates for the match, if any.
+func (c *linkerContext) advanceImportTracker(tracker importTracker) (importTracker, importStatus, []graph.ImportData) {
+	file := &c.graph.Files[tracker.sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+	namedImport := repr.AST.NamedImports[tracker.importRef]
+
+	// Is this an external file?
+	record := &repr.AST.ImportRecords[namedImport.ImportRecordIndex]
+	if !record.SourceIndex.IsValid() {
+		return importTracker{}, importExternal, nil
+	}
+
+	// Is this a named import of a file without any exports?
+	otherSourceIndex := record.SourceIndex.GetIndex()
+	otherRepr := c.graph.Files[otherSourceIndex].InputFile.Repr.(*graph.JSRepr)
+	if !namedImport.AliasIsStar && !otherRepr.AST.HasLazyExport &&
+		// CommonJS exports
+		otherRepr.AST.ExportKeyword.Len == 0 && namedImport.Alias != "default" &&
+		// ESM exports
+		!otherRepr.AST.UsesExportsRef && !otherRepr.AST.UsesModuleRef {
+		// Just warn about it and replace the import with "undefined"
+		return importTracker{sourceIndex: otherSourceIndex, importRef: ast.InvalidRef}, importCommonJSWithoutExports, nil
+	}
+
+	// Is this a CommonJS file?
+	if otherRepr.AST.ExportsKind == js_ast.ExportsCommonJS {
+		return importTracker{sourceIndex: otherSourceIndex, importRef: ast.InvalidRef}, importCommonJS, nil
+	}
+
+	// Match this import star with an export star from the imported file
+	if matchingExport := otherRepr.Meta.ResolvedExportStar; namedImport.AliasIsStar && matchingExport != nil {
+		// Check to see if this is a re-export of another import
+		return importTracker{
+			sourceIndex: matchingExport.SourceIndex,
+			importRef:   matchingExport.Ref,
+			nameLoc:     matchingExport.NameLoc,
+		}, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs
+	}
+
+	// Match this import up with an export from the imported file
+	if matchingExport, ok := otherRepr.Meta.ResolvedExports[namedImport.Alias]; ok {
+		// Check to see if this is a re-export of another import
+		return importTracker{
+			sourceIndex: matchingExport.SourceIndex,
+			importRef:   matchingExport.Ref,
+			nameLoc:     matchingExport.NameLoc,
+		}, importFound, matchingExport.PotentiallyAmbiguousExportStarRefs
+	}
+
+	// Is this a file with dynamic exports?
+	if otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
+		return importTracker{sourceIndex: otherSourceIndex, importRef: otherRepr.AST.ExportsRef}, importDynamicFallback, nil
+	}
+
+	// Missing re-exports in TypeScript files are indistinguishable from types
+	if file.InputFile.Loader.IsTypeScript() && namedImport.IsExported {
+		return importTracker{}, importProbablyTypeScriptType, nil
+	}
+
+	return importTracker{sourceIndex: otherSourceIndex}, importNoMatch, nil
+}
+
+// treeShakingAndCodeSplitting runs the two liveness passes over the module
+// graph: tree shaking first marks everything reachable from each entry
+// point as live, then code splitting records which entry points can reach
+// which files.
+func (c *linkerContext) treeShakingAndCodeSplitting() {
+	// Tree shaking: each entry point marks all files reachable from itself
+	c.timer.Begin("Tree shaking")
+	for _, entryPoint := range c.graph.EntryPoints() {
+		c.markFileLiveForTreeShaking(entryPoint.SourceIndex)
+	}
+	c.timer.End("Tree shaking")
+
+	// Code splitting: determine which entry points can reach which files.
+	// This must come after tree shaking because live parts within the same
+	// file imply dependencies on each other, so all liveness has to be known
+	// before reachability per entry point can be computed.
+	c.timer.Begin("Code splitting")
+	for entryPointBit, entryPoint := range c.graph.EntryPoints() {
+		c.markFileReachableForCodeSplitting(entryPoint.SourceIndex, uint(entryPointBit), 0)
+	}
+	c.timer.End("Code splitting")
+}
+
+// markFileReachableForCodeSplitting records that the entry point identified
+// by "entryPointBit" can reach this file, and tracks the minimum import
+// distance from any entry point. Dead files are skipped entirely; a file
+// already marked for this entry point is revisited only when a shorter
+// distance is discovered. The traversal follows import records, linked CSS
+// stubs, and cross-file part dependencies.
+func (c *linkerContext) markFileReachableForCodeSplitting(sourceIndex uint32, entryPointBit uint, distanceFromEntryPoint uint32) {
+	file := &c.graph.Files[sourceIndex]
+	if !file.IsLive {
+		return
+	}
+	traverseAgain := false
+
+	// Track the minimum distance to an entry point
+	if distanceFromEntryPoint < file.DistanceFromEntryPoint {
+		file.DistanceFromEntryPoint = distanceFromEntryPoint
+		traverseAgain = true
+	}
+	distanceFromEntryPoint++
+
+	// Don't mark this file more than once
+	if file.EntryBits.HasBit(entryPointBit) && !traverseAgain {
+		return
+	}
+	file.EntryBits.SetBit(entryPointBit)
+
+	switch repr := file.InputFile.Repr.(type) {
+	case *graph.JSRepr:
+		// If the JavaScript stub for a CSS file is included, also include the CSS file
+		if repr.CSSSourceIndex.IsValid() {
+			c.markFileReachableForCodeSplitting(repr.CSSSourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
+		}
+
+		// Traverse into all imported files
+		for _, record := range repr.AST.ImportRecords {
+			if record.SourceIndex.IsValid() && !c.isExternalDynamicImport(&record, sourceIndex) {
+				c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
+			}
+		}
+
+		// Traverse into all dependencies of all parts in this file
+		for _, part := range repr.AST.Parts {
+			for _, dependency := range part.Dependencies {
+				if dependency.SourceIndex != sourceIndex {
+					c.markFileReachableForCodeSplitting(dependency.SourceIndex, entryPointBit, distanceFromEntryPoint)
+				}
+			}
+		}
+
+	case *graph.CSSRepr:
+		// Traverse into all dependencies
+		for _, record := range repr.AST.ImportRecords {
+			if record.SourceIndex.IsValid() {
+				c.markFileReachableForCodeSplitting(record.SourceIndex.GetIndex(), entryPointBit, distanceFromEntryPoint)
+			}
+		}
+	}
+}
+
+// markFileLiveForTreeShaking includes this file in the bundle and decides,
+// part by part, which parts to keep: parts that cannot be removed because
+// they (or a statement-level import kept for its side effects) have side
+// effects, plus everything not force-shaken when tree shaking is disabled
+// for an entry point. Files are visited at most once; CSS files pulled in
+// via JavaScript stubs and "@import" rules are included recursively.
+func (c *linkerContext) markFileLiveForTreeShaking(sourceIndex uint32) {
+	file := &c.graph.Files[sourceIndex]
+
+	// Don't mark this file more than once
+	if file.IsLive {
+		return
+	}
+	file.IsLive = true
+
+	switch repr := file.InputFile.Repr.(type) {
+	case *graph.JSRepr:
+		// If the JavaScript stub for a CSS file is included, also include the CSS file
+		if repr.CSSSourceIndex.IsValid() {
+			c.markFileLiveForTreeShaking(repr.CSSSourceIndex.GetIndex())
+		}
+
+		for partIndex, part := range repr.AST.Parts {
+			canBeRemovedIfUnused := part.CanBeRemovedIfUnused
+
+			// Also include any statement-level imports
+			for _, importRecordIndex := range part.ImportRecordIndices {
+				record := &repr.AST.ImportRecords[importRecordIndex]
+				if record.Kind != ast.ImportStmt {
+					continue
+				}
+
+				if record.SourceIndex.IsValid() {
+					otherSourceIndex := record.SourceIndex.GetIndex()
+
+					// Don't include this module for its side effects if it can be
+					// considered to have no side effects
+					if otherFile := &c.graph.Files[otherSourceIndex]; otherFile.InputFile.SideEffects.Kind != graph.HasSideEffects && !c.options.IgnoreDCEAnnotations {
+						continue
+					}
+
+					// Otherwise, include this module for its side effects
+					c.markFileLiveForTreeShaking(otherSourceIndex)
+				} else if record.Flags.Has(ast.IsExternalWithoutSideEffects) {
+					// This can be removed if it's unused
+					continue
+				}
+
+				// If we get here then the import was included for its side effects, so
+				// we must also keep this part
+				canBeRemovedIfUnused = false
+			}
+
+			// Include all parts in this file with side effects, or just include
+			// everything if tree-shaking is disabled. Note that we still want to
+			// perform tree-shaking on the runtime even if tree-shaking is disabled.
+			if !canBeRemovedIfUnused || (!part.ForceTreeShaking && !c.options.TreeShaking && file.IsEntryPoint()) {
+				c.markPartLiveForTreeShaking(sourceIndex, uint32(partIndex))
+			}
+		}
+
+	case *graph.CSSRepr:
+		// Include all "@import" rules
+		for _, record := range repr.AST.ImportRecords {
+			if record.SourceIndex.IsValid() {
+				c.markFileLiveForTreeShaking(record.SourceIndex.GetIndex())
+			}
+		}
+	}
+}
+
+// isExternalDynamicImport reports whether this import record is a dynamic
+// import of some other file that is itself an entry point while code
+// splitting is enabled. Such imports are treated as external so the target
+// chunk is loaded at run time instead of being inlined.
+func (c *linkerContext) isExternalDynamicImport(record *ast.ImportRecord, sourceIndex uint32) bool {
+	if !c.options.CodeSplitting || record.Kind != ast.ImportDynamic {
+		return false
+	}
+	otherSourceIndex := record.SourceIndex.GetIndex()
+	return otherSourceIndex != sourceIndex && c.graph.Files[otherSourceIndex].IsEntryPoint()
+}
+
// markPartLiveForTreeShaking marks a single part of a JS file as included in
// the bundle, then recursively marks the file containing it and everything
// the part depends on. The caller guarantees this source index refers to a
// JS file (the type assertion below would panic otherwise).
func (c *linkerContext) markPartLiveForTreeShaking(sourceIndex uint32, partIndex uint32) {
	file := &c.graph.Files[sourceIndex]
	repr := file.InputFile.Repr.(*graph.JSRepr)
	part := &repr.AST.Parts[partIndex]

	// Don't mark this part more than once. Setting "IsLive" before visiting
	// dependencies also terminates cycles in the dependency graph.
	if part.IsLive {
		return
	}
	part.IsLive = true

	// Include the file containing this part
	c.markFileLiveForTreeShaking(sourceIndex)

	// Also include any dependencies
	for _, dep := range part.Dependencies {
		c.markPartLiveForTreeShaking(dep.SourceIndex, dep.PartIndex)
	}
}
+
+// JavaScript modules are traversed in depth-first postorder. This is the
+// order that JavaScript modules were evaluated in before the top-level await
+// feature was introduced.
+//
+//	  A
+//	 / \
+//	B   C
+//	 \ /
+//	  D
+//
+// If A imports B and then C, B imports D, and C imports D, then the JavaScript
+// traversal order is D B C A.
+//
+// This function may deviate from ESM import order for dynamic imports (both
+// "require()" and "import()"). This is because the import order is impossible
+// to determine since the imports happen at run-time instead of compile-time.
+// In this case we just pick an arbitrary but consistent order.
+func (c *linkerContext) findImportedCSSFilesInJSOrder(entryPoint uint32) (order []uint32) {
+	visited := make(map[uint32]bool)
+	var visit func(uint32)
+
+	// Include this file and all files it imports
+	visit = func(sourceIndex uint32) {
+		if visited[sourceIndex] {
+			return
+		}
+		visited[sourceIndex] = true
+		file := &c.graph.Files[sourceIndex]
+		repr := file.InputFile.Repr.(*graph.JSRepr)
+
+		// Iterate over each part in the file in order
+		for _, part := range repr.AST.Parts {
+			// Traverse any files imported by this part. Note that CommonJS calls
+			// to "require()" count as imports too, sort of as if the part has an
+			// ESM "import" statement in it. This may seem weird because ESM imports
+			// are a compile-time concept while CommonJS imports are a run-time
+			// concept. But we don't want to manipulate <style> tags at run-time so
+			// this is the only way to do it.
+			for _, importRecordIndex := range part.ImportRecordIndices {
+				if record := &repr.AST.ImportRecords[importRecordIndex]; record.SourceIndex.IsValid() {
+					visit(record.SourceIndex.GetIndex())
+				}
+			}
+		}
+
+		// Iterate over the associated CSS imports in postorder
+		if repr.CSSSourceIndex.IsValid() {
+			order = append(order, repr.CSSSourceIndex.GetIndex())
+		}
+	}
+
+	// Include all files reachable from the entry point
+	visit(entryPoint)
+
+	return
+}
+
// cssImportKind identifies what a single "cssImportOrder" entry represents.
type cssImportKind uint8

const (
	cssImportNone cssImportKind = iota

	// An internal CSS file, identified by its source index
	cssImportSourceIndex

	// An external "@import" that remains in the bundled output
	cssImportExternalPath

	// Only the "@layer" names contributed by a file, not its rules
	cssImportLayers
)
+
// cssImportOrder is one entry in the flattened CSS import order produced by
// "findImportedFilesInCSSOrder". Exactly one of the kind-specific fields
// below is meaningful, selected by "kind".
type cssImportOrder struct {
	// The "@import" conditions wrapping this entry, outermost first, plus the
	// import records those condition tokens reference
	conditions             []css_ast.ImportConditions
	conditionImportRecords []ast.ImportRecord

	layers       [][]string  // kind == cssImportLayers
	externalPath logger.Path // kind == cssImportExternalPath
	sourceIndex  uint32      // kind == cssImportSourceIndex

	kind cssImportKind
}
+
+// CSS files are traversed in depth-first postorder just like JavaScript. But
+// unlike JavaScript import statements, CSS "@import" rules are evaluated every
+// time instead of just the first time.
+//
+//	  A
+//	 / \
+//	B   C
+//	 \ /
+//	  D
+//
+// If A imports B and then C, B imports D, and C imports D, then the CSS
+// traversal order is D B D C A.
+//
+// However, evaluating a CSS file multiple times is sort of equivalent to
+// evaluating it once at the last location. So we basically drop all but the
+// last evaluation in the order.
+//
+// The only exception to this is "@layer". Evaluating a CSS file multiple
+// times is sort of equivalent to evaluating it once at the first location
+// as far as "@layer" is concerned. So we may in some cases keep both the
+// first and last locations and only write out the "@layer" information
+// for the first location.
+func (c *linkerContext) findImportedFilesInCSSOrder(entryPoints []uint32) (order []cssImportOrder) {
+	var visit func(uint32, []uint32, []css_ast.ImportConditions, []ast.ImportRecord)
+	hasExternalImport := false
+
+	// Include this file and all files it imports
+	visit = func(
+		sourceIndex uint32,
+		visited []uint32,
+		wrappingConditions []css_ast.ImportConditions,
+		wrappingImportRecords []ast.ImportRecord,
+	) {
+		// The CSS specification strangely does not describe what to do when there
+		// is a cycle. So we are left with reverse-engineering the behavior from a
+		// real browser. Here's what the WebKit code base has to say about this:
+		//
+		//   "Check for a cycle in our import chain. If we encounter a stylesheet
+		//   in our parent chain with the same URL, then just bail."
+		//
+		// So that's what we do here. See "StyleRuleImport::requestStyleSheet()" in
+		// WebKit for more information.
+		for _, visitedSourceIndex := range visited {
+			if visitedSourceIndex == sourceIndex {
+				return
+			}
+		}
+		visited = append(visited, sourceIndex)
+
+		repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.CSSRepr)
+		topLevelRules := repr.AST.Rules
+
+		// Any pre-import layers come first
+		if len(repr.AST.LayersPreImport) > 0 {
+			order = append(order, cssImportOrder{
+				kind:                   cssImportLayers,
+				layers:                 repr.AST.LayersPreImport,
+				conditions:             wrappingConditions,
+				conditionImportRecords: wrappingImportRecords,
+			})
+		}
+
+		// Iterate over the top-level "@import" rules
+		for _, rule := range topLevelRules {
+			if atImport, ok := rule.Data.(*css_ast.RAtImport); ok {
+				record := &repr.AST.ImportRecords[atImport.ImportRecordIndex]
+
+				// Follow internal dependencies
+				if record.SourceIndex.IsValid() {
+					nestedConditions := wrappingConditions
+					nestedImportRecords := wrappingImportRecords
+
+					// If this import has conditions, fork our state so that the entire
+					// imported stylesheet subtree is wrapped in all of the conditions
+					if atImport.ImportConditions != nil {
+						// Fork our state
+						nestedConditions = append([]css_ast.ImportConditions{}, nestedConditions...)
+						nestedImportRecords = append([]ast.ImportRecord{}, nestedImportRecords...)
+
+						// Clone these import conditions and append them to the state
+						var conditions css_ast.ImportConditions
+						conditions, nestedImportRecords = atImport.ImportConditions.CloneWithImportRecords(repr.AST.ImportRecords, nestedImportRecords)
+						nestedConditions = append(nestedConditions, conditions)
+					}
+
+					visit(record.SourceIndex.GetIndex(), visited, nestedConditions, nestedImportRecords)
+					continue
+				}
+
+				// Record external dependencies
+				if (record.Flags & ast.WasLoadedWithEmptyLoader) == 0 {
+					allConditions := wrappingConditions
+					allImportRecords := wrappingImportRecords
+
+					// If this import has conditions, append it to the list of overall
+					// conditions for this external import. Note that an external import
+					// may actually have multiple sets of conditions that can't be
+					// merged. When this happens we need to generate a nested imported
+					// CSS file using a data URL.
+					if atImport.ImportConditions != nil {
+						var conditions css_ast.ImportConditions
+						allConditions = append([]css_ast.ImportConditions{}, allConditions...)
+						allImportRecords = append([]ast.ImportRecord{}, allImportRecords...)
+						conditions, allImportRecords = atImport.ImportConditions.CloneWithImportRecords(repr.AST.ImportRecords, allImportRecords)
+						allConditions = append(allConditions, conditions)
+					}
+
+					order = append(order, cssImportOrder{
+						kind:                   cssImportExternalPath,
+						externalPath:           record.Path,
+						conditions:             allConditions,
+						conditionImportRecords: allImportRecords,
+					})
+					hasExternalImport = true
+				}
+			}
+		}
+
+		// Iterate over the "composes" directives. Note that the order doesn't
+		// matter for these because the output order is explicitly undefined
+		// in the specification.
+		for _, record := range repr.AST.ImportRecords {
+			if record.Kind == ast.ImportComposesFrom && record.SourceIndex.IsValid() {
+				visit(record.SourceIndex.GetIndex(), visited, wrappingConditions, wrappingImportRecords)
+			}
+		}
+
+		// Accumulate imports in depth-first postorder
+		order = append(order, cssImportOrder{
+			kind:                   cssImportSourceIndex,
+			sourceIndex:            sourceIndex,
+			conditions:             wrappingConditions,
+			conditionImportRecords: wrappingImportRecords,
+		})
+	}
+
+	// Include all files reachable from any entry point
+	var visited [16]uint32 // Preallocate some space for the visited set
+	for _, sourceIndex := range entryPoints {
+		visit(sourceIndex, visited[:], nil, nil)
+	}
+
+	// Create a temporary array that we can use for filtering
+	wipOrder := make([]cssImportOrder, 0, len(order))
+
+	// CSS syntax unfortunately only allows "@import" rules at the top of the
+	// file. This means we must hoist all external "@import" rules to the top of
+	// the file when bundling, even though doing so will change the order of CSS
+	// evaluation.
+	if hasExternalImport {
+		// Pass 1: Pull out leading "@layer" and external "@import" rules
+		isAtLayerPrefix := true
+		for _, entry := range order {
+			if (entry.kind == cssImportLayers && isAtLayerPrefix) || entry.kind == cssImportExternalPath {
+				wipOrder = append(wipOrder, entry)
+			}
+			if entry.kind != cssImportLayers {
+				isAtLayerPrefix = false
+			}
+		}
+
+		// Pass 2: Append everything that we didn't pull out in pass 1
+		isAtLayerPrefix = true
+		for _, entry := range order {
+			if (entry.kind != cssImportLayers || !isAtLayerPrefix) && entry.kind != cssImportExternalPath {
+				wipOrder = append(wipOrder, entry)
+			}
+			if entry.kind != cssImportLayers {
+				isAtLayerPrefix = false
+			}
+		}
+
+		order, wipOrder = wipOrder, order[:0]
+	}
+
+	// Next, optimize import order. If there are duplicate copies of an imported
+	// file, replace all but the last copy with just the layers that are in that
+	// file. This works because in CSS, the last instance of a declaration
+	// overrides all previous instances of that declaration.
+	{
+		sourceIndexDuplicates := make(map[uint32][]int)
+		externalPathDuplicates := make(map[logger.Path][]int)
+
+	nextBackward:
+		for i := len(order) - 1; i >= 0; i-- {
+			entry := order[i]
+			switch entry.kind {
+			case cssImportSourceIndex:
+				duplicates := sourceIndexDuplicates[entry.sourceIndex]
+				for _, j := range duplicates {
+					if isConditionalImportRedundant(entry.conditions, order[j].conditions) {
+						order[i].kind = cssImportLayers
+						order[i].layers = c.graph.Files[entry.sourceIndex].InputFile.Repr.(*graph.CSSRepr).AST.LayersPostImport
+						continue nextBackward
+					}
+				}
+				sourceIndexDuplicates[entry.sourceIndex] = append(duplicates, i)
+
+			case cssImportExternalPath:
+				duplicates := externalPathDuplicates[entry.externalPath]
+				for _, j := range duplicates {
+					if isConditionalImportRedundant(entry.conditions, order[j].conditions) {
+						// Don't remove duplicates entirely. The import conditions may
+						// still introduce layers to the layer order. Represent this as a
+						// file with an empty layer list.
+						order[i].kind = cssImportLayers
+						continue nextBackward
+					}
+				}
+				externalPathDuplicates[entry.externalPath] = append(duplicates, i)
+			}
+		}
+	}
+
+	// Then optimize "@layer" rules by removing redundant ones. This loop goes
+	// forward instead of backward because "@layer" takes effect at the first
+	// copy instead of the last copy like other things in CSS.
+	{
+		type duplicateEntry struct {
+			layers  [][]string
+			indices []int
+		}
+		var layerDuplicates []duplicateEntry
+
+	nextForward:
+		for i := range order {
+			entry := order[i]
+
+			// Simplify the conditions since we know they only wrap "@layer"
+			if entry.kind == cssImportLayers {
+				// Truncate the conditions at the first anonymous layer
+				for i, conditions := range entry.conditions {
+					// The layer is anonymous if it's a "layer" token without any
+					// children instead of a "layer(...)" token with children:
+					//
+					//   /* entry.css */
+					//   @import "foo.css" layer;
+					//
+					//   /* foo.css */
+					//   @layer foo;
+					//
+					// We don't need to generate this (as far as I can tell):
+					//
+					//   @layer {
+					//     @layer foo;
+					//   }
+					//
+					if conditions.Layers != nil && len(conditions.Layers) == 1 && conditions.Layers[0].Children == nil {
+						entry.conditions = entry.conditions[:i]
+						entry.layers = nil
+						break
+					}
+				}
+
+				// If there are no layer names for this file, trim all conditions
+				// without layers because we know they have no effect.
+				//
+				//   /* entry.css */
+				//   @import "foo.css" layer(foo) supports(display: flex);
+				//
+				//   /* foo.css */
+				//   @import "empty.css" supports(display: grid);
+				//
+				// That would result in this:
+				//
+				//   @supports (display: flex) {
+				//     @layer foo {
+				//       @supports (display: grid) {}
+				//     }
+				//   }
+				//
+				// Here we can trim "supports(display: grid)" to generate this:
+				//
+				//   @supports (display: flex) {
+				//     @layer foo;
+				//   }
+				//
+				if len(entry.layers) == 0 {
+					for i := len(entry.conditions) - 1; i >= 0; i-- {
+						if len(entry.conditions[i].Layers) > 0 {
+							break
+						}
+						entry.conditions = entry.conditions[:i]
+					}
+				}
+
+				// Remove unnecessary entries entirely
+				if len(entry.conditions) == 0 && len(entry.layers) == 0 {
+					continue
+				}
+			}
+
+			// Omit redundant "@layer" rules with the same set of layer names. Note
+			// that this tests all import order entries (not just layer ones) because
+			// sometimes non-layer ones can make following layer ones redundant.
+			layersKey := entry.layers
+			if entry.kind == cssImportSourceIndex {
+				layersKey = c.graph.Files[entry.sourceIndex].InputFile.Repr.(*graph.CSSRepr).AST.LayersPostImport
+			}
+			index := 0
+			for index < len(layerDuplicates) {
+				if helpers.StringArrayArraysEqual(layersKey, layerDuplicates[index].layers) {
+					break
+				}
+				index++
+			}
+			if index == len(layerDuplicates) {
+				// This is the first time we've seen this combination of layer names.
+				// Allocate a new set of duplicate indices to track this combination.
+				layerDuplicates = append(layerDuplicates, duplicateEntry{layers: layersKey})
+			}
+			duplicates := layerDuplicates[index].indices
+			for j := len(duplicates) - 1; j >= 0; j-- {
+				if index := duplicates[j]; isConditionalImportRedundant(entry.conditions, wipOrder[index].conditions) {
+					if entry.kind != cssImportLayers {
+						// If an empty layer is followed immediately by a full layer and
+						// everything else is identical, then we don't need to emit the
+						// empty layer. For example:
+						//
+						//   @media screen {
+						//     @supports (display: grid) {
+						//       @layer foo;
+						//     }
+						//   }
+						//   @media screen {
+						//     @supports (display: grid) {
+						//       @layer foo {
+						//         div {
+						//           color: red;
+						//         }
+						//       }
+						//     }
+						//   }
+						//
+						// This can be improved by dropping the empty layer. But we can
+						// only do this if there's nothing in between these two rules.
+						if j == len(duplicates)-1 && index == len(wipOrder)-1 {
+							if other := wipOrder[index]; other.kind == cssImportLayers && importConditionsAreEqual(entry.conditions, other.conditions) {
+								// Remove the previous entry and then overwrite it below
+								duplicates = duplicates[:j]
+								wipOrder = wipOrder[:index]
+								break
+							}
+						}
+
+						// Non-layer entries still need to be present because they have
+						// other side effects beside inserting things in the layer order
+						wipOrder = append(wipOrder, entry)
+					}
+
+					// Don't add this to the duplicate list below because it's redundant
+					continue nextForward
+				}
+			}
+			layerDuplicates[index].indices = append(duplicates, len(wipOrder))
+			wipOrder = append(wipOrder, entry)
+		}
+
+		order, wipOrder = wipOrder, order[:0]
+	}
+
+	// Finally, merge adjacent "@layer" rules with identical conditions together.
+	{
+		didClone := -1
+		for _, entry := range order {
+			if entry.kind == cssImportLayers && len(wipOrder) > 0 {
+				prevIndex := len(wipOrder) - 1
+				prev := wipOrder[prevIndex]
+				if prev.kind == cssImportLayers && importConditionsAreEqual(prev.conditions, entry.conditions) {
+					if didClone != prevIndex {
+						didClone = prevIndex
+						prev.layers = append([][]string{}, prev.layers...)
+					}
+					wipOrder[prevIndex].layers = append(prev.layers, entry.layers...)
+					continue
+				}
+			}
+			wipOrder = append(wipOrder, entry)
+		}
+		order = wipOrder
+	}
+
+	return
+}
+
+func importConditionsAreEqual(a []css_ast.ImportConditions, b []css_ast.ImportConditions) bool {
+	if len(a) != len(b) {
+		return false
+	}
+	for i := 0; i < len(a); i++ {
+		ai := a[i]
+		bi := b[i]
+		if !css_ast.TokensEqualIgnoringWhitespace(ai.Layers, bi.Layers) ||
+			!css_ast.TokensEqualIgnoringWhitespace(ai.Supports, bi.Supports) ||
+			!css_ast.TokensEqualIgnoringWhitespace(ai.Media, bi.Media) {
+			return false
+		}
+	}
+	return true
+}
+
+// Given two "@import" rules for the same source index (an earlier one and a
+// later one), the earlier one is masked by the later one if the later one's
+// condition list is a prefix of the earlier one's condition list.
+//
+// For example:
+//
+//	// entry.css
+//	@import "foo.css" supports(display: flex);
+//	@import "bar.css" supports(display: flex);
+//
+//	// foo.css
+//	@import "lib.css" screen;
+//
+//	// bar.css
+//	@import "lib.css";
+//
+// When we bundle this code we'll get an import order as follows:
+//
+//  1. lib.css [supports(display: flex), screen]
+//  2. foo.css [supports(display: flex)]
+//  3. lib.css [supports(display: flex)]
+//  4. bar.css [supports(display: flex)]
+//  5. entry.css []
+//
+// For "lib.css", the entry with the conditions [supports(display: flex)] should
+// make the entry with the conditions [supports(display: flex), screen] redundant.
+//
+// Note that all of this deliberately ignores the existence of "@layer" because
+// that is handled separately. All of this is only for handling unlayered styles.
+func isConditionalImportRedundant(earlier []css_ast.ImportConditions, later []css_ast.ImportConditions) bool {
+	if len(later) > len(earlier) {
+		return false
+	}
+
+	for i := 0; i < len(later); i++ {
+		a := earlier[i]
+		b := later[i]
+
+		// Only compare "@supports" and "@media" if "@layers" is equal
+		if css_ast.TokensEqualIgnoringWhitespace(a.Layers, b.Layers) {
+			sameSupports := css_ast.TokensEqualIgnoringWhitespace(a.Supports, b.Supports)
+			sameMedia := css_ast.TokensEqualIgnoringWhitespace(a.Media, b.Media)
+
+			// If the import conditions are exactly equal, then only keep
+			// the later one. The earlier one is redundant. Example:
+			//
+			//   @import "foo.css" layer(abc) supports(display: flex) screen;
+			//   @import "foo.css" layer(abc) supports(display: flex) screen;
+			//
+			// The later one makes the earlier one redundant.
+			if sameSupports && sameMedia {
+				continue
+			}
+
+			// If the media conditions are exactly equal and the later one
+			// doesn't have any supports conditions, then the later one will
+			// apply in all cases where the earlier one applies. Example:
+			//
+			//   @import "foo.css" layer(abc) supports(display: flex) screen;
+			//   @import "foo.css" layer(abc) screen;
+			//
+			// The later one makes the earlier one redundant.
+			if sameMedia && len(b.Supports) == 0 {
+				continue
+			}
+
+			// If the supports conditions are exactly equal and the later one
+			// doesn't have any media conditions, then the later one will
+			// apply in all cases where the earlier one applies. Example:
+			//
+			//   @import "foo.css" layer(abc) supports(display: flex) screen;
+			//   @import "foo.css" layer(abc) supports(display: flex);
+			//
+			// The later one makes the earlier one redundant.
+			if sameSupports && len(b.Media) == 0 {
+				continue
+			}
+		}
+
+		return false
+	}
+
+	return true
+}
+
// computeChunks partitions all live files into output chunks and stores the
// result in "c.chunks". Each chunk is keyed by the bit set of entry points
// that can reach its files. Entry points always get their own chunk (plus an
// associated CSS chunk for JS entry points that import CSS); remaining files
// are grouped by their entry bits, which is where code splitting happens.
// The resulting chunk list is sorted for determinism, then each chunk gets
// its file order, unique key, and output path template computed.
func (c *linkerContext) computeChunks() {
	c.timer.Begin("Compute chunks")
	defer c.timer.End("Compute chunks")

	// Chunks are deduplicated by the string form of their entry-point bit set
	jsChunks := make(map[string]chunkInfo)
	cssChunks := make(map[string]chunkInfo)

	// Create chunks for entry points
	for i, entryPoint := range c.graph.EntryPoints() {
		file := &c.graph.Files[entryPoint.SourceIndex]

		// Create a chunk for the entry point here to ensure that the chunk is
		// always generated even if the resulting file is empty
		entryBits := helpers.NewBitSet(uint(len(c.graph.EntryPoints())))
		entryBits.SetBit(uint(i))
		key := entryBits.String()
		chunk := chunkInfo{
			entryBits:             entryBits,
			isEntryPoint:          true,
			sourceIndex:           entryPoint.SourceIndex,
			entryPointBit:         uint(i),
			filesWithPartsInChunk: make(map[uint32]bool),
		}

		switch file.InputFile.Repr.(type) {
		case *graph.JSRepr:
			chunkRepr := &chunkReprJS{}
			chunk.chunkRepr = chunkRepr
			jsChunks[key] = chunk

			// If this JS entry point has an associated CSS entry point, generate it
			// now. This is essentially done by generating a virtual CSS file that
			// only contains "@import" statements in the order that the files were
			// discovered in JS source order, where JS source order is arbitrary but
			// consistent for dynamic imports. Then we run the CSS import order
			// algorithm to determine the final CSS file order for the chunk.

			if cssSourceIndices := c.findImportedCSSFilesInJSOrder(entryPoint.SourceIndex); len(cssSourceIndices) > 0 {
				order := c.findImportedFilesInCSSOrder(cssSourceIndices)
				cssFilesWithPartsInChunk := make(map[uint32]bool)
				for _, entry := range order {
					if entry.kind == cssImportSourceIndex {
						cssFilesWithPartsInChunk[uint32(entry.sourceIndex)] = true
					}
				}
				cssChunks[key] = chunkInfo{
					entryBits:             entryBits,
					isEntryPoint:          true,
					sourceIndex:           entryPoint.SourceIndex,
					entryPointBit:         uint(i),
					filesWithPartsInChunk: cssFilesWithPartsInChunk,
					chunkRepr: &chunkReprCSS{
						importsInChunkInOrder: order,
					},
				}
				chunkRepr.hasCSSChunk = true
			}

		case *graph.CSSRepr:
			order := c.findImportedFilesInCSSOrder([]uint32{entryPoint.SourceIndex})
			for _, entry := range order {
				if entry.kind == cssImportSourceIndex {
					chunk.filesWithPartsInChunk[uint32(entry.sourceIndex)] = true
				}
			}
			chunk.chunkRepr = &chunkReprCSS{
				importsInChunkInOrder: order,
			}
			cssChunks[key] = chunk
		}
	}

	// Figure out which JS files are in which chunk. Files reachable from the
	// same set of entry points share a chunk; new bit-set keys create new
	// (non-entry-point) chunks, which is how code splitting emits shared code.
	for _, sourceIndex := range c.graph.ReachableFiles {
		if file := &c.graph.Files[sourceIndex]; file.IsLive {
			if _, ok := file.InputFile.Repr.(*graph.JSRepr); ok {
				key := file.EntryBits.String()
				chunk, ok := jsChunks[key]
				if !ok {
					chunk.entryBits = file.EntryBits
					chunk.filesWithPartsInChunk = make(map[uint32]bool)
					chunk.chunkRepr = &chunkReprJS{}
					jsChunks[key] = chunk
				}
				chunk.filesWithPartsInChunk[uint32(sourceIndex)] = true
			}
		}
	}

	// Sort the chunks for determinism. This matters because we use chunk indices
	// as sorting keys in a few places. JS chunks come first, then CSS chunks;
	// each JS chunk with an associated CSS chunk records that chunk's index.
	sortedChunks := make([]chunkInfo, 0, len(jsChunks)+len(cssChunks))
	sortedKeys := make([]string, 0, len(jsChunks)+len(cssChunks))
	for key := range jsChunks {
		sortedKeys = append(sortedKeys, key)
	}
	sort.Strings(sortedKeys)
	jsChunkIndicesForCSS := make(map[string]uint32)
	for _, key := range sortedKeys {
		chunk := jsChunks[key]
		if chunk.chunkRepr.(*chunkReprJS).hasCSSChunk {
			jsChunkIndicesForCSS[key] = uint32(len(sortedChunks))
		}
		sortedChunks = append(sortedChunks, chunk)
	}
	sortedKeys = sortedKeys[:0]
	for key := range cssChunks {
		sortedKeys = append(sortedKeys, key)
	}
	sort.Strings(sortedKeys)
	for _, key := range sortedKeys {
		chunk := cssChunks[key]
		if jsChunkIndex, ok := jsChunkIndicesForCSS[key]; ok {
			sortedChunks[jsChunkIndex].chunkRepr.(*chunkReprJS).cssChunkIndex = uint32(len(sortedChunks))
		}
		sortedChunks = append(sortedChunks, chunk)
	}

	// Map from the entry point file to its chunk. We will need this later if
	// a file contains a dynamic import to this entry point, since we'll need
	// to look up the path for this chunk to use with the import.
	for chunkIndex, chunk := range sortedChunks {
		if chunk.isEntryPoint {
			file := &c.graph.Files[chunk.sourceIndex]

			// JS entry points that import CSS files generate two chunks, a JS chunk
			// and a CSS chunk. Don't link the CSS chunk to the JS file since the CSS
			// chunk is secondary (the JS chunk is primary).
			if _, ok := chunk.chunkRepr.(*chunkReprCSS); ok {
				if _, ok := file.InputFile.Repr.(*graph.JSRepr); ok {
					continue
				}
			}

			file.EntryPointChunkIndex = uint32(chunkIndex)
		}
	}

	// Determine the order of JS files (and parts) within the chunk ahead of time
	for _, chunk := range sortedChunks {
		if chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS); ok {
			chunkRepr.filesInChunkInOrder, chunkRepr.partsInChunkInOrder = c.findImportedPartsInJSOrder(&chunk)
		}
	}

	// Assign general information to each chunk
	for chunkIndex := range sortedChunks {
		chunk := &sortedChunks[chunkIndex]

		// Assign a unique key to each chunk. This key encodes the index directly so
		// we can easily recover it later without needing to look it up in a map. The
		// last 8 numbers of the key are the chunk index.
		chunk.uniqueKey = fmt.Sprintf("%sC%08d", c.uniqueKeyPrefix, chunkIndex)

		// Determine the standard file extension
		var stdExt string
		switch chunk.chunkRepr.(type) {
		case *chunkReprJS:
			stdExt = c.options.OutputExtensionJS
		case *chunkReprCSS:
			stdExt = c.options.OutputExtensionCSS
		}

		// Compute the template substitutions
		var dir, base, ext string
		var template []config.PathTemplate
		if chunk.isEntryPoint {
			// Only use the entry path template for user-specified entry points
			file := &c.graph.Files[chunk.sourceIndex]
			if file.IsUserSpecifiedEntryPoint() {
				template = c.options.EntryPathTemplate
			} else {
				template = c.options.ChunkPathTemplate
			}

			if c.options.AbsOutputFile != "" {
				// If the output path was configured explicitly, use it verbatim
				dir = "/"
				base = c.fs.Base(c.options.AbsOutputFile)
				originalExt := c.fs.Ext(base)
				base = base[:len(base)-len(originalExt)]

				// Use the extension from the explicit output file path. However, don't do
				// that if this is a CSS chunk but the entry point file is not CSS. In that
				// case use the standard extension. This happens when importing CSS into JS.
				if _, ok := file.InputFile.Repr.(*graph.CSSRepr); ok || stdExt != c.options.OutputExtensionCSS {
					ext = originalExt
				} else {
					ext = stdExt
				}
			} else {
				// Otherwise, derive the output path from the input path
				dir, base = bundler.PathRelativeToOutbase(
					&c.graph.Files[chunk.sourceIndex].InputFile,
					c.options,
					c.fs,
					!file.IsUserSpecifiedEntryPoint(),
					c.graph.EntryPoints()[chunk.entryPointBit].OutputPath,
				)
				ext = stdExt
			}
		} else {
			dir = "/"
			base = "chunk"
			ext = stdExt
			template = c.options.ChunkPathTemplate
		}

		// Determine the output path template
		templateExt := strings.TrimPrefix(ext, ".")
		template = append(append(make([]config.PathTemplate, 0, len(template)+1), template...), config.PathTemplate{Data: ext})
		chunk.finalTemplate = config.SubstituteTemplate(template, config.PathPlaceholders{
			Dir:  &dir,
			Name: &base,
			Ext:  &templateExt,
		})
	}

	c.chunks = sortedChunks
}
+
// chunkOrder holds the sort keys used to order files within a chunk.
type chunkOrder struct {
	sourceIndex uint32
	distance    uint32 // Distance from the nearest entry point (primary key)
	tieBreaker  uint32 // Stable source index (secondary key, for determinism)
}

// chunkOrderArray implements sort.Interface so Go's native sort can be used.
type chunkOrderArray []chunkOrder

func (a chunkOrderArray) Len() int          { return len(a) }
func (a chunkOrderArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less orders by distance first, breaking ties with the stable source index.
func (a chunkOrderArray) Less(i int, j int) bool {
	if a[i].distance != a[j].distance {
		return a[i].distance < a[j].distance
	}
	return a[i].tieBreaker < a[j].tieBreaker
}
+
+func appendOrExtendPartRange(ranges []partRange, sourceIndex uint32, partIndex uint32) []partRange {
+	if i := len(ranges) - 1; i >= 0 {
+		if r := &ranges[i]; r.sourceIndex == sourceIndex && r.partIndexEnd == partIndex {
+			r.partIndexEnd = partIndex + 1
+			return ranges
+		}
+	}
+
+	return append(ranges, partRange{
+		sourceIndex:    sourceIndex,
+		partIndexBegin: partIndex,
+		partIndexEnd:   partIndex + 1,
+	})
+}
+
+func (c *linkerContext) shouldIncludePart(repr *graph.JSRepr, part js_ast.Part) bool {
+	// As an optimization, ignore parts containing a single import statement to
+	// an internal non-wrapped file. These will be ignored anyway and it's a
+	// performance hit to spin up a goroutine only to discover this later.
+	if len(part.Stmts) == 1 {
+		if s, ok := part.Stmts[0].Data.(*js_ast.SImport); ok {
+			record := &repr.AST.ImportRecords[s.ImportRecordIndex]
+			if record.SourceIndex.IsValid() && c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr).Meta.Wrap == graph.WrapNone {
+				return false
+			}
+		}
+	}
+	return true
+}
+
// findImportedPartsInJSOrder linearizes the JS files belonging to a chunk
// into "js" (file order) and "jsParts" (contiguous part ranges), with
// dependencies placed before dependents. The runtime always comes first,
// followed by files in order of distance from an entry point (ties broken by
// stable source index), each expanded depth-first.
func (c *linkerContext) findImportedPartsInJSOrder(chunk *chunkInfo) (js []uint32, jsParts []partRange) {
	sorted := make(chunkOrderArray, 0, len(chunk.filesWithPartsInChunk))

	// Attach information to the files for use with sorting
	for sourceIndex := range chunk.filesWithPartsInChunk {
		file := &c.graph.Files[sourceIndex]
		sorted = append(sorted, chunkOrder{
			sourceIndex: sourceIndex,
			distance:    file.DistanceFromEntryPoint,
			tieBreaker:  c.graph.StableSourceIndices[sourceIndex],
		})
	}

	// Sort so files closest to an entry point come first. If two files are
	// equidistant to an entry point, then break the tie by sorting on the
	// stable source index derived from the DFS over all entry points.
	sort.Sort(sorted)

	visited := make(map[uint32]bool)
	jsPartsPrefix := []partRange{}

	// Traverse the graph using this stable order and linearize the files with
	// dependencies before dependents
	var visit func(uint32)
	visit = func(sourceIndex uint32) {
		if visited[sourceIndex] {
			return
		}

		visited[sourceIndex] = true
		file := &c.graph.Files[sourceIndex]

		if repr, ok := file.InputFile.Repr.(*graph.JSRepr); ok {
			// A file belongs to this chunk only if it's reachable from exactly
			// the same set of entry points
			isFileInThisChunk := chunk.entryBits.Equals(file.EntryBits)

			// Wrapped files can't be split because they are all inside the wrapper
			canFileBeSplit := repr.Meta.Wrap == graph.WrapNone

			// Make sure the generated call to "__export(exports, ...)" comes first
			// before anything else in this file
			if canFileBeSplit && isFileInThisChunk && repr.AST.Parts[js_ast.NSExportPartIndex].IsLive {
				jsParts = appendOrExtendPartRange(jsParts, sourceIndex, js_ast.NSExportPartIndex)
			}

			for partIndex, part := range repr.AST.Parts {
				isPartInThisChunk := isFileInThisChunk && repr.AST.Parts[partIndex].IsLive

				// Also traverse any files imported by this part
				for _, importRecordIndex := range part.ImportRecordIndices {
					record := &repr.AST.ImportRecords[importRecordIndex]
					if record.SourceIndex.IsValid() && (record.Kind == ast.ImportStmt || isPartInThisChunk) {
						if c.isExternalDynamicImport(record, sourceIndex) {
							// Don't follow import() dependencies
							continue
						}
						visit(record.SourceIndex.GetIndex())
					}
				}

				// Then include this part after the files it imports
				if isPartInThisChunk {
					isFileInThisChunk = true
					if canFileBeSplit && uint32(partIndex) != js_ast.NSExportPartIndex && c.shouldIncludePart(repr, part) {
						// Runtime parts go in the prefix so they always come first
						if sourceIndex == runtime.SourceIndex {
							jsPartsPrefix = appendOrExtendPartRange(jsPartsPrefix, sourceIndex, uint32(partIndex))
						} else {
							jsParts = appendOrExtendPartRange(jsParts, sourceIndex, uint32(partIndex))
						}
					}
				}
			}

			if isFileInThisChunk {
				js = append(js, sourceIndex)

				// CommonJS files are all-or-nothing so all parts must be contiguous
				if !canFileBeSplit {
					jsPartsPrefix = append(jsPartsPrefix, partRange{
						sourceIndex:    sourceIndex,
						partIndexBegin: 0,
						partIndexEnd:   uint32(len(repr.AST.Parts)),
					})
				}
			}
		}
	}

	// Always put the runtime code first before anything else
	visit(runtime.SourceIndex)
	for _, data := range sorted {
		visit(data.sourceIndex)
	}
	jsParts = append(jsPartsPrefix, jsParts...)
	return
}
+
+// shouldRemoveImportExportStmt reports whether an import/export
+// statement referencing the given import record should be removed from
+// the output. When it returns true it may also have appended a
+// replacement statement (a "require()" call for external or
+// CommonJS-wrapped targets, an "init()" call for ESM-wrapped targets)
+// to stmtList.insideWrapperPrefix. It returns false only for external
+// imports when the output format keeps native ESM import/export syntax.
+func (c *linkerContext) shouldRemoveImportExportStmt(
+	sourceIndex uint32,
+	stmtList *stmtList,
+	loc logger.Loc,
+	namespaceRef ast.Ref,
+	importRecordIndex uint32,
+) bool {
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+	record := &repr.AST.ImportRecords[importRecordIndex]
+
+	// Is this an external import?
+	if !record.SourceIndex.IsValid() {
+		// Keep the "import" statement if "import" statements are supported
+		if c.options.OutputFormat.KeepESMImportExportSyntax() {
+			return false
+		}
+
+		// Otherwise, replace this statement with a call to "require()"
+		stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{
+			Loc: loc,
+			Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
+				Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: namespaceRef}},
+				ValueOrNil: js_ast.Expr{Loc: record.Range.Loc, Data: &js_ast.ERequireString{
+					ImportRecordIndex: importRecordIndex,
+				}},
+			}}},
+		})
+		return true
+	}
+
+	// We don't need a call to "require()" if this is a self-import inside a
+	// CommonJS-style module, since we can just reference the exports directly.
+	if repr.AST.ExportsKind == js_ast.ExportsCommonJS && ast.FollowSymbols(c.graph.Symbols, namespaceRef) == repr.AST.ExportsRef {
+		return true
+	}
+
+	otherFile := &c.graph.Files[record.SourceIndex.GetIndex()]
+	otherRepr := otherFile.InputFile.Repr.(*graph.JSRepr)
+	switch otherRepr.Meta.Wrap {
+	case graph.WrapNone:
+		// Remove the statement entirely if this module is not wrapped
+
+	case graph.WrapCJS:
+		// Replace the statement with a call to "require()"
+		stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{
+			Loc: loc,
+			Data: &js_ast.SLocal{Decls: []js_ast.Decl{{
+				Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: namespaceRef}},
+				ValueOrNil: js_ast.Expr{Loc: record.Range.Loc, Data: &js_ast.ERequireString{
+					ImportRecordIndex: importRecordIndex,
+				}},
+			}}},
+		})
+
+	case graph.WrapESM:
+		// Ignore this file if it's not included in the bundle. This can happen for
+		// wrapped ESM files but not for wrapped CommonJS files because we allow
+		// tree shaking inside wrapped ESM files.
+		if !otherFile.IsLive {
+			break
+		}
+
+		// Replace the statement with a call to "init()"
+		value := js_ast.Expr{Loc: loc, Data: &js_ast.ECall{Target: js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: otherRepr.AST.WrapperRef}}}}
+		if otherRepr.Meta.IsAsyncOrHasAsyncDependency {
+			// This currently evaluates sibling dependencies in serial instead of in
+			// parallel, which is incorrect. This should be changed to store a promise
+			// and await all stored promises after all imports but before any code.
+			value.Data = &js_ast.EAwait{Value: value}
+		}
+		stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{Loc: loc, Data: &js_ast.SExpr{Value: value}})
+	}
+
+	return true
+}
+
+// convertStmtsForChunk rewrites one part's statements for inclusion in
+// a chunk: import/export statements are removed or lowered depending on
+// the output format, "export" keywords are stripped while bundling, and
+// statements that must not live inside a wrapper closure are diverted
+// to stmtList.outsideWrapperPrefix. All statements that survive the
+// rewriting are appended to stmtList.insideWrapperSuffix.
+func (c *linkerContext) convertStmtsForChunk(sourceIndex uint32, stmtList *stmtList, partStmts []js_ast.Stmt) {
+	file := &c.graph.Files[sourceIndex]
+	shouldStripExports := c.options.Mode != config.ModePassThrough || !file.IsEntryPoint()
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+	shouldExtractESMStmtsForWrap := repr.Meta.Wrap != graph.WrapNone
+
+	// If this file is a CommonJS entry point, double-write re-exports to the
+	// external CommonJS "module.exports" object in addition to our internal ESM
+	// export namespace object. The difference between these two objects is that
+	// our internal one must not have the "__esModule" marker while the external
+	// one must have the "__esModule" marker. This is done because an ES module
+	// importing itself should not see the "__esModule" marker but a CommonJS module
+	// importing us should see the "__esModule" marker.
+	var moduleExportsForReExportOrNil js_ast.Expr
+	if c.options.OutputFormat == config.FormatCommonJS && file.IsEntryPoint() {
+		moduleExportsForReExportOrNil = js_ast.Expr{Data: &js_ast.EDot{
+			Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}},
+			Name:   "exports",
+		}}
+	}
+
+	for _, stmt := range partStmts {
+		switch s := stmt.Data.(type) {
+		case *js_ast.SImport:
+			// "import * as ns from 'path'"
+			// "import {foo} from 'path'"
+			if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) {
+				continue
+			}
+
+			if c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames) && s.Items != nil {
+				for _, item := range *s.Items {
+					c.maybeForbidArbitraryModuleNamespaceIdentifier("import", sourceIndex, item.AliasLoc, item.Alias)
+				}
+			}
+
+			// Make sure these don't end up in the wrapper closure
+			if shouldExtractESMStmtsForWrap {
+				stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+				continue
+			}
+
+		case *js_ast.SExportStar:
+			// "export * as ns from 'path'"
+			if s.Alias != nil {
+				if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) {
+					continue
+				}
+
+				if c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames) {
+					c.maybeForbidArbitraryModuleNamespaceIdentifier("export", sourceIndex, s.Alias.Loc, s.Alias.OriginalName)
+				}
+
+				if shouldStripExports {
+					// Turn this statement into "import * as ns from 'path'"
+					stmt.Data = &js_ast.SImport{
+						NamespaceRef:      s.NamespaceRef,
+						StarNameLoc:       &s.Alias.Loc,
+						ImportRecordIndex: s.ImportRecordIndex,
+					}
+				}
+
+				// Make sure these don't end up in the wrapper closure
+				if shouldExtractESMStmtsForWrap {
+					stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+					continue
+				}
+				break
+			}
+
+			// "export * from 'path'"
+			if !shouldStripExports {
+				break
+			}
+			record := &repr.AST.ImportRecords[s.ImportRecordIndex]
+
+			// Is this export star evaluated at run time?
+			if !record.SourceIndex.IsValid() && c.options.OutputFormat.KeepESMImportExportSyntax() {
+				if record.Flags.Has(ast.CallsRunTimeReExportFn) {
+					// Turn this statement into "import * as ns from 'path'"
+					stmt.Data = &js_ast.SImport{
+						NamespaceRef:      s.NamespaceRef,
+						StarNameLoc:       &logger.Loc{Start: stmt.Loc.Start},
+						ImportRecordIndex: s.ImportRecordIndex,
+					}
+
+					// Prefix this module with "__reExport(exports, ns, module.exports)"
+					exportStarRef := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members["__reExport"].Ref
+					args := []js_ast.Expr{
+						{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}},
+						{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: s.NamespaceRef}},
+					}
+					if moduleExportsForReExportOrNil.Data != nil {
+						args = append(args, moduleExportsForReExportOrNil)
+					}
+					stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{
+						Loc: stmt.Loc,
+						Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{
+							Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}},
+							Args:   args,
+						}}},
+					})
+
+					// Make sure these don't end up in the wrapper closure
+					if shouldExtractESMStmtsForWrap {
+						stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+						continue
+					}
+				}
+			} else {
+				if record.SourceIndex.IsValid() {
+					if otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr); otherRepr.Meta.Wrap == graph.WrapESM {
+						// Make sure the target module is initialized before re-exporting it
+						stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{Loc: stmt.Loc,
+							Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{
+								Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: otherRepr.AST.WrapperRef}}}}}})
+					}
+				}
+
+				if record.Flags.Has(ast.CallsRunTimeReExportFn) {
+					var target js_ast.E
+					if record.SourceIndex.IsValid() {
+						if otherRepr := c.graph.Files[record.SourceIndex.GetIndex()].InputFile.Repr.(*graph.JSRepr); otherRepr.AST.ExportsKind == js_ast.ExportsESMWithDynamicFallback {
+							// Prefix this module with "__reExport(exports, otherExports, module.exports)"
+							target = &js_ast.EIdentifier{Ref: otherRepr.AST.ExportsRef}
+						}
+					}
+					if target == nil {
+						// Prefix this module with "__reExport(exports, require(path), module.exports)"
+						target = &js_ast.ERequireString{
+							ImportRecordIndex: s.ImportRecordIndex,
+						}
+					}
+					exportStarRef := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members["__reExport"].Ref
+					args := []js_ast.Expr{
+						{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}},
+						{Loc: record.Range.Loc, Data: target},
+					}
+					if moduleExportsForReExportOrNil.Data != nil {
+						args = append(args, moduleExportsForReExportOrNil)
+					}
+					stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{
+						Loc: stmt.Loc,
+						Data: &js_ast.SExpr{Value: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.ECall{
+							Target: js_ast.Expr{Loc: stmt.Loc, Data: &js_ast.EIdentifier{Ref: exportStarRef}},
+							Args:   args,
+						}}},
+					})
+				}
+
+				// Remove the export star statement
+				continue
+			}
+
+		case *js_ast.SExportFrom:
+			// "export {foo} from 'path'"
+			if c.shouldRemoveImportExportStmt(sourceIndex, stmtList, stmt.Loc, s.NamespaceRef, s.ImportRecordIndex) {
+				continue
+			}
+
+			if c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames) {
+				for _, item := range s.Items {
+					c.maybeForbidArbitraryModuleNamespaceIdentifier("export", sourceIndex, item.AliasLoc, item.Alias)
+					if item.AliasLoc != item.Name.Loc {
+						c.maybeForbidArbitraryModuleNamespaceIdentifier("import", sourceIndex, item.Name.Loc, item.OriginalName)
+					}
+				}
+			}
+
+			if shouldStripExports {
+				// Turn this statement into "import {foo} from 'path'"
+				for i, item := range s.Items {
+					s.Items[i].Alias = item.OriginalName
+				}
+				stmt.Data = &js_ast.SImport{
+					NamespaceRef:      s.NamespaceRef,
+					Items:             &s.Items,
+					ImportRecordIndex: s.ImportRecordIndex,
+					IsSingleLine:      s.IsSingleLine,
+				}
+			}
+
+			// Make sure these don't end up in the wrapper closure
+			if shouldExtractESMStmtsForWrap {
+				stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+				continue
+			}
+
+		case *js_ast.SExportClause:
+			if shouldStripExports {
+				// Remove export statements entirely
+				continue
+			}
+
+			if c.options.UnsupportedJSFeatures.Has(compat.ArbitraryModuleNamespaceNames) {
+				for _, item := range s.Items {
+					c.maybeForbidArbitraryModuleNamespaceIdentifier("export", sourceIndex, item.AliasLoc, item.Alias)
+				}
+			}
+
+			// Make sure these don't end up in the wrapper closure
+			if shouldExtractESMStmtsForWrap {
+				stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+				continue
+			}
+
+		case *js_ast.SFunction:
+			// Strip the "export" keyword while bundling
+			if shouldStripExports && s.IsExport {
+				// Be careful to not modify the original statement
+				clone := *s
+				clone.IsExport = false
+				stmt.Data = &clone
+			}
+
+		case *js_ast.SClass:
+			// Strip the "export" keyword while bundling
+			if shouldStripExports && s.IsExport {
+				// Be careful to not modify the original statement
+				clone := *s
+				clone.IsExport = false
+				stmt.Data = &clone
+			}
+
+		case *js_ast.SLocal:
+			// Strip the "export" keyword while bundling
+			if shouldStripExports && s.IsExport {
+				// Be careful to not modify the original statement
+				clone := *s
+				clone.IsExport = false
+				stmt.Data = &clone
+			}
+
+		case *js_ast.SExportDefault:
+			// If we're bundling, convert "export default" into a normal declaration
+			if shouldStripExports {
+				switch s2 := s.Value.Data.(type) {
+				case *js_ast.SExpr:
+					// "export default foo;" => "var default = foo;"
+					stmt = js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SLocal{Decls: []js_ast.Decl{
+						{Binding: js_ast.Binding{Loc: s.DefaultName.Loc, Data: &js_ast.BIdentifier{Ref: s.DefaultName.Ref}}, ValueOrNil: s2.Value},
+					}}}
+
+				case *js_ast.SFunction:
+					// "export default function() {}" => "function default() {}"
+					// "export default function foo() {}" => "function foo() {}"
+
+					// Be careful to not modify the original statement
+					s2 = &js_ast.SFunction{Fn: s2.Fn}
+					s2.Fn.Name = &s.DefaultName
+
+					stmt = js_ast.Stmt{Loc: s.Value.Loc, Data: s2}
+
+				case *js_ast.SClass:
+					// "export default class {}" => "class default {}"
+					// "export default class Foo {}" => "class Foo {}"
+
+					// Be careful to not modify the original statement
+					s2 = &js_ast.SClass{Class: s2.Class}
+					s2.Class.Name = &s.DefaultName
+
+					stmt = js_ast.Stmt{Loc: s.Value.Loc, Data: s2}
+
+				default:
+					panic("Internal error")
+				}
+			}
+		}
+
+		stmtList.insideWrapperSuffix = append(stmtList.insideWrapperSuffix, stmt)
+	}
+}
+
+// mergeAdjacentLocalStmts collapses runs of compatible variable
+// statements into a single statement with multiple declarations:
+// "var a = 1; var b = 2;" => "var a = 1, b = 2;". The merge happens
+// in place over the input slice; only statements with the same
+// declaration kind and export flag are combined. The first merge into
+// a statement clones it so the original AST node is never mutated.
+func mergeAdjacentLocalStmts(stmts []js_ast.Stmt) []js_ast.Stmt {
+	if len(stmts) == 0 {
+		return stmts
+	}
+
+	clonedPrevious := false
+	out := 1
+
+	for _, current := range stmts[1:] {
+		// See if this statement can fold into the previous kept one
+		if next, ok := current.Data.(*js_ast.SLocal); ok {
+			if prev, ok := stmts[out-1].Data.(*js_ast.SLocal); ok && prev.Kind == next.Kind && prev.IsExport == next.IsExport {
+				if clonedPrevious {
+					// The previous statement is already our private clone, so
+					// appending directly avoids O(n^2) repeated copying
+					prev.Decls = append(prev.Decls, next.Decls...)
+				} else {
+					// First merge: clone so the original statement is untouched
+					clonedPrevious = true
+					merged := *prev
+					merged.Decls = make([]js_ast.Decl, 0, len(prev.Decls)+len(next.Decls))
+					merged.Decls = append(merged.Decls, prev.Decls...)
+					merged.Decls = append(merged.Decls, next.Decls...)
+					stmts[out-1].Data = &merged
+				}
+				continue
+			}
+		}
+
+		// Not mergeable: keep the statement and reset the clone marker
+		clonedPrevious = false
+		stmts[out] = current
+		out++
+	}
+
+	return stmts[:out]
+}
+
+// stmtList collects the statements generated for one file in a chunk,
+// bucketed by where they must be emitted relative to any wrapper
+// closure produced for the file.
+type stmtList struct {
+	// These statements come first, and can be inside the wrapper
+	insideWrapperPrefix []js_ast.Stmt
+
+	// These statements come last, and can be inside the wrapper
+	insideWrapperSuffix []js_ast.Stmt
+
+	// These statements must be hoisted outside (before) the wrapper
+	// closure, e.g. import statements and hoisted declarations
+	outsideWrapperPrefix []js_ast.Stmt
+}
+
+// compileResultJS holds the printed JavaScript output for one part
+// range of one source file, together with positioning data used when
+// stitching the chunk and its source map together.
+type compileResultJS struct {
+	js_printer.PrintResult
+
+	// Which source file this printed output came from
+	sourceIndex uint32
+
+	// This is the line and column offset since the previous JavaScript string
+	// or the start of the file if this is the first JavaScript string.
+	generatedOffset sourcemap.LineColumnOffset
+}
+
+// requireOrImportMetaForSource returns the printer metadata needed to
+// lower "require()" / "import()" of the given source file: the file's
+// wrapper reference, whether that wrapper is async, and — only for
+// ESM-wrapped files — the exports reference (otherwise invalid).
+func (c *linkerContext) requireOrImportMetaForSource(sourceIndex uint32) (meta js_printer.RequireOrImportMeta) {
+	repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+	meta.WrapperRef = repr.AST.WrapperRef
+	meta.IsWrapperAsync = repr.Meta.IsAsyncOrHasAsyncDependency
+	meta.ExportsRef = ast.InvalidRef
+	if repr.Meta.Wrap == graph.WrapESM {
+		meta.ExportsRef = repr.AST.ExportsRef
+	}
+	return
+}
+
+// generateCodeForFileInChunkJS converts one part range of one file into
+// printed JavaScript, storing the output into *result and signaling
+// waitGroup when done. It is designed to run concurrently with other
+// part ranges (note the recover + WaitGroup handling). Wrapped files
+// get their statements enclosed in a "__commonJS(...)" or "__esm(...)"
+// closure here, with hoistable declarations moved outside the closure.
+func (c *linkerContext) generateCodeForFileInChunkJS(
+	r renamer.Renamer,
+	waitGroup *sync.WaitGroup,
+	partRange partRange,
+	toCommonJSRef ast.Ref,
+	toESMRef ast.Ref,
+	runtimeRequireRef ast.Ref,
+	result *compileResultJS,
+	dataForSourceMaps []bundler.DataForSourceMap,
+) {
+	defer c.recoverInternalError(waitGroup, partRange.sourceIndex)
+
+	file := &c.graph.Files[partRange.sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+	nsExportPartIndex := js_ast.NSExportPartIndex
+	needsWrapper := false
+	stmtList := stmtList{}
+
+	// The top-level directive must come first (the non-wrapped case is handled
+	// by the chunk generation code, although only for the entry point)
+	if repr.Meta.Wrap != graph.WrapNone && !file.IsEntryPoint() {
+		for _, directive := range repr.AST.Directives {
+			stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, js_ast.Stmt{
+				Data: &js_ast.SDirective{Value: helpers.StringToUTF16(directive)},
+			})
+		}
+	}
+
+	// Make sure the generated call to "__export(exports, ...)" comes first
+	// before anything else.
+	if nsExportPartIndex >= partRange.partIndexBegin && nsExportPartIndex < partRange.partIndexEnd &&
+		repr.AST.Parts[nsExportPartIndex].IsLive {
+		c.convertStmtsForChunk(partRange.sourceIndex, &stmtList, repr.AST.Parts[nsExportPartIndex].Stmts)
+
+		// Move everything to the prefix list
+		if repr.Meta.Wrap == graph.WrapESM {
+			stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmtList.insideWrapperSuffix...)
+		} else {
+			stmtList.insideWrapperPrefix = append(stmtList.insideWrapperPrefix, stmtList.insideWrapperSuffix...)
+		}
+		stmtList.insideWrapperSuffix = nil
+	}
+
+	// Track which part (if any) holds the lazily-evaluated default export so
+	// the JSON special case below can substitute property references into it
+	var partIndexForLazyDefaultExport ast.Index32
+	if repr.AST.HasLazyExport {
+		if defaultExport, ok := repr.Meta.ResolvedExports["default"]; ok {
+			partIndexForLazyDefaultExport = ast.MakeIndex32(repr.TopLevelSymbolToParts(defaultExport.Ref)[0])
+		}
+	}
+
+	// Add all other parts in this chunk
+	for partIndex := partRange.partIndexBegin; partIndex < partRange.partIndexEnd; partIndex++ {
+		part := repr.AST.Parts[partIndex]
+		if !repr.AST.Parts[partIndex].IsLive {
+			// Skip the part if it's not in this chunk
+			continue
+		}
+
+		if uint32(partIndex) == nsExportPartIndex {
+			// Skip the generated call to "__export()" that was extracted above
+			continue
+		}
+
+		// Mark if we hit the dummy part representing the wrapper
+		if uint32(partIndex) == repr.Meta.WrapperPartIndex.GetIndex() {
+			needsWrapper = true
+			continue
+		}
+
+		stmts := part.Stmts
+
+		// If this could be a JSON file that exports a top-level object literal, go
+		// over the non-default top-level properties that ended up being imported
+		// and substitute references to them into the main top-level object literal.
+		// So this JSON file:
+		//
+		//   {
+		//     "foo": [1, 2, 3],
+		//     "bar": [4, 5, 6],
+		//   }
+		//
+		// is initially compiled into this:
+		//
+		//   export var foo = [1, 2, 3];
+		//   export var bar = [4, 5, 6];
+		//   export default {
+		//     foo: [1, 2, 3],
+		//     bar: [4, 5, 6],
+		//   };
+		//
+		// But we turn it into this if both "foo" and "default" are imported:
+		//
+		//   export var foo = [1, 2, 3];
+		//   export default {
+		//     foo,
+		//     bar: [4, 5, 6],
+		//   };
+		//
+		if partIndexForLazyDefaultExport.IsValid() && partIndex == partIndexForLazyDefaultExport.GetIndex() {
+			stmt := stmts[0]
+			defaultExport := stmt.Data.(*js_ast.SExportDefault)
+			defaultExpr := defaultExport.Value.Data.(*js_ast.SExpr)
+
+			// Be careful: the top-level value in a JSON file is not necessarily an object
+			if object, ok := defaultExpr.Value.Data.(*js_ast.EObject); ok {
+				objectClone := *object
+				objectClone.Properties = append([]js_ast.Property{}, objectClone.Properties...)
+
+				// If any top-level properties ended up being imported directly, change
+				// the property to just reference the corresponding variable instead
+				for i, property := range object.Properties {
+					if str, ok := property.Key.Data.(*js_ast.EString); ok {
+						if name := helpers.UTF16ToString(str.Value); name != "default" {
+							if export, ok := repr.Meta.ResolvedExports[name]; ok {
+								if part := repr.AST.Parts[repr.TopLevelSymbolToParts(export.Ref)[0]]; part.IsLive {
+									ref := part.Stmts[0].Data.(*js_ast.SLocal).Decls[0].Binding.Data.(*js_ast.BIdentifier).Ref
+									objectClone.Properties[i].ValueOrNil = js_ast.Expr{Loc: property.Key.Loc, Data: &js_ast.EIdentifier{Ref: ref}}
+								}
+							}
+						}
+					}
+				}
+
+				// Avoid mutating the original AST
+				defaultExprClone := *defaultExpr
+				defaultExprClone.Value.Data = &objectClone
+				defaultExportClone := *defaultExport
+				defaultExportClone.Value.Data = &defaultExprClone
+				stmt.Data = &defaultExportClone
+				stmts = []js_ast.Stmt{stmt}
+			}
+		}
+
+		c.convertStmtsForChunk(partRange.sourceIndex, &stmtList, stmts)
+	}
+
+	// Hoist all import statements before any normal statements. ES6 imports
+	// are different than CommonJS imports. All modules imported via ES6 import
+	// statements are evaluated before the module doing the importing is
+	// evaluated (well, except for cyclic import scenarios). We need to preserve
+	// these semantics even when modules imported via ES6 import statements end
+	// up being CommonJS modules.
+	stmts := stmtList.insideWrapperSuffix
+	if len(stmtList.insideWrapperPrefix) > 0 {
+		stmts = append(stmtList.insideWrapperPrefix, stmts...)
+	}
+	if c.options.MinifySyntax {
+		stmts = mergeAdjacentLocalStmts(stmts)
+	}
+
+	// Optionally wrap all statements in a closure
+	if needsWrapper {
+		switch repr.Meta.Wrap {
+		case graph.WrapCJS:
+			// Only include the arguments that are actually used
+			args := []js_ast.Arg{}
+			if repr.AST.UsesExportsRef || repr.AST.UsesModuleRef {
+				args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ExportsRef}}})
+				if repr.AST.UsesModuleRef {
+					args = append(args, js_ast.Arg{Binding: js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.ModuleRef}}})
+				}
+			}
+
+			var cjsArgs []js_ast.Expr
+			if c.options.ProfilerNames {
+				// "__commonJS({ 'file.js'(exports, module) { ... } })"
+				kind := js_ast.PropertyField
+				if !c.options.UnsupportedJSFeatures.Has(compat.ObjectExtensions) {
+					kind = js_ast.PropertyMethod
+				}
+				cjsArgs = []js_ast.Expr{{Data: &js_ast.EObject{Properties: []js_ast.Property{{
+					Kind:       kind,
+					Key:        js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(file.InputFile.Source.PrettyPath)}},
+					ValueOrNil: js_ast.Expr{Data: &js_ast.EFunction{Fn: js_ast.Fn{Args: args, Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}}}},
+				}}}}}
+			} else if c.options.UnsupportedJSFeatures.Has(compat.Arrow) {
+				// "__commonJS(function (exports, module) { ... })"
+				cjsArgs = []js_ast.Expr{{Data: &js_ast.EFunction{Fn: js_ast.Fn{Args: args, Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}}}}}
+			} else {
+				// "__commonJS((exports, module) => { ... })"
+				cjsArgs = []js_ast.Expr{{Data: &js_ast.EArrow{Args: args, Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}}}}
+			}
+			value := js_ast.Expr{Data: &js_ast.ECall{
+				Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.cjsRuntimeRef}},
+				Args:   cjsArgs,
+			}}
+
+			// "var require_foo = __commonJS(...);"
+			stmts = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{
+				Decls: []js_ast.Decl{{
+					Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.WrapperRef}},
+					ValueOrNil: value,
+				}},
+			}})
+
+		case graph.WrapESM:
+			// The wrapper only needs to be "async" if there is a transitive async
+			// dependency. For correctness, we must not use "async" if the module
+			// isn't async because then calling "require()" on that module would
+			// swallow any exceptions thrown during module initialization.
+			isAsync := repr.Meta.IsAsyncOrHasAsyncDependency
+
+			// Hoist all top-level "var" and "function" declarations out of the closure
+			var decls []js_ast.Decl
+			end := 0
+			for _, stmt := range stmts {
+				switch s := stmt.Data.(type) {
+				case *js_ast.SLocal:
+					// Convert the declarations to assignments
+					wrapIdentifier := func(loc logger.Loc, ref ast.Ref) js_ast.Expr {
+						decls = append(decls, js_ast.Decl{Binding: js_ast.Binding{Loc: loc, Data: &js_ast.BIdentifier{Ref: ref}}})
+						return js_ast.Expr{Loc: loc, Data: &js_ast.EIdentifier{Ref: ref}}
+					}
+					var value js_ast.Expr
+					for _, decl := range s.Decls {
+						binding := js_ast.ConvertBindingToExpr(decl.Binding, wrapIdentifier)
+						if decl.ValueOrNil.Data != nil {
+							value = js_ast.JoinWithComma(value, js_ast.Assign(binding, decl.ValueOrNil))
+						}
+					}
+					if value.Data == nil {
+						// Declarations with no initializers become hoisted decls only
+						continue
+					}
+					stmt = js_ast.Stmt{Loc: stmt.Loc, Data: &js_ast.SExpr{Value: value}}
+
+				case *js_ast.SFunction:
+					// Functions are hoisted out of the closure entirely
+					stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, stmt)
+					continue
+				}
+
+				stmts[end] = stmt
+				end++
+			}
+			stmts = stmts[:end]
+
+			var esmArgs []js_ast.Expr
+			if c.options.ProfilerNames {
+				// "__esm({ 'file.js'() { ... } })"
+				kind := js_ast.PropertyField
+				if !c.options.UnsupportedJSFeatures.Has(compat.ObjectExtensions) {
+					kind = js_ast.PropertyMethod
+				}
+				esmArgs = []js_ast.Expr{{Data: &js_ast.EObject{Properties: []js_ast.Property{{
+					Kind:       kind,
+					Key:        js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(file.InputFile.Source.PrettyPath)}},
+					ValueOrNil: js_ast.Expr{Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}, IsAsync: isAsync}}},
+				}}}}}
+			} else if c.options.UnsupportedJSFeatures.Has(compat.Arrow) {
+				// "__esm(function () { ... })"
+				esmArgs = []js_ast.Expr{{Data: &js_ast.EFunction{Fn: js_ast.Fn{Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}, IsAsync: isAsync}}}}
+			} else {
+				// "__esm(() => { ... })"
+				esmArgs = []js_ast.Expr{{Data: &js_ast.EArrow{Body: js_ast.FnBody{Block: js_ast.SBlock{Stmts: stmts}}, IsAsync: isAsync}}}
+			}
+			value := js_ast.Expr{Data: &js_ast.ECall{
+				Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.esmRuntimeRef}},
+				Args:   esmArgs,
+			}}
+
+			// "var foo, bar;"
+			if !c.options.MinifySyntax && len(decls) > 0 {
+				stmtList.outsideWrapperPrefix = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{
+					Decls: decls,
+				}})
+				decls = nil
+			}
+
+			// "var init_foo = __esm(...);"
+			stmts = append(stmtList.outsideWrapperPrefix, js_ast.Stmt{Data: &js_ast.SLocal{
+				Decls: append(decls, js_ast.Decl{
+					Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: repr.AST.WrapperRef}},
+					ValueOrNil: value,
+				}),
+			}})
+		}
+	}
+
+	// Only generate a source map if needed
+	var addSourceMappings bool
+	var inputSourceMap *sourcemap.SourceMap
+	var lineOffsetTables []sourcemap.LineOffsetTable
+	if file.InputFile.Loader.CanHaveSourceMap() && c.options.SourceMap != config.SourceMapNone {
+		addSourceMappings = true
+		inputSourceMap = file.InputFile.InputSourceMap
+		lineOffsetTables = dataForSourceMaps[partRange.sourceIndex].LineOffsetTables
+	}
+
+	// Indent the file if everything is wrapped in an IIFE
+	indent := 0
+	if c.options.OutputFormat == config.FormatIIFE {
+		indent++
+	}
+
+	// Convert the AST to JavaScript code
+	printOptions := js_printer.Options{
+		Indent:                       indent,
+		OutputFormat:                 c.options.OutputFormat,
+		MinifyIdentifiers:            c.options.MinifyIdentifiers,
+		MinifyWhitespace:             c.options.MinifyWhitespace,
+		MinifySyntax:                 c.options.MinifySyntax,
+		LineLimit:                    c.options.LineLimit,
+		ASCIIOnly:                    c.options.ASCIIOnly,
+		ToCommonJSRef:                toCommonJSRef,
+		ToESMRef:                     toESMRef,
+		RuntimeRequireRef:            runtimeRequireRef,
+		TSEnums:                      c.graph.TSEnums,
+		ConstValues:                  c.graph.ConstValues,
+		LegalComments:                c.options.LegalComments,
+		UnsupportedFeatures:          c.options.UnsupportedJSFeatures,
+		SourceMap:                    c.options.SourceMap,
+		AddSourceMappings:            addSourceMappings,
+		InputSourceMap:               inputSourceMap,
+		LineOffsetTables:             lineOffsetTables,
+		RequireOrImportMetaForSource: c.requireOrImportMetaForSource,
+		MangledProps:                 c.mangledProps,
+		NeedsMetafile:                c.options.NeedsMetafile,
+	}
+	tree := repr.AST
+	tree.Directives = nil // This is handled elsewhere
+	tree.Parts = []js_ast.Part{{Stmts: stmts}}
+	*result = compileResultJS{
+		PrintResult: js_printer.Print(tree, c.graph.Symbols, r, printOptions),
+		sourceIndex: partRange.sourceIndex,
+	}
+
+	// Record a metafile import entry for files handled by the "file" loader
+	if file.InputFile.Loader == config.LoaderFile {
+		result.JSONMetadataImports = append(result.JSONMetadataImports, fmt.Sprintf("\n        {\n          \"path\": %s,\n          \"kind\": \"file-loader\"\n        }",
+			helpers.QuoteForJSON(file.InputFile.UniqueKeyForAdditionalFile, c.options.ASCIIOnly)))
+	}
+
+	waitGroup.Done()
+}
+
+// generateEntryPointTailJS builds the statements that must come after an
+// entry point's own parts in the output chunk — the call to the entry point's
+// wrapper closure ("require_foo()" / "init_foo()") plus whatever export
+// machinery the configured output format requires — and prints them to
+// JavaScript. The returned result is empty when no tail statements are
+// needed for this entry point.
+func (c *linkerContext) generateEntryPointTailJS(
+	r renamer.Renamer,
+	toCommonJSRef ast.Ref,
+	toESMRef ast.Ref,
+	sourceIndex uint32,
+) (result compileResultJS) {
+	file := &c.graph.Files[sourceIndex]
+	repr := file.InputFile.Repr.(*graph.JSRepr)
+	var stmts []js_ast.Stmt
+
+	switch c.options.OutputFormat {
+	case config.FormatPreserve:
+		if repr.Meta.Wrap != graph.WrapNone {
+			// "require_foo();"
+			// "init_foo();"
+			stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
+				Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+			}}}})
+		}
+
+	case config.FormatIIFE:
+		if repr.Meta.Wrap == graph.WrapCJS {
+			if len(c.options.GlobalName) > 0 {
+				// "return require_foo();"
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SReturn{ValueOrNil: js_ast.Expr{Data: &js_ast.ECall{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+				}}}})
+			} else {
+				// "require_foo();"
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+				}}}})
+			}
+		} else {
+			if repr.Meta.Wrap == graph.WrapESM {
+				// "init_foo();"
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+				}}}})
+			}
+
+			if repr.Meta.ForceIncludeExportsForEntryPoint {
+				// "return __toCommonJS(exports);"
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SReturn{
+					ValueOrNil: js_ast.Expr{Data: &js_ast.ECall{
+						Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: toCommonJSRef}},
+						Args:   []js_ast.Expr{{Data: &js_ast.EIdentifier{Ref: repr.AST.ExportsRef}}},
+					}},
+				}})
+			}
+		}
+
+	case config.FormatCommonJS:
+		if repr.Meta.Wrap == graph.WrapCJS {
+			// "module.exports = require_foo();"
+			stmts = append(stmts, js_ast.AssignStmt(
+				js_ast.Expr{Data: &js_ast.EDot{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}},
+					Name:   "exports",
+				}},
+				js_ast.Expr{Data: &js_ast.ECall{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+				}},
+			))
+		} else {
+			if repr.Meta.Wrap == graph.WrapESM {
+				// "init_foo();"
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: js_ast.Expr{Data: &js_ast.ECall{
+					Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}},
+				}}}})
+			}
+		}
+
+		// If we are generating CommonJS for node, encode the known export names in
+		// a form that node can understand them. This relies on the specific behavior
+		// of this parser, which the node project uses to detect named exports in
+		// CommonJS files: https://github.com/guybedford/cjs-module-lexer. Think of
+		// this code as an annotation for that parser.
+		if c.options.Platform == config.PlatformNode {
+			// Add a comment since otherwise people will surely wonder what this is.
+			// This annotation means you can do this and have it work:
+			//
+			//   import { name } from './file-from-esbuild.cjs'
+			//
+			// when "file-from-esbuild.cjs" looks like this:
+			//
+			//   __export(exports, { name: () => name });
+			//   0 && (module.exports = {name});
+			//
+			// The maintainer of "cjs-module-lexer" is receptive to adding esbuild-
+			// friendly patterns to this library. However, this library has already
+			// shipped in node and using existing patterns instead of defining new
+			// patterns is maximally compatible.
+			//
+			// An alternative to doing this could be to use "Object.defineProperties"
+			// instead of "__export" but support for that would need to be added to
+			// "cjs-module-lexer" and then we would need to be ok with not supporting
+			// older versions of node that don't have that newly-added support.
+
+			// "{a, b, if: null}"
+			var moduleExports []js_ast.Property
+			for _, export := range repr.Meta.SortedAndFilteredExportAliases {
+				if export == "default" {
+					// In node the default export is always "module.exports" regardless of
+					// what the annotation says. So don't bother generating "default".
+					continue
+				}
+
+				// "{if: null}"
+				var valueOrNil js_ast.Expr
+				if _, ok := js_lexer.Keywords[export]; ok {
+					// Make sure keywords don't cause a syntax error. This has to map to
+					// "null" instead of something shorter like "0" because the library
+					// "cjs-module-lexer" only supports identifiers in this position, and
+					// it thinks "null" is an identifier.
+					valueOrNil = js_ast.Expr{Data: js_ast.ENullShared}
+				}
+
+				moduleExports = append(moduleExports, js_ast.Property{
+					Key:        js_ast.Expr{Data: &js_ast.EString{Value: helpers.StringToUTF16(export)}},
+					ValueOrNil: valueOrNil,
+				})
+			}
+
+			// Add annotations for re-exports: "{...require('./foo')}"
+			for _, importRecordIndex := range repr.AST.ExportStarImportRecords {
+				if record := &repr.AST.ImportRecords[importRecordIndex]; !record.SourceIndex.IsValid() {
+					moduleExports = append(moduleExports, js_ast.Property{
+						Kind:       js_ast.PropertySpread,
+						ValueOrNil: js_ast.Expr{Data: &js_ast.ERequireString{ImportRecordIndex: importRecordIndex}},
+					})
+				}
+			}
+
+			if len(moduleExports) > 0 {
+				// "0 && (module.exports = {a, b, if: null});"
+				expr := js_ast.Expr{Data: &js_ast.EBinary{
+					Op:   js_ast.BinOpLogicalAnd,
+					Left: js_ast.Expr{Data: &js_ast.ENumber{Value: 0}},
+					Right: js_ast.Assign(
+						js_ast.Expr{Data: &js_ast.EDot{
+							Target: js_ast.Expr{Data: &js_ast.EIdentifier{Ref: c.unboundModuleRef}},
+							Name:   "exports",
+						}},
+						js_ast.Expr{Data: &js_ast.EObject{Properties: moduleExports}},
+					),
+				}}
+
+				// Omit the explanatory comment when minifying whitespace since it
+				// only exists for human readers of the output.
+				if !c.options.MinifyWhitespace {
+					stmts = append(stmts,
+						js_ast.Stmt{Data: &js_ast.SComment{Text: `// Annotate the CommonJS export names for ESM import in node:`}},
+					)
+				}
+
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExpr{Value: expr}})
+			}
+		}
+
+	case config.FormatESModule:
+		if repr.Meta.Wrap == graph.WrapCJS {
+			// "export default require_foo();"
+			stmts = append(stmts, js_ast.Stmt{
+				Data: &js_ast.SExportDefault{Value: js_ast.Stmt{
+					Data: &js_ast.SExpr{Value: js_ast.Expr{
+						Data: &js_ast.ECall{Target: js_ast.Expr{
+							Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}}}})
+		} else {
+			if repr.Meta.Wrap == graph.WrapESM {
+				if repr.Meta.IsAsyncOrHasAsyncDependency {
+					// "await init_foo();"
+					stmts = append(stmts, js_ast.Stmt{
+						Data: &js_ast.SExpr{Value: js_ast.Expr{
+							Data: &js_ast.EAwait{Value: js_ast.Expr{
+								Data: &js_ast.ECall{Target: js_ast.Expr{
+									Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}}}})
+				} else {
+					// "init_foo();"
+					stmts = append(stmts, js_ast.Stmt{
+						Data: &js_ast.SExpr{
+							Value: js_ast.Expr{Data: &js_ast.ECall{Target: js_ast.Expr{
+								Data: &js_ast.EIdentifier{Ref: repr.AST.WrapperRef}}}}}})
+				}
+			}
+
+			if len(repr.Meta.SortedAndFilteredExportAliases) > 0 {
+				// If the output format is ES6 modules and we're an entry point, generate an
+				// ES6 export statement containing all exports. Except don't do that if this
+				// entry point is a CommonJS-style module, since that would generate an ES6
+				// export statement that's not top-level. Instead, we will export the CommonJS
+				// exports as a default export later on.
+				var items []js_ast.ClauseItem
+
+				for i, alias := range repr.Meta.SortedAndFilteredExportAliases {
+					export := repr.Meta.ResolvedExports[alias]
+
+					// If this is an export of an import, reference the symbol that the import
+					// was eventually resolved to. We need to do this because imports have
+					// already been resolved by this point, so we can't generate a new import
+					// and have that be resolved later.
+					if importData, ok := c.graph.Files[export.SourceIndex].InputFile.Repr.(*graph.JSRepr).Meta.ImportsToBind[export.Ref]; ok {
+						export.Ref = importData.Ref
+						export.SourceIndex = importData.SourceIndex
+					}
+
+					// Exports of imports need EImportIdentifier in case they need to be re-
+					// written to a property access later on
+					if c.graph.Symbols.Get(export.Ref).NamespaceAlias != nil {
+						// Create both a local variable and an export clause for that variable.
+						// The local variable is initialized with the initial value of the
+						// export. This isn't fully correct because it's a "dead" binding and
+						// doesn't update with the "live" value as it changes. But ES6 modules
+						// don't have any syntax for bare named getter functions so this is the
+						// best we can do.
+						//
+						// These input files:
+						//
+						//   // entry_point.js
+						//   export {foo} from './cjs-format.js'
+						//
+						//   // cjs-format.js
+						//   Object.defineProperty(exports, 'foo', {
+						//     enumerable: true,
+						//     get: () => Math.random(),
+						//   })
+						//
+						// Become this output file:
+						//
+						//   // cjs-format.js
+						//   var require_cjs_format = __commonJS((exports) => {
+						//     Object.defineProperty(exports, "foo", {
+						//       enumerable: true,
+						//       get: () => Math.random()
+						//     });
+						//   });
+						//
+						//   // entry_point.js
+						//   var cjs_format = __toESM(require_cjs_format());
+						//   var export_foo = cjs_format.foo;
+						//   export {
+						//     export_foo as foo
+						//   };
+						//
+						tempRef := repr.Meta.CJSExportCopies[i]
+						stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SLocal{
+							Decls: []js_ast.Decl{{
+								Binding:    js_ast.Binding{Data: &js_ast.BIdentifier{Ref: tempRef}},
+								ValueOrNil: js_ast.Expr{Data: &js_ast.EImportIdentifier{Ref: export.Ref}},
+							}},
+						}})
+						items = append(items, js_ast.ClauseItem{
+							Name:  ast.LocRef{Ref: tempRef},
+							Alias: alias,
+						})
+					} else {
+						// Local identifiers can be exported using an export clause. This is done
+						// this way instead of leaving the "export" keyword on the local declaration
+						// itself both because it lets the local identifier be minified and because
+						// it works transparently for re-exports across files.
+						//
+						// These input files:
+						//
+						//   // entry_point.js
+						//   export * from './esm-format.js'
+						//
+						//   // esm-format.js
+						//   export let foo = 123
+						//
+						// Become this output file:
+						//
+						//   // esm-format.js
+						//   let foo = 123;
+						//
+						//   // entry_point.js
+						//   export {
+						//     foo
+						//   };
+						//
+						items = append(items, js_ast.ClauseItem{
+							Name:  ast.LocRef{Ref: export.Ref},
+							Alias: alias,
+						})
+					}
+				}
+
+				stmts = append(stmts, js_ast.Stmt{Data: &js_ast.SExportClause{Items: items}})
+			}
+		}
+	}
+
+	// Nothing to print for this entry point
+	if len(stmts) == 0 {
+		return
+	}
+
+	// Print the tail statements by reusing this file's AST as a template but
+	// swapping in only the newly-generated statements (directives are cleared
+	// since they were already emitted with the file's own parts).
+	tree := repr.AST
+	tree.Directives = nil
+	tree.Parts = []js_ast.Part{{Stmts: stmts}}
+
+	// Indent the file if everything is wrapped in an IIFE
+	indent := 0
+	if c.options.OutputFormat == config.FormatIIFE {
+		indent++
+	}
+
+	// Convert the AST to JavaScript code
+	printOptions := js_printer.Options{
+		Indent:                       indent,
+		OutputFormat:                 c.options.OutputFormat,
+		MinifyIdentifiers:            c.options.MinifyIdentifiers,
+		MinifyWhitespace:             c.options.MinifyWhitespace,
+		MinifySyntax:                 c.options.MinifySyntax,
+		LineLimit:                    c.options.LineLimit,
+		ASCIIOnly:                    c.options.ASCIIOnly,
+		ToCommonJSRef:                toCommonJSRef,
+		ToESMRef:                     toESMRef,
+		LegalComments:                c.options.LegalComments,
+		UnsupportedFeatures:          c.options.UnsupportedJSFeatures,
+		RequireOrImportMetaForSource: c.requireOrImportMetaForSource,
+		MangledProps:                 c.mangledProps,
+	}
+	result.PrintResult = js_printer.Print(tree, c.graph.Symbols, r, printOptions)
+	return
+}
+
+// renameSymbolsInChunk computes the final output names for every symbol used
+// by this chunk and returns the renamer holding those assignments. When
+// identifier minification is enabled, per-file symbol use counts are gathered
+// in parallel and the shortest names are assigned to the most frequently used
+// symbols; otherwise a number-appending renamer resolves collisions while
+// keeping the original names readable.
+func (c *linkerContext) renameSymbolsInChunk(chunk *chunkInfo, filesInOrder []uint32, timer *helpers.Timer) renamer.Renamer {
+	if c.options.MinifyIdentifiers {
+		timer.Begin("Minify symbols")
+		defer timer.End("Minify symbols")
+	} else {
+		timer.Begin("Rename symbols")
+		defer timer.End("Rename symbols")
+	}
+
+	// Determine the reserved names (e.g. can't generate the name "if")
+	timer.Begin("Compute reserved names")
+	moduleScopes := make([]*js_ast.Scope, len(filesInOrder))
+	for i, sourceIndex := range filesInOrder {
+		moduleScopes[i] = c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope
+	}
+	reservedNames := renamer.ComputeReservedNames(moduleScopes, c.graph.Symbols)
+
+	// Node contains code that scans CommonJS modules in an attempt to statically
+	// detect the  set of export names that a module will use. However, it doesn't
+	// do any scope analysis so it can be fooled by local variables with the same
+	// name as the CommonJS module-scope variables "exports" and "module". Avoid
+	// using these names in this case even if there is not a risk of a name
+	// collision because there is still a risk of node incorrectly detecting
+	// something in a nested scope as an top-level export. Here's a case where
+	// this happened: https://github.com/evanw/esbuild/issues/3544
+	if c.options.OutputFormat == config.FormatCommonJS && c.options.Platform == config.PlatformNode {
+		reservedNames["exports"] = 1
+		reservedNames["module"] = 1
+	}
+
+	// These are used to implement bundling, and need to be free for use
+	if c.options.Mode != config.ModePassThrough {
+		reservedNames["require"] = 1
+		reservedNames["Promise"] = 1
+	}
+	timer.End("Compute reserved names")
+
+	// Make sure imports get a chance to be renamed too
+	var sortedImportsFromOtherChunks stableRefArray
+	for _, imports := range chunk.chunkRepr.(*chunkReprJS).importsFromOtherChunks {
+		for _, item := range imports {
+			sortedImportsFromOtherChunks = append(sortedImportsFromOtherChunks, stableRef{
+				StableSourceIndex: c.graph.StableSourceIndices[item.ref.SourceIndex],
+				Ref:               item.ref,
+			})
+		}
+	}
+	// Sort by stable source index for deterministic name assignment across runs
+	sort.Sort(sortedImportsFromOtherChunks)
+
+	// Minification uses frequency analysis to give shorter names to more frequent symbols
+	if c.options.MinifyIdentifiers {
+		// Determine the first top-level slot (i.e. not in a nested scope)
+		var firstTopLevelSlots ast.SlotCounts
+		for _, sourceIndex := range filesInOrder {
+			firstTopLevelSlots.UnionMax(c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr).AST.NestedScopeSlotCounts)
+		}
+		r := renamer.NewMinifyRenamer(c.graph.Symbols, firstTopLevelSlots, reservedNames)
+
+		// Accumulate nested symbol usage counts
+		timer.Begin("Accumulate symbol counts")
+		timer.Begin("Parallel phase")
+		// One slot per file so each goroutine writes only its own entry
+		allTopLevelSymbols := make([]renamer.StableSymbolCountArray, len(filesInOrder))
+		stableSourceIndices := c.graph.StableSourceIndices
+		freq := ast.CharFreq{}
+		waitGroup := sync.WaitGroup{}
+		waitGroup.Add(len(filesInOrder))
+		for i, sourceIndex := range filesInOrder {
+			repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+
+			// Do this outside of the goroutine because it's not atomic
+			if repr.AST.CharFreq != nil {
+				freq.Include(repr.AST.CharFreq)
+			}
+
+			go func(topLevelSymbols *renamer.StableSymbolCountArray, repr *graph.JSRepr) {
+				if repr.AST.UsesExportsRef {
+					r.AccumulateSymbolCount(topLevelSymbols, repr.AST.ExportsRef, 1, stableSourceIndices)
+				}
+				if repr.AST.UsesModuleRef {
+					r.AccumulateSymbolCount(topLevelSymbols, repr.AST.ModuleRef, 1, stableSourceIndices)
+				}
+
+				for partIndex, part := range repr.AST.Parts {
+					if !repr.AST.Parts[partIndex].IsLive {
+						// Skip the part if it's not in this chunk
+						continue
+					}
+
+					// Accumulate symbol use counts
+					r.AccumulateSymbolUseCounts(topLevelSymbols, part.SymbolUses, stableSourceIndices)
+
+					// Make sure to also count the declaration in addition to the uses
+					for _, declared := range part.DeclaredSymbols {
+						r.AccumulateSymbolCount(topLevelSymbols, declared.Ref, 1, stableSourceIndices)
+					}
+				}
+
+				sort.Sort(topLevelSymbols)
+				waitGroup.Done()
+			}(&allTopLevelSymbols[i], repr)
+		}
+		waitGroup.Wait()
+		timer.End("Parallel phase")
+
+		// Accumulate top-level symbol usage counts
+		timer.Begin("Serial phase")
+		capacity := len(sortedImportsFromOtherChunks)
+		for _, array := range allTopLevelSymbols {
+			capacity += len(array)
+		}
+		topLevelSymbols := make(renamer.StableSymbolCountArray, 0, capacity)
+		for _, stable := range sortedImportsFromOtherChunks {
+			r.AccumulateSymbolCount(&topLevelSymbols, stable.Ref, 1, stableSourceIndices)
+		}
+		for _, array := range allTopLevelSymbols {
+			topLevelSymbols = append(topLevelSymbols, array...)
+		}
+		r.AllocateTopLevelSymbolSlots(topLevelSymbols)
+		timer.End("Serial phase")
+		timer.End("Accumulate symbol counts")
+
+		// Add all of the character frequency histograms for all files in this
+		// chunk together, then use it to compute the character sequence used to
+		// generate minified names. This results in slightly better gzip compression
+		// over assigning minified names in order (i.e. "a b c ..."). Even though
+		// it's a very small win, we still do it because it's simple to do and very
+		// cheap to compute.
+		minifier := ast.DefaultNameMinifierJS.ShuffleByCharFreq(freq)
+		timer.Begin("Assign names by frequency")
+		r.AssignNamesByFrequency(&minifier)
+		timer.End("Assign names by frequency")
+		return r
+	}
+
+	// When we're not minifying, just append numbers to symbol names to avoid collisions
+	r := renamer.NewNumberRenamer(c.graph.Symbols, reservedNames)
+	// Per-file nested scopes, renamed in parallel after all top-level names are set
+	nestedScopes := make(map[uint32][]*js_ast.Scope)
+
+	timer.Begin("Add top-level symbols")
+	for _, stable := range sortedImportsFromOtherChunks {
+		r.AddTopLevelSymbol(stable.Ref)
+	}
+	for _, sourceIndex := range filesInOrder {
+		repr := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
+		var scopes []*js_ast.Scope
+
+		// Modules wrapped in a CommonJS closure look like this:
+		//
+		//   // foo.js
+		//   var require_foo = __commonJS((exports, module) => {
+		//     exports.foo = 123;
+		//   });
+		//
+		// The symbol "require_foo" is stored in "file.ast.WrapperRef". We want
+		// to be able to minify everything inside the closure without worrying
+		// about collisions with other CommonJS modules. Set up the scopes such
+		// that it appears as if the file was structured this way all along. It's
+		// not completely accurate (e.g. we don't set the parent of the module
+		// scope to this new top-level scope) but it's good enough for the
+		// renaming code.
+		if repr.Meta.Wrap == graph.WrapCJS {
+			r.AddTopLevelSymbol(repr.AST.WrapperRef)
+
+			// External import statements will be hoisted outside of the CommonJS
+			// wrapper if the output format supports import statements. We need to
+			// add those symbols to the top-level scope to avoid causing name
+			// collisions. This code special-cases only those symbols.
+			if c.options.OutputFormat.KeepESMImportExportSyntax() {
+				for _, part := range repr.AST.Parts {
+					for _, stmt := range part.Stmts {
+						switch s := stmt.Data.(type) {
+						case *js_ast.SImport:
+							if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() {
+								r.AddTopLevelSymbol(s.NamespaceRef)
+								if s.DefaultName != nil {
+									r.AddTopLevelSymbol(s.DefaultName.Ref)
+								}
+								if s.Items != nil {
+									for _, item := range *s.Items {
+										r.AddTopLevelSymbol(item.Name.Ref)
+									}
+								}
+							}
+
+						case *js_ast.SExportStar:
+							if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() {
+								r.AddTopLevelSymbol(s.NamespaceRef)
+							}
+
+						case *js_ast.SExportFrom:
+							if !repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() {
+								r.AddTopLevelSymbol(s.NamespaceRef)
+								for _, item := range s.Items {
+									r.AddTopLevelSymbol(item.Name.Ref)
+								}
+							}
+						}
+					}
+				}
+			}
+
+			// The entire module scope is treated as nested inside the wrapper
+			nestedScopes[sourceIndex] = []*js_ast.Scope{repr.AST.ModuleScope}
+			continue
+		}
+
+		// Modules wrapped in an ESM closure look like this:
+		//
+		//   // foo.js
+		//   var foo, foo_exports = {};
+		//   __export(foo_exports, {
+		//     foo: () => foo
+		//   });
+		//   let init_foo = __esm(() => {
+		//     foo = 123;
+		//   });
+		//
+		// The symbol "init_foo" is stored in "file.ast.WrapperRef". We need to
+		// minify everything inside the closure without introducing a new scope
+		// since all top-level variables will be hoisted outside of the closure.
+		if repr.Meta.Wrap == graph.WrapESM {
+			r.AddTopLevelSymbol(repr.AST.WrapperRef)
+		}
+
+		// Rename each top-level symbol declaration in this chunk
+		for partIndex, part := range repr.AST.Parts {
+			if repr.AST.Parts[partIndex].IsLive {
+				for _, declared := range part.DeclaredSymbols {
+					if declared.IsTopLevel {
+						r.AddTopLevelSymbol(declared.Ref)
+					}
+				}
+				scopes = append(scopes, part.Scopes...)
+			}
+		}
+
+		nestedScopes[sourceIndex] = scopes
+	}
+	timer.End("Add top-level symbols")
+
+	// Recursively rename symbols in child scopes now that all top-level
+	// symbols have been renamed. This is done in parallel because the symbols
+	// inside nested scopes are independent and can't conflict.
+	timer.Begin("Assign names by scope")
+	r.AssignNamesByScope(nestedScopes)
+	timer.End("Assign names by scope")
+	return r
+}
+
+func (c *linkerContext) generateChunkJS(chunkIndex int, chunkWaitGroup *sync.WaitGroup) {
+	defer c.recoverInternalError(chunkWaitGroup, runtime.SourceIndex)
+
+	chunk := &c.chunks[chunkIndex]
+
+	timer := c.timer.Fork()
+	if timer != nil {
+		timeName := fmt.Sprintf("Generate chunk %q", path.Clean(config.TemplateToString(chunk.finalTemplate)))
+		timer.Begin(timeName)
+		defer c.timer.Join(timer)
+		defer timer.End(timeName)
+	}
+
+	chunkRepr := chunk.chunkRepr.(*chunkReprJS)
+	compileResults := make([]compileResultJS, 0, len(chunkRepr.partsInChunkInOrder))
+	runtimeMembers := c.graph.Files[runtime.SourceIndex].InputFile.Repr.(*graph.JSRepr).AST.ModuleScope.Members
+	toCommonJSRef := ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__toCommonJS"].Ref)
+	toESMRef := ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__toESM"].Ref)
+	runtimeRequireRef := ast.FollowSymbols(c.graph.Symbols, runtimeMembers["__require"].Ref)
+	r := c.renameSymbolsInChunk(chunk, chunkRepr.filesInChunkInOrder, timer)
+	dataForSourceMaps := c.dataForSourceMaps()
+
+	// Note: This contains placeholders instead of what the placeholders are
+	// substituted with. That should be fine though because this should only
+	// ever be used for figuring out how many "../" to add to a relative path
+	// from a chunk whose final path hasn't been calculated yet to a chunk
+	// whose final path has already been calculated. That and placeholders are
+	// never substituted with something containing a "/" so substitution should
+	// never change the "../" count.
+	chunkAbsDir := c.fs.Dir(c.fs.Join(c.options.AbsOutputDir, config.TemplateToString(chunk.finalTemplate)))
+
+	// Generate JavaScript for each file in parallel
+	timer.Begin("Print JavaScript files")
+	waitGroup := sync.WaitGroup{}
+	for _, partRange := range chunkRepr.partsInChunkInOrder {
+		// Skip the runtime in test output
+		if partRange.sourceIndex == runtime.SourceIndex && c.options.OmitRuntimeForTests {
+			continue
+		}
+
+		// Create a goroutine for this file
+		compileResults = append(compileResults, compileResultJS{})
+		compileResult := &compileResults[len(compileResults)-1]
+		waitGroup.Add(1)
+		go c.generateCodeForFileInChunkJS(
+			r,
+			&waitGroup,
+			partRange,
+			toCommonJSRef,
+			toESMRef,
+			runtimeRequireRef,
+			compileResult,
+			dataForSourceMaps,
+		)
+	}
+
+	// Also generate the cross-chunk binding code
+	var crossChunkPrefix []byte
+	var crossChunkSuffix []byte
+	var jsonMetadataImports []string
+	{
+		// Indent the file if everything is wrapped in an IIFE
+		indent := 0
+		if c.options.OutputFormat == config.FormatIIFE {
+			indent++
+		}
+		printOptions := js_printer.Options{
+			Indent:            indent,
+			OutputFormat:      c.options.OutputFormat,
+			MinifyIdentifiers: c.options.MinifyIdentifiers,
+			MinifyWhitespace:  c.options.MinifyWhitespace,
+			MinifySyntax:      c.options.MinifySyntax,
+			LineLimit:         c.options.LineLimit,
+			NeedsMetafile:     c.options.NeedsMetafile,
+		}
+		crossChunkImportRecords := make([]ast.ImportRecord, len(chunk.crossChunkImports))
+		for i, chunkImport := range chunk.crossChunkImports {
+			crossChunkImportRecords[i] = ast.ImportRecord{
+				Kind:  chunkImport.importKind,
+				Path:  logger.Path{Text: c.chunks[chunkImport.chunkIndex].uniqueKey},
+				Flags: ast.ShouldNotBeExternalInMetafile | ast.ContainsUniqueKey,
+			}
+		}
+		crossChunkResult := js_printer.Print(js_ast.AST{
+			ImportRecords: crossChunkImportRecords,
+			Parts:         []js_ast.Part{{Stmts: chunkRepr.crossChunkPrefixStmts}},
+		}, c.graph.Symbols, r, printOptions)
+		crossChunkPrefix = crossChunkResult.JS
+		jsonMetadataImports = crossChunkResult.JSONMetadataImports
+		crossChunkSuffix = js_printer.Print(js_ast.AST{
+			Parts: []js_ast.Part{{Stmts: chunkRepr.crossChunkSuffixStmts}},
+		}, c.graph.Symbols, r, printOptions).JS
+	}
+
+	// Generate the exports for the entry point, if there are any
+	var entryPointTail compileResultJS
+	if chunk.isEntryPoint {
+		entryPointTail = c.generateEntryPointTailJS(
+			r,
+			toCommonJSRef,
+			toESMRef,
+			chunk.sourceIndex,
+		)
+	}
+
+	waitGroup.Wait()
+	timer.End("Print JavaScript files")
+	timer.Begin("Join JavaScript files")
+
+	j := helpers.Joiner{}
+	prevOffset := sourcemap.LineColumnOffset{}
+
+	// Optionally strip whitespace
+	indent := ""
+	space := " "
+	newline := "\n"
+	if c.options.MinifyWhitespace {
+		space = ""
+		newline = ""
+	}
+	newlineBeforeComment := false
+	isExecutable := false
+
+	// Start with the hashbang if there is one. This must be done before the
+	// banner because it only works if it's literally the first character.
+	if chunk.isEntryPoint {
+		if repr := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); repr.AST.Hashbang != "" {
+			hashbang := repr.AST.Hashbang + "\n"
+			prevOffset.AdvanceString(hashbang)
+			j.AddString(hashbang)
+			newlineBeforeComment = true
+			isExecutable = true
+		}
+	}
+
+	// Then emit the banner after the hashbang. This must come before the
+	// "use strict" directive below because some people use the banner to
+	// emit a hashbang, which must be the first thing in the file.
+	if len(c.options.JSBanner) > 0 {
+		prevOffset.AdvanceString(c.options.JSBanner)
+		prevOffset.AdvanceString("\n")
+		j.AddString(c.options.JSBanner)
+		j.AddString("\n")
+		newlineBeforeComment = true
+	}
+
+	// Add the top-level directive if present (but omit "use strict" in ES
+	// modules because all ES modules are automatically in strict mode)
+	if chunk.isEntryPoint {
+		repr := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr)
+		for _, directive := range repr.AST.Directives {
+			if directive != "use strict" || c.options.OutputFormat != config.FormatESModule {
+				quoted := string(helpers.QuoteForJSON(directive, c.options.ASCIIOnly)) + ";" + newline
+				prevOffset.AdvanceString(quoted)
+				j.AddString(quoted)
+				newlineBeforeComment = true
+			}
+		}
+	}
+
+	// Optionally wrap with an IIFE
+	if c.options.OutputFormat == config.FormatIIFE {
+		var text string
+		indent = "  "
+		if len(c.options.GlobalName) > 0 {
+			text = c.generateGlobalNamePrefix()
+		}
+		if c.options.UnsupportedJSFeatures.Has(compat.Arrow) {
+			text += "(function()" + space + "{" + newline
+		} else {
+			text += "(()" + space + "=>" + space + "{" + newline
+		}
+		prevOffset.AdvanceString(text)
+		j.AddString(text)
+		newlineBeforeComment = false
+	}
+
+	// Put the cross-chunk prefix inside the IIFE
+	if len(crossChunkPrefix) > 0 {
+		newlineBeforeComment = true
+		prevOffset.AdvanceBytes(crossChunkPrefix)
+		j.AddBytes(crossChunkPrefix)
+	}
+
+	// Start the metadata
+	jMeta := helpers.Joiner{}
+	if c.options.NeedsMetafile {
+		// Print imports
+		isFirstMeta := true
+		jMeta.AddString("{\n      \"imports\": [")
+		for _, json := range jsonMetadataImports {
+			if isFirstMeta {
+				isFirstMeta = false
+			} else {
+				jMeta.AddString(",")
+			}
+			jMeta.AddString(json)
+		}
+		for _, compileResult := range compileResults {
+			for _, json := range compileResult.JSONMetadataImports {
+				if isFirstMeta {
+					isFirstMeta = false
+				} else {
+					jMeta.AddString(",")
+				}
+				jMeta.AddString(json)
+			}
+		}
+		if !isFirstMeta {
+			jMeta.AddString("\n      ")
+		}
+
+		// Print exports
+		jMeta.AddString("],\n      \"exports\": [")
+		var aliases []string
+		if c.options.OutputFormat.KeepESMImportExportSyntax() {
+			if chunk.isEntryPoint {
+				if fileRepr := c.graph.Files[chunk.sourceIndex].InputFile.Repr.(*graph.JSRepr); fileRepr.Meta.Wrap == graph.WrapCJS {
+					aliases = []string{"default"}
+				} else {
+					resolvedExports := fileRepr.Meta.ResolvedExports
+					aliases = make([]string, 0, len(resolvedExports))
+					for alias := range resolvedExports {
+						aliases = append(aliases, alias)
+					}
+				}
+			} else {
+				aliases = make([]string, 0, len(chunkRepr.exportsToOtherChunks))
+				for _, alias := range chunkRepr.exportsToOtherChunks {
+					aliases = append(aliases, alias)
+				}
+			}
+		}
+		isFirstMeta = true
+		sort.Strings(aliases) // Sort for determinism
+		for _, alias := range aliases {
+			if isFirstMeta {
+				isFirstMeta = false
+			} else {
+				jMeta.AddString(",")
+			}
+			jMeta.AddString(fmt.Sprintf("\n        %s",
+				helpers.QuoteForJSON(alias, c.options.ASCIIOnly)))
+		}
+		if !isFirstMeta {
+			jMeta.AddString("\n      ")
+		}
+		jMeta.AddString("],\n")
+		if chunk.isEntryPoint {
+			entryPoint := c.graph.Files[chunk.sourceIndex].InputFile.Source.PrettyPath
+			jMeta.AddString(fmt.Sprintf("      \"entryPoint\": %s,\n", helpers.QuoteForJSON(entryPoint, c.options.ASCIIOnly)))
+		}
+		if chunkRepr.hasCSSChunk {
+			jMeta.AddString(fmt.Sprintf("      \"cssBundle\": %s,\n", helpers.QuoteForJSON(c.chunks[chunkRepr.cssChunkIndex].uniqueKey, c.options.ASCIIOnly)))
+		}
+		jMeta.AddString("      \"inputs\": {")
+	}
+
+	// Concatenate the generated JavaScript chunks together
+	var compileResultsForSourceMap []compileResultForSourceMap
+	var legalCommentList []legalCommentEntry
+	var metaOrder []uint32
+	var metaBytes map[uint32][][]byte
+	prevFileNameComment := uint32(0)
+	if c.options.NeedsMetafile {
+		metaOrder = make([]uint32, 0, len(compileResults))
+		metaBytes = make(map[uint32][][]byte, len(compileResults))
+	}
+	for _, compileResult := range compileResults {
+		if len(compileResult.ExtractedLegalComments) > 0 {
+			legalCommentList = append(legalCommentList, legalCommentEntry{
+				sourceIndex: compileResult.sourceIndex,
+				comments:    compileResult.ExtractedLegalComments,
+			})
+		}
+
+		// Add a comment with the file path before the file contents
+		if c.options.Mode == config.ModeBundle && !c.options.MinifyWhitespace &&
+			prevFileNameComment != compileResult.sourceIndex && len(compileResult.JS) > 0 {
+			if newlineBeforeComment {
+				prevOffset.AdvanceString("\n")
+				j.AddString("\n")
+			}
+
+			path := c.graph.Files[compileResult.sourceIndex].InputFile.Source.PrettyPath
+
+			// Make sure newlines in the path can't cause a syntax error. This does
+			// not minimize allocations because it's expected that this case never
+			// comes up in practice.
+			path = strings.ReplaceAll(path, "\r", "\\r")
+			path = strings.ReplaceAll(path, "\n", "\\n")
+			path = strings.ReplaceAll(path, "\u2028", "\\u2028")
+			path = strings.ReplaceAll(path, "\u2029", "\\u2029")
+
+			text := fmt.Sprintf("%s// %s\n", indent, path)
+			prevOffset.AdvanceString(text)
+			j.AddString(text)
+			prevFileNameComment = compileResult.sourceIndex
+		}
+
+		// Don't include the runtime in source maps
+		if c.graph.Files[compileResult.sourceIndex].InputFile.OmitFromSourceMapsAndMetafile {
+			prevOffset.AdvanceString(string(compileResult.JS))
+			j.AddBytes(compileResult.JS)
+		} else {
+			// Save the offset to the start of the stored JavaScript
+			compileResult.generatedOffset = prevOffset
+			j.AddBytes(compileResult.JS)
+
+			// Ignore empty source map chunks
+			if compileResult.SourceMapChunk.ShouldIgnore {
+				prevOffset.AdvanceBytes(compileResult.JS)
+			} else {
+				prevOffset = sourcemap.LineColumnOffset{}
+
+				// Include this file in the source map
+				if c.options.SourceMap != config.SourceMapNone {
+					compileResultsForSourceMap = append(compileResultsForSourceMap, compileResultForSourceMap{
+						sourceMapChunk:  compileResult.SourceMapChunk,
+						generatedOffset: compileResult.generatedOffset,
+						sourceIndex:     compileResult.sourceIndex,
+					})
+				}
+			}
+
+			// Include this file in the metadata
+			if c.options.NeedsMetafile {
+				// Accumulate file sizes since a given file may be split into multiple parts
+				bytes, ok := metaBytes[compileResult.sourceIndex]
+				if !ok {
+					metaOrder = append(metaOrder, compileResult.sourceIndex)
+				}
+				metaBytes[compileResult.sourceIndex] = append(bytes, compileResult.JS)
+			}
+		}
+
+		// Put a newline before the next file path comment
+		if len(compileResult.JS) > 0 {
+			newlineBeforeComment = true
+		}
+	}
+
+	// Stick the entry point tail at the end of the file. Deliberately don't
+	// include any source mapping information for this because it's automatically
+	// generated and doesn't correspond to a location in the input file.
+	j.AddBytes(entryPointTail.JS)
+
+	// Put the cross-chunk suffix inside the IIFE
+	if len(crossChunkSuffix) > 0 {
+		if newlineBeforeComment {
+			j.AddString(newline)
+		}
+		j.AddBytes(crossChunkSuffix)
+	}
+
+	// Optionally wrap with an IIFE
+	if c.options.OutputFormat == config.FormatIIFE {
+		j.AddString("})();" + newline)
+	}
+
+	// Make sure the file ends with a newline
+	j.EnsureNewlineAtEnd()
+	slashTag := "/script"
+	if c.options.UnsupportedJSFeatures.Has(compat.InlineScript) {
+		slashTag = ""
+	}
+	c.maybeAppendLegalComments(c.options.LegalComments, legalCommentList, chunk, &j, slashTag)
+
+	if len(c.options.JSFooter) > 0 {
+		j.AddString(c.options.JSFooter)
+		j.AddString("\n")
+	}
+
+	// The JavaScript contents are done now that the source map comment is in
+	chunk.intermediateOutput = c.breakJoinerIntoPieces(j)
+	timer.End("Join JavaScript files")
+
+	if c.options.SourceMap != config.SourceMapNone {
+		timer.Begin("Generate source map")
+		canHaveShifts := chunk.intermediateOutput.pieces != nil
+		chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps, canHaveShifts)
+		timer.End("Generate source map")
+	}
+
+	// End the metadata lazily. The final output size is not known until the
+	// final import paths are substituted into the output pieces generated below.
+	if c.options.NeedsMetafile {
+		pieces := make([][]intermediateOutput, len(metaOrder))
+		for i, sourceIndex := range metaOrder {
+			slices := metaBytes[sourceIndex]
+			outputs := make([]intermediateOutput, len(slices))
+			for j, slice := range slices {
+				outputs[j] = c.breakOutputIntoPieces(slice)
+			}
+			pieces[i] = outputs
+		}
+		chunk.jsonMetadataChunkCallback = func(finalOutputSize int) helpers.Joiner {
+			finalRelDir := c.fs.Dir(chunk.finalRelPath)
+			for i, sourceIndex := range metaOrder {
+				if i > 0 {
+					jMeta.AddString(",")
+				}
+				count := 0
+				for _, output := range pieces[i] {
+					count += c.accurateFinalByteCount(output, finalRelDir)
+				}
+				jMeta.AddString(fmt.Sprintf("\n        %s: {\n          \"bytesInOutput\": %d\n        %s}",
+					helpers.QuoteForJSON(c.graph.Files[sourceIndex].InputFile.Source.PrettyPath, c.options.ASCIIOnly),
+					count, c.generateExtraDataForFileJS(sourceIndex)))
+			}
+			if len(metaOrder) > 0 {
+				jMeta.AddString("\n      ")
+			}
+			jMeta.AddString(fmt.Sprintf("},\n      \"bytes\": %d\n    }", finalOutputSize))
+			return jMeta
+		}
+	}
+
+	c.generateIsolatedHashInParallel(chunk)
+	chunk.isExecutable = isExecutable
+	chunkWaitGroup.Done()
+}
+
+func (c *linkerContext) generateGlobalNamePrefix() string {
+	var text string
+	globalName := c.options.GlobalName
+	prefix := globalName[0]
+	space := " "
+	join := ";\n"
+
+	if c.options.MinifyWhitespace {
+		space = ""
+		join = ";"
+	}
+
+	// Use "||=" to make the code more compact when it's supported
+	if len(globalName) > 1 && !c.options.UnsupportedJSFeatures.Has(compat.LogicalAssignment) {
+		if js_printer.CanEscapeIdentifier(prefix, c.options.UnsupportedJSFeatures, c.options.ASCIIOnly) {
+			if c.options.ASCIIOnly {
+				prefix = string(js_printer.QuoteIdentifier(nil, prefix, c.options.UnsupportedJSFeatures))
+			}
+			text = fmt.Sprintf("var %s%s", prefix, join)
+		} else {
+			prefix = fmt.Sprintf("this[%s]", helpers.QuoteForJSON(prefix, c.options.ASCIIOnly))
+		}
+		for _, name := range globalName[1:] {
+			var dotOrIndex string
+			if js_printer.CanEscapeIdentifier(name, c.options.UnsupportedJSFeatures, c.options.ASCIIOnly) {
+				if c.options.ASCIIOnly {
+					name = string(js_printer.QuoteIdentifier(nil, name, c.options.UnsupportedJSFeatures))
+				}
+				dotOrIndex = fmt.Sprintf(".%s", name)
+			} else {
+				dotOrIndex = fmt.Sprintf("[%s]", helpers.QuoteForJSON(name, c.options.ASCIIOnly))
+			}
+			prefix = fmt.Sprintf("(%s%s||=%s{})%s", prefix, space, space, dotOrIndex)
+		}
+		return fmt.Sprintf("%s%s%s=%s", text, prefix, space, space)
+	}
+
+	if js_printer.CanEscapeIdentifier(prefix, c.options.UnsupportedJSFeatures, c.options.ASCIIOnly) {
+		if c.options.ASCIIOnly {
+			prefix = string(js_printer.QuoteIdentifier(nil, prefix, c.options.UnsupportedJSFeatures))
+		}
+		text = fmt.Sprintf("var %s%s=%s", prefix, space, space)
+	} else {
+		prefix = fmt.Sprintf("this[%s]", helpers.QuoteForJSON(prefix, c.options.ASCIIOnly))
+		text = fmt.Sprintf("%s%s=%s", prefix, space, space)
+	}
+
+	for _, name := range globalName[1:] {
+		oldPrefix := prefix
+		if js_printer.CanEscapeIdentifier(name, c.options.UnsupportedJSFeatures, c.options.ASCIIOnly) {
+			if c.options.ASCIIOnly {
+				name = string(js_printer.QuoteIdentifier(nil, name, c.options.UnsupportedJSFeatures))
+			}
+			prefix = fmt.Sprintf("%s.%s", prefix, name)
+		} else {
+			prefix = fmt.Sprintf("%s[%s]", prefix, helpers.QuoteForJSON(name, c.options.ASCIIOnly))
+		}
+		text += fmt.Sprintf("%s%s||%s{}%s%s%s=%s", oldPrefix, space, space, join, prefix, space, space)
+	}
+
+	return text
+}
+
// compileResultCSS holds the printed output for one CSS import in a chunk
// plus the bookkeeping needed to stitch together the chunk's source map and
// metafile entries when the pieces are joined.
type compileResultCSS struct {
	// Embedded printer output: the CSS bytes, source map chunk, extracted
	// legal comments, and JSON metadata imports.
	css_printer.PrintResult

	// This is the line and column offset since the previous CSS string
	// or the start of the file if this is the first CSS string.
	generatedOffset sourcemap.LineColumnOffset

	// The source index can be invalid for short snippets that aren't necessarily
	// tied to any one file and/or that don't really need source mappings. The
	// source index is really only valid for the compile result that contains the
	// main contents of a file, which we try to only ever write out once.
	sourceIndex ast.Index32

	// Set when this file's AST contained an "@charset" rule; the chunk then
	// emits a single leading "@charset" rule on behalf of all such files.
	hasCharset bool
}
+
// generateChunkCSS prints one complete CSS chunk. The shape mirrors the
// JavaScript chunk generation: prepare per-import ASTs serially (duplicate
// rule removal must run back-to-front), print each import's CSS in parallel,
// then join the printed pieces while collecting source map chunks, legal
// comments, and metafile entries. It runs on its own goroutine and signals
// completion through "chunkWaitGroup".
func (c *linkerContext) generateChunkCSS(chunkIndex int, chunkWaitGroup *sync.WaitGroup) {
	defer c.recoverInternalError(chunkWaitGroup, runtime.SourceIndex)

	chunk := &c.chunks[chunkIndex]

	// Optional profiling scope for this chunk
	timer := c.timer.Fork()
	if timer != nil {
		timeName := fmt.Sprintf("Generate chunk %q", path.Clean(config.TemplateToString(chunk.finalTemplate)))
		timer.Begin(timeName)
		defer c.timer.Join(timer)
		defer timer.End(timeName)
	}

	chunkRepr := chunk.chunkRepr.(*chunkReprCSS)
	// One compile result slot per import, filled in by the parallel print below
	compileResults := make([]compileResultCSS, len(chunkRepr.importsInChunkInOrder))
	dataForSourceMaps := c.dataForSourceMaps()

	// Note: This contains placeholders instead of what the placeholders are
	// substituted with. That should be fine though because this should only
	// ever be used for figuring out how many "../" to add to a relative path
	// from a chunk whose final path hasn't been calculated yet to a chunk
	// whose final path has already been calculated. That and placeholders are
	// never substituted with something containing a "/" so substitution should
	// never change the "../" count.
	chunkAbsDir := c.fs.Dir(c.fs.Join(c.options.AbsOutputDir, config.TemplateToString(chunk.finalTemplate)))

	// Remove duplicate rules across files. This must be done in serial, not
	// in parallel, and must be done from the last rule to the first rule.
	timer.Begin("Prepare CSS ASTs")
	asts := make([]css_ast.AST, len(chunkRepr.importsInChunkInOrder))
	var remover css_parser.DuplicateRuleRemover
	if c.options.MinifySyntax {
		remover = css_parser.MakeDuplicateRuleMangler(c.graph.Symbols)
	}
	for i := len(chunkRepr.importsInChunkInOrder) - 1; i >= 0; i-- {
		entry := chunkRepr.importsInChunkInOrder[i]
		switch entry.kind {
		case cssImportLayers:
			// A synthetic entry that only establishes "@layer" ordering
			var rules []css_ast.Rule
			if len(entry.layers) > 0 {
				rules = append(rules, css_ast.Rule{Data: &css_ast.RAtLayer{Names: entry.layers}})
			}
			rules, importRecords := wrapRulesWithConditions(rules, nil, entry.conditions, entry.conditionImportRecords)
			asts[i] = css_ast.AST{Rules: rules, ImportRecords: importRecords}

		case cssImportExternalPath:
			// An "@import" of a URL that stays external to the bundle
			var conditions *css_ast.ImportConditions
			if len(entry.conditions) > 0 {
				conditions = &entry.conditions[0]

				// Handling a chain of nested conditions is complicated. We can't
				// necessarily join them together because a) there may be multiple
				// layer names and b) layer names are only supposed to be inserted
				// into the layer order if the parent conditions are applied.
				//
				// Instead we handle them by preserving the "@import" nesting using
				// imports of data URL stylesheets. This may seem strange but I think
				// this is the only way to do this in CSS.
				for i := len(entry.conditions) - 1; i > 0; i-- {
					astImport := css_ast.AST{
						Rules: []css_ast.Rule{{Data: &css_ast.RAtImport{
							ImportRecordIndex: uint32(len(entry.conditionImportRecords)),
							ImportConditions:  &entry.conditions[i],
						}}},
						ImportRecords: append(entry.conditionImportRecords, ast.ImportRecord{
							Kind: ast.ImportAt,
							Path: entry.externalPath,
						}),
					}
					astResult := css_printer.Print(astImport, c.graph.Symbols, css_printer.Options{
						MinifyWhitespace: c.options.MinifyWhitespace,
						ASCIIOnly:        c.options.ASCIIOnly,
					})
					entry.externalPath = logger.Path{Text: helpers.EncodeStringAsShortestDataURL("text/css", string(bytes.TrimSpace(astResult.CSS)))}
				}
			}
			asts[i] = css_ast.AST{
				ImportRecords: append(append([]ast.ImportRecord{}, entry.conditionImportRecords...), ast.ImportRecord{
					Kind: ast.ImportAt,
					Path: entry.externalPath,
				}),
				Rules: []css_ast.Rule{{Data: &css_ast.RAtImport{
					ImportRecordIndex: uint32(len(entry.conditionImportRecords)),
					ImportConditions:  conditions,
				}}},
			}

		case cssImportSourceIndex:
			// A real input file that is bundled into this chunk
			file := &c.graph.Files[entry.sourceIndex]
			ast := file.InputFile.Repr.(*graph.CSSRepr).AST

			// Filter out "@charset", "@import", and leading "@layer" rules
			rules := make([]css_ast.Rule, 0, len(ast.Rules))
			didFindAtImport := false
			didFindAtLayer := false
			for _, rule := range ast.Rules {
				switch rule.Data.(type) {
				case *css_ast.RAtCharset:
					// Remember the charset so the chunk can emit one leading rule
					compileResults[i].hasCharset = true
					continue
				case *css_ast.RAtLayer:
					didFindAtLayer = true
				case *css_ast.RAtImport:
					if !didFindAtImport {
						didFindAtImport = true
						if didFindAtLayer {
							// Filter out the pre-import layers once we see the first
							// "@import". These layers are special-cased by the linker
							// and already appear as a separate entry in import order.
							end := 0
							for _, rule := range rules {
								if _, ok := rule.Data.(*css_ast.RAtLayer); !ok {
									rules[end] = rule
									end++
								}
							}
							rules = rules[:end]
						}
					}
					continue
				}
				rules = append(rules, rule)
			}

			rules, ast.ImportRecords = wrapRulesWithConditions(rules, ast.ImportRecords, entry.conditions, entry.conditionImportRecords)

			// Remove top-level duplicate rules across files
			if c.options.MinifySyntax {
				rules = remover.RemoveDuplicateRulesInPlace(entry.sourceIndex, rules, ast.ImportRecords)
			}

			ast.Rules = rules
			asts[i] = ast
		}
	}
	timer.End("Prepare CSS ASTs")

	// Generate CSS for each file in parallel
	timer.Begin("Print CSS files")
	waitGroup := sync.WaitGroup{}
	for i, entry := range chunkRepr.importsInChunkInOrder {
		// Create a goroutine for this file
		waitGroup.Add(1)
		go func(i int, entry cssImportOrder, compileResult *compileResultCSS) {
			cssOptions := css_printer.Options{
				MinifyWhitespace:    c.options.MinifyWhitespace,
				LineLimit:           c.options.LineLimit,
				ASCIIOnly:           c.options.ASCIIOnly,
				LegalComments:       c.options.LegalComments,
				SourceMap:           c.options.SourceMap,
				UnsupportedFeatures: c.options.UnsupportedCSSFeatures,
				NeedsMetafile:       c.options.NeedsMetafile,
				LocalNames:          c.mangledProps,
			}

			if entry.kind == cssImportSourceIndex {
				// NOTE(review): panic recovery is only installed for entries
				// backed by a real source file — confirm the other entry kinds
				// cannot panic inside the printer
				defer c.recoverInternalError(&waitGroup, entry.sourceIndex)
				file := &c.graph.Files[entry.sourceIndex]

				// Only generate a source map if needed
				if file.InputFile.Loader.CanHaveSourceMap() && c.options.SourceMap != config.SourceMapNone {
					cssOptions.AddSourceMappings = true
					cssOptions.InputSourceMap = file.InputFile.InputSourceMap
					cssOptions.LineOffsetTables = dataForSourceMaps[entry.sourceIndex].LineOffsetTables
				}

				cssOptions.InputSourceIndex = entry.sourceIndex
				compileResult.sourceIndex = ast.MakeIndex32(entry.sourceIndex)
			}

			compileResult.PrintResult = css_printer.Print(asts[i], c.graph.Symbols, cssOptions)
			waitGroup.Done()
		}(i, entry, &compileResults[i])
	}

	waitGroup.Wait()
	timer.End("Print CSS files")
	timer.Begin("Join CSS files")
	j := helpers.Joiner{}
	prevOffset := sourcemap.LineColumnOffset{}
	newlineBeforeComment := false

	// The banner comes first and must be tracked in the source map offset
	if len(c.options.CSSBanner) > 0 {
		prevOffset.AdvanceString(c.options.CSSBanner)
		j.AddString(c.options.CSSBanner)
		prevOffset.AdvanceString("\n")
		j.AddString("\n")
	}

	// Generate any prefix rules now
	var jsonMetadataImports []string
	{
		tree := css_ast.AST{}

		// "@charset" is the only thing that comes before "@import"
		for _, compileResult := range compileResults {
			if compileResult.hasCharset {
				tree.Rules = append(tree.Rules, css_ast.Rule{Data: &css_ast.RAtCharset{Encoding: "UTF-8"}})
				break
			}
		}

		if len(tree.Rules) > 0 {
			result := css_printer.Print(tree, c.graph.Symbols, css_printer.Options{
				MinifyWhitespace: c.options.MinifyWhitespace,
				LineLimit:        c.options.LineLimit,
				ASCIIOnly:        c.options.ASCIIOnly,
				NeedsMetafile:    c.options.NeedsMetafile,
			})
			jsonMetadataImports = result.JSONMetadataImports
			if len(result.CSS) > 0 {
				prevOffset.AdvanceBytes(result.CSS)
				j.AddBytes(result.CSS)
				newlineBeforeComment = true
			}
		}
	}

	// Start the metadata (the JSON fragment is completed lazily below once
	// the final output size is known)
	jMeta := helpers.Joiner{}
	if c.options.NeedsMetafile {
		isFirstMeta := true
		jMeta.AddString("{\n      \"imports\": [")
		for _, json := range jsonMetadataImports {
			if isFirstMeta {
				isFirstMeta = false
			} else {
				jMeta.AddString(",")
			}
			jMeta.AddString(json)
		}
		for _, compileResult := range compileResults {
			for _, json := range compileResult.JSONMetadataImports {
				if isFirstMeta {
					isFirstMeta = false
				} else {
					jMeta.AddString(",")
				}
				jMeta.AddString(json)
			}
		}
		if !isFirstMeta {
			jMeta.AddString("\n      ")
		}
		if chunk.isEntryPoint {
			file := &c.graph.Files[chunk.sourceIndex]

			// Do not generate "entryPoint" for CSS files that are the result of
			// importing CSS into JavaScript. We want this to be a 1:1 relationship
			// and there is already an output file for the JavaScript entry point.
			if _, ok := file.InputFile.Repr.(*graph.CSSRepr); ok {
				jMeta.AddString(fmt.Sprintf("],\n      \"entryPoint\": %s,\n      \"inputs\": {",
					helpers.QuoteForJSON(file.InputFile.Source.PrettyPath, c.options.ASCIIOnly)))
			} else {
				jMeta.AddString("],\n      \"inputs\": {")
			}
		} else {
			jMeta.AddString("],\n      \"inputs\": {")
		}
	}

	// Concatenate the generated CSS chunks together
	var compileResultsForSourceMap []compileResultForSourceMap
	var legalCommentList []legalCommentEntry
	for _, compileResult := range compileResults {
		// Collect extracted legal comments for maybeAppendLegalComments below
		if len(compileResult.ExtractedLegalComments) > 0 && compileResult.sourceIndex.IsValid() {
			legalCommentList = append(legalCommentList, legalCommentEntry{
				sourceIndex: compileResult.sourceIndex.GetIndex(),
				comments:    compileResult.ExtractedLegalComments,
			})
		}

		// In non-minified bundle mode, label each file's output with a comment
		if c.options.Mode == config.ModeBundle && !c.options.MinifyWhitespace && compileResult.sourceIndex.IsValid() {
			var newline string
			if newlineBeforeComment {
				newline = "\n"
			}
			comment := fmt.Sprintf("%s/* %s */\n", newline, c.graph.Files[compileResult.sourceIndex.GetIndex()].InputFile.Source.PrettyPath)
			prevOffset.AdvanceString(comment)
			j.AddString(comment)
		}
		if len(compileResult.CSS) > 0 {
			newlineBeforeComment = true
		}

		// Save the offset to the start of the stored JavaScript
		compileResult.generatedOffset = prevOffset
		j.AddBytes(compileResult.CSS)

		// Ignore empty source map chunks
		if compileResult.SourceMapChunk.ShouldIgnore {
			prevOffset.AdvanceBytes(compileResult.CSS)
		} else {
			prevOffset = sourcemap.LineColumnOffset{}

			// Include this file in the source map
			if c.options.SourceMap != config.SourceMapNone && compileResult.sourceIndex.IsValid() {
				compileResultsForSourceMap = append(compileResultsForSourceMap, compileResultForSourceMap{
					sourceMapChunk:  compileResult.SourceMapChunk,
					generatedOffset: compileResult.generatedOffset,
					sourceIndex:     compileResult.sourceIndex.GetIndex(),
				})
			}
		}
	}

	// Make sure the file ends with a newline
	j.EnsureNewlineAtEnd()
	// "</style>" must be escaped in legal comments unless inline style tags
	// are unsupported anyway
	slashTag := "/style"
	if c.options.UnsupportedCSSFeatures.Has(compat.InlineStyle) {
		slashTag = ""
	}
	c.maybeAppendLegalComments(c.options.LegalComments, legalCommentList, chunk, &j, slashTag)

	if len(c.options.CSSFooter) > 0 {
		j.AddString(c.options.CSSFooter)
		j.AddString("\n")
	}

	// The CSS contents are done now that the source map comment is in
	chunk.intermediateOutput = c.breakJoinerIntoPieces(j)
	timer.End("Join CSS files")

	if c.options.SourceMap != config.SourceMapNone {
		timer.Begin("Generate source map")
		canHaveShifts := chunk.intermediateOutput.pieces != nil
		chunk.outputSourceMap = c.generateSourceMapForChunk(compileResultsForSourceMap, chunkAbsDir, dataForSourceMaps, canHaveShifts)
		timer.End("Generate source map")
	}

	// End the metadata lazily. The final output size is not known until the
	// final import paths are substituted into the output pieces generated below.
	if c.options.NeedsMetafile {
		pieces := make([]intermediateOutput, len(compileResults))
		for i, compileResult := range compileResults {
			pieces[i] = c.breakOutputIntoPieces(compileResult.CSS)
		}
		chunk.jsonMetadataChunkCallback = func(finalOutputSize int) helpers.Joiner {
			finalRelDir := c.fs.Dir(chunk.finalRelPath)
			isFirst := true
			for i, compileResult := range compileResults {
				if !compileResult.sourceIndex.IsValid() {
					continue
				}
				if isFirst {
					isFirst = false
				} else {
					jMeta.AddString(",")
				}
				jMeta.AddString(fmt.Sprintf("\n        %s: {\n          \"bytesInOutput\": %d\n        }",
					helpers.QuoteForJSON(c.graph.Files[compileResult.sourceIndex.GetIndex()].InputFile.Source.PrettyPath, c.options.ASCIIOnly),
					c.accurateFinalByteCount(pieces[i], finalRelDir)))
			}
			if len(compileResults) > 0 {
				jMeta.AddString("\n      ")
			}
			jMeta.AddString(fmt.Sprintf("},\n      \"bytes\": %d\n    }", finalOutputSize))
			return jMeta
		}
	}

	c.generateIsolatedHashInParallel(chunk)
	chunkWaitGroup.Done()
}
+
+func wrapRulesWithConditions(
+	rules []css_ast.Rule, importRecords []ast.ImportRecord,
+	conditions []css_ast.ImportConditions, conditionImportRecords []ast.ImportRecord,
+) ([]css_ast.Rule, []ast.ImportRecord) {
+	for i := len(conditions) - 1; i >= 0; i-- {
+		item := conditions[i]
+
+		// Generate "@layer" wrappers. Note that empty "@layer" rules still have
+		// a side effect (they set the layer order) so they cannot be removed.
+		for _, t := range item.Layers {
+			if len(rules) == 0 {
+				if t.Children == nil {
+					// Omit an empty "@layer {}" entirely
+					continue
+				} else {
+					// Generate "@layer foo;" instead of "@layer foo {}"
+					rules = nil
+				}
+			}
+			var prelude []css_ast.Token
+			if t.Children != nil {
+				prelude = *t.Children
+			}
+			prelude, importRecords = css_ast.CloneTokensWithImportRecords(prelude, conditionImportRecords, nil, importRecords)
+			rules = []css_ast.Rule{{Data: &css_ast.RKnownAt{
+				AtToken: "layer",
+				Prelude: prelude,
+				Rules:   rules,
+			}}}
+		}
+
+		// Generate "@supports" wrappers. This is not done if the rule block is
+		// empty because empty "@supports" rules have no effect.
+		if len(rules) > 0 {
+			for _, t := range item.Supports {
+				t.Kind = css_lexer.TOpenParen
+				t.Text = "("
+				var prelude []css_ast.Token
+				prelude, importRecords = css_ast.CloneTokensWithImportRecords([]css_ast.Token{t}, conditionImportRecords, nil, importRecords)
+				rules = []css_ast.Rule{{Data: &css_ast.RKnownAt{
+					AtToken: "supports",
+					Prelude: prelude,
+					Rules:   rules,
+				}}}
+			}
+		}
+
+		// Generate "@media" wrappers. This is not done if the rule block is
+		// empty because empty "@media" rules have no effect.
+		if len(rules) > 0 && len(item.Media) > 0 {
+			var prelude []css_ast.Token
+			prelude, importRecords = css_ast.CloneTokensWithImportRecords(item.Media, conditionImportRecords, nil, importRecords)
+			rules = []css_ast.Rule{{Data: &css_ast.RKnownAt{
+				AtToken: "media",
+				Prelude: prelude,
+				Rules:   rules,
+			}}}
+		}
+	}
+
+	return rules, importRecords
+}
+
// legalCommentEntry associates the legal comments extracted from one input
// file with that file's source index so the comments can later be attributed
// to a package path (or treated as first-party) when appended to the chunk.
type legalCommentEntry struct {
	sourceIndex uint32
	comments    []string
}
+
// Add all unique legal comments to the end of the file. These are
// deduplicated because some projects have thousands of files with the same
// comment. The comment must be preserved in the output for legal reasons but
// at the same time we want to generate a small bundle when minifying.
//
// Depending on "legalComments" the comments are either appended to "j"
// (end-of-file mode) or collected into "chunk.externalLegalComments" for a
// separate file (linked/external modes). "slashTag" is the closing tag
// ("/script" or "/style") that must be escaped inside comments, or "" when
// no escaping is needed.
func (c *linkerContext) maybeAppendLegalComments(
	legalComments config.LegalComments,
	legalCommentList []legalCommentEntry,
	chunk *chunkInfo,
	j *helpers.Joiner,
	slashTag string,
) {
	// "none" and "inline" are fully handled during printing; nothing to do here
	switch legalComments {
	case config.LegalCommentsNone, config.LegalCommentsInline:
		return
	}

	type thirdPartyEntry struct {
		packagePath string
		comments    []string
	}

	var uniqueFirstPartyComments []string
	var thirdPartyComments []thirdPartyEntry
	// Set used to deduplicate first-party comments while preserving order
	hasFirstPartyComment := make(map[string]struct{})

	for _, entry := range legalCommentList {
		source := c.graph.Files[entry.sourceIndex].InputFile.Source
		packagePath := ""

		// Try to extract a package name from the source path. If we can find a
		// "node_modules" path component in the path, then assume this is a legal
		// comment in third-party code and that everything after "node_modules" is
		// the package name and subpath. If we can't, then assume this is a legal
		// comment in first-party code.
		//
		// The rationale for this behavior: If we just include third-party comments
		// as-is and the third-party comments don't say what package they're from
		// (which isn't uncommon), then it'll look like that comment applies to
		// all code in the file which is very wrong. So we need to somehow say
		// where the comment comes from. But we don't want to say where every
		// comment comes from because people probably won't appreciate this for
		// first-party comments. And we don't want to include the whole path to
		// each third-part module because a) that could contain information about
		// the local machine that people don't want in their bundle and b) that
		// could differ depending on unimportant details like the package manager
		// used to install the packages (npm vs. pnpm vs. yarn).
		if source.KeyPath.Namespace != "dataurl" {
			path := source.KeyPath.Text
			previous := len(path)
			// Walk path components right-to-left looking for "node_modules"
			for previous > 0 {
				slash := strings.LastIndexAny(path[:previous], "\\/")
				component := path[slash+1 : previous]
				if component == "node_modules" {
					if previous < len(path) {
						packagePath = strings.ReplaceAll(path[previous+1:], "\\", "/")
					}
					break
				}
				previous = slash
			}
		}

		if packagePath != "" {
			thirdPartyComments = append(thirdPartyComments, thirdPartyEntry{
				packagePath: packagePath,
				comments:    entry.comments,
			})
		} else {
			// First-party comments are deduplicated; third-party ones are kept
			// per-package so they can be attributed
			for _, comment := range entry.comments {
				if _, ok := hasFirstPartyComment[comment]; !ok {
					hasFirstPartyComment[comment] = struct{}{}
					uniqueFirstPartyComments = append(uniqueFirstPartyComments, comment)
				}
			}
		}
	}

	switch legalComments {
	case config.LegalCommentsEndOfFile:
		for _, comment := range uniqueFirstPartyComments {
			j.AddString(helpers.EscapeClosingTag(comment, slashTag))
			j.AddString("\n")
		}

		if len(thirdPartyComments) > 0 {
			// Third-party comments are grouped under a single banner comment,
			// attributed by package path. Line and block comments are re-wrapped
			// so they can nest inside the enclosing "/* ... */" banner.
			j.AddString("/*! Bundled license information:\n")
			for _, entry := range thirdPartyComments {
				j.AddString(fmt.Sprintf("\n%s:\n", helpers.EscapeClosingTag(entry.packagePath, slashTag)))
				for _, comment := range entry.comments {
					comment = helpers.EscapeClosingTag(comment, slashTag)
					if strings.HasPrefix(comment, "//") {
						j.AddString(fmt.Sprintf("  (*%s *)\n", comment[2:]))
					} else if strings.HasPrefix(comment, "/*") && strings.HasSuffix(comment, "*/") {
						j.AddString(fmt.Sprintf("  (%s)\n", strings.ReplaceAll(comment[1:len(comment)-1], "\n", "\n  ")))
					}
				}
			}
			j.AddString("*/\n")
		}

	case config.LegalCommentsLinkedWithComment, config.LegalCommentsExternalWithoutComment:
		// These modes write the comments to a separate file instead of the chunk
		var jComments helpers.Joiner

		for _, comment := range uniqueFirstPartyComments {
			jComments.AddString(comment)
			jComments.AddString("\n")
		}

		if len(thirdPartyComments) > 0 {
			if len(uniqueFirstPartyComments) > 0 {
				jComments.AddString("\n")
			}
			jComments.AddString("Bundled license information:\n")
			for _, entry := range thirdPartyComments {
				jComments.AddString(fmt.Sprintf("\n%s:\n", entry.packagePath))
				for _, comment := range entry.comments {
					jComments.AddString(fmt.Sprintf("  %s\n", strings.ReplaceAll(comment, "\n", "\n  ")))
				}
			}
		}

		chunk.externalLegalComments = jComments.Done()
	}
}
+
+func (c *linkerContext) appendIsolatedHashesForImportedChunks(
+	hash hash.Hash,
+	chunkIndex uint32,
+	visited []uint32,
+	visitedKey uint32,
+) {
+	// Only visit each chunk at most once. This is important because there may be
+	// cycles in the chunk import graph. If there's a cycle, we want to include
+	// the hash of every chunk involved in the cycle (along with all of their
+	// dependencies). This depth-first traversal will naturally do that.
+	if visited[chunkIndex] == visitedKey {
+		return
+	}
+	visited[chunkIndex] = visitedKey
+	chunk := &c.chunks[chunkIndex]
+
+	// Visit the other chunks that this chunk imports before visiting this chunk
+	for _, chunkImport := range chunk.crossChunkImports {
+		c.appendIsolatedHashesForImportedChunks(hash, chunkImport.chunkIndex, visited, visitedKey)
+	}
+
+	// Mix in hashes for referenced asset paths (i.e. the "file" loader)
+	for _, piece := range chunk.intermediateOutput.pieces {
+		if piece.kind == outputPieceAssetIndex {
+			file := c.graph.Files[piece.index]
+			if len(file.InputFile.AdditionalFiles) != 1 {
+				panic("Internal error")
+			}
+			relPath, _ := c.fs.Rel(c.options.AbsOutputDir, file.InputFile.AdditionalFiles[0].AbsPath)
+
+			// Make sure to always use forward slashes, even on Windows
+			relPath = strings.ReplaceAll(relPath, "\\", "/")
+
+			// Mix in the hash for the relative path, which ends up as a JS string
+			hashWriteLengthPrefixed(hash, []byte(relPath))
+		}
+	}
+
+	// Mix in the hash for this chunk
+	hash.Write(chunk.waitForIsolatedHash())
+}
+
+func (c *linkerContext) breakJoinerIntoPieces(j helpers.Joiner) intermediateOutput {
+	// Optimization: If there can be no substitutions, just reuse the initial
+	// joiner that was used when generating the intermediate chunk output
+	// instead of creating another one and copying the whole file into it.
+	if !j.Contains(c.uniqueKeyPrefix, c.uniqueKeyPrefixBytes) {
+		return intermediateOutput{joiner: j}
+	}
+	return c.breakOutputIntoPieces(j.Done())
+}
+
+func (c *linkerContext) breakOutputIntoPieces(output []byte) intermediateOutput {
+	var pieces []outputPiece
+	prefix := c.uniqueKeyPrefixBytes
+	for {
+		// Scan for the next piece boundary
+		boundary := bytes.Index(output, prefix)
+
+		// Try to parse the piece boundary
+		var kind outputPieceIndexKind
+		var index uint32
+		if boundary != -1 {
+			if start := boundary + len(prefix); start+9 > len(output) {
+				boundary = -1
+			} else {
+				switch output[start] {
+				case 'A':
+					kind = outputPieceAssetIndex
+				case 'C':
+					kind = outputPieceChunkIndex
+				}
+				for j := 1; j < 9; j++ {
+					c := output[start+j]
+					if c < '0' || c > '9' {
+						boundary = -1
+						break
+					}
+					index = index*10 + uint32(c) - '0'
+				}
+			}
+		}
+
+		// Validate the boundary
+		switch kind {
+		case outputPieceAssetIndex:
+			if index >= uint32(len(c.graph.Files)) {
+				boundary = -1
+			}
+
+		case outputPieceChunkIndex:
+			if index >= uint32(len(c.chunks)) {
+				boundary = -1
+			}
+
+		default:
+			boundary = -1
+		}
+
+		// If we're at the end, generate one final piece
+		if boundary == -1 {
+			pieces = append(pieces, outputPiece{
+				data: output,
+			})
+			break
+		}
+
+		// Otherwise, generate an interior piece and continue
+		pieces = append(pieces, outputPiece{
+			data:  output[:boundary],
+			index: index,
+			kind:  kind,
+		})
+		output = output[boundary+len(prefix)+9:]
+	}
+	return intermediateOutput{pieces: pieces}
+}
+
+func (c *linkerContext) generateIsolatedHashInParallel(chunk *chunkInfo) {
+	// Compute the hash in parallel. This is a speedup when it turns out the hash
+	// isn't needed (well, as long as there are threads to spare).
+	channel := make(chan []byte, 1)
+	chunk.waitForIsolatedHash = func() []byte {
+		data := <-channel
+		channel <- data
+		return data
+	}
+	go c.generateIsolatedHash(chunk, channel)
+}
+
+func (c *linkerContext) generateIsolatedHash(chunk *chunkInfo, channel chan []byte) {
+	hash := xxhash.New()
+
+	// Mix the file names and part ranges of all of the files in this chunk into
+	// the hash. Objects that appear identical but that live in separate files or
+	// that live in separate parts in the same file must not be merged. This only
+	// needs to be done for JavaScript files, not CSS files.
+	if chunkRepr, ok := chunk.chunkRepr.(*chunkReprJS); ok {
+		for _, partRange := range chunkRepr.partsInChunkInOrder {
+			var filePath string
+			file := &c.graph.Files[partRange.sourceIndex]
+			if file.InputFile.Source.KeyPath.Namespace == "file" {
+				// Use the pretty path as the file name since it should be platform-
+				// independent (relative paths and the "/" path separator)
+				filePath = file.InputFile.Source.PrettyPath
+			} else {
+				// If this isn't in the "file" namespace, just use the full path text
+				// verbatim. This could be a source of cross-platform differences if
+				// plugins are storing platform-specific information in here, but then
+				// that problem isn't caused by esbuild itself.
+				filePath = file.InputFile.Source.KeyPath.Text
+			}
+
+			// Include the path namespace in the hash
+			hashWriteLengthPrefixed(hash, []byte(file.InputFile.Source.KeyPath.Namespace))
+
+			// Then include the file path
+			hashWriteLengthPrefixed(hash, []byte(filePath))
+
+			// Also write the part range. These numbers are deterministic and allocated
+			// per-file so this should be a well-behaved base for a hash.
+			hashWriteUint32(hash, partRange.partIndexBegin)
+			hashWriteUint32(hash, partRange.partIndexEnd)
+		}
+	}
+
+	// Hash the output path template as part of the content hash because we want
+	// any import to be considered different if the import's output path has changed.
+	for _, part := range chunk.finalTemplate {
+		hashWriteLengthPrefixed(hash, []byte(part.Data))
+	}
+
+	// Also hash the public path. If provided, this is used whenever files
+	// reference each other such as cross-chunk imports, asset file references,
+	// and source map comments. We always include the hash in all chunks instead
+	// of trying to figure out which chunks will include the public path for
+	// simplicity and for robustness to code changes in the future.
+	if c.options.PublicPath != "" {
+		hashWriteLengthPrefixed(hash, []byte(c.options.PublicPath))
+	}
+
+	// Include the generated output content in the hash. This excludes the
+	// randomly-generated import paths (the unique keys) and only includes the
+	// data in the spans between them.
+	if chunk.intermediateOutput.pieces != nil {
+		for _, piece := range chunk.intermediateOutput.pieces {
+			hashWriteLengthPrefixed(hash, piece.data)
+		}
+	} else {
+		bytes := chunk.intermediateOutput.joiner.Done()
+		hashWriteLengthPrefixed(hash, bytes)
+	}
+
+	// Also include the source map data in the hash. The source map is named the
+	// same name as the chunk name for ease of discovery. So we want the hash to
+	// change if the source map data changes even if the chunk data doesn't change.
+	// Otherwise the output path for the source map wouldn't change and the source
+	// map wouldn't end up being updated.
+	//
+	// Note that this means the contents of all input files are included in the
+	// hash because of "sourcesContent", so changing a comment in an input file
+	// can now change the hash of the output file. This only happens when you
+	// have source maps enabled (and "sourcesContent", which is on by default).
+	//
+	// The generated positions in the mappings here are in the output content
+	// *before* the final paths have been substituted. This may seem weird.
+	// However, I think this shouldn't cause issues because a) the unique key
+	// values are all always the same length so the offsets are deterministic
+	// and b) the final paths will be folded into the final hash later.
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Prefix)
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Mappings)
+	hashWriteLengthPrefixed(hash, chunk.outputSourceMap.Suffix)
+
+	// Store the hash so far. All other chunks that import this chunk will mix
+	// this hash into their final hash to ensure that the import path changes
+	// if this chunk (or any dependencies of this chunk) is changed.
+	channel <- hash.Sum(nil)
+}
+
+func hashWriteUint32(hash hash.Hash, value uint32) {
+	var lengthBytes [4]byte
+	binary.LittleEndian.PutUint32(lengthBytes[:], value)
+	hash.Write(lengthBytes[:])
+}
+
+// Hash the data in length-prefixed form because boundary locations are
+// important. We don't want "a" + "bc" to hash the same as "ab" + "c".
+func hashWriteLengthPrefixed(hash hash.Hash, bytes []byte) {
+	hashWriteUint32(hash, uint32(len(bytes)))
+	hash.Write(bytes)
+}
+
// Marking a symbol as unbound prevents it from being renamed or minified.
// This is only used when a module is compiled independently. We use a very
// different way of handling exports and renaming/minifying when bundling.
func (c *linkerContext) preventExportsFromBeingRenamed(sourceIndex uint32) {
	// Only JavaScript files have exports to protect
	repr, ok := c.graph.Files[sourceIndex].InputFile.Repr.(*graph.JSRepr)
	if !ok {
		return
	}
	hasImportOrExport := false

	for _, part := range repr.AST.Parts {
		for _, stmt := range part.Stmts {
			switch s := stmt.Data.(type) {
			case *js_ast.SImport:
				// Ignore imports from internal (i.e. non-external) code. Since this
				// function is only called when we're not bundling, these imports are
				// all for files that were generated automatically and aren't part of
				// the original source code (e.g. the runtime or an injected file).
				// We shouldn't consider the file a module if the only ESM imports or
				// exports are automatically generated ones.
				if repr.AST.ImportRecords[s.ImportRecordIndex].SourceIndex.IsValid() {
					continue
				}

				hasImportOrExport = true

			case *js_ast.SLocal:
				// Exported "var"/"let"/"const" declarations: pin every bound name
				if s.IsExport {
					js_ast.ForEachIdentifierBindingInDecls(s.Decls, func(loc logger.Loc, b *js_ast.BIdentifier) {
						c.graph.Symbols.Get(b.Ref).Flags |= ast.MustNotBeRenamed
					})
					hasImportOrExport = true
				}

			case *js_ast.SFunction:
				// Exported function declarations are marked unbound instead of
				// flagged (see note below on the two mechanisms)
				if s.IsExport {
					c.graph.Symbols.Get(s.Fn.Name.Ref).Kind = ast.SymbolUnbound
					hasImportOrExport = true
				}

			case *js_ast.SClass:
				if s.IsExport {
					c.graph.Symbols.Get(s.Class.Name.Ref).Kind = ast.SymbolUnbound
					hasImportOrExport = true
				}

			case *js_ast.SExportClause, *js_ast.SExportDefault, *js_ast.SExportStar:
				hasImportOrExport = true

			case *js_ast.SExportFrom:
				hasImportOrExport = true
			}
		}
	}

	// NOTE(review): exported functions/classes become SymbolUnbound while
	// exported local bindings only get MustNotBeRenamed — confirm this
	// asymmetry is intentional against the renamer's handling of both.

	// Heuristic: If this module has top-level import or export statements, we
	// consider this an ES6 module and only preserve the names of the exported
	// symbols. Everything else is minified since the names are private.
	//
	// Otherwise, we consider this potentially a script-type file instead of an
	// ES6 module. In that case, preserve the names of all top-level symbols
	// since they are all potentially exported (e.g. if this is used in a
	// <script> tag). All symbols in nested scopes are still minified.
	if !hasImportOrExport {
		for _, member := range repr.AST.ModuleScope.Members {
			c.graph.Symbols.Get(member.Ref).Flags |= ast.MustNotBeRenamed
		}
	}
}
+
// compileResultForSourceMap pairs one printed file's precomputed source map
// chunk with the offset at which that file's output begins in the enclosing
// chunk and the index of the source file it came from.
type compileResultForSourceMap struct {
	sourceMapChunk  sourcemap.Chunk
	generatedOffset sourcemap.LineColumnOffset
	sourceIndex     uint32
}
+
// generateSourceMapForChunk stitches the precomputed per-file source map
// chunks in "results" (which must be in chunk order) into a single JSON
// source map for one output chunk. "chunkAbsDir" is used to relativize
// paths in the "file" namespace. When "canHaveShifts" is true the output is
// split into prefix/mappings/suffix pieces so the mappings can be patched
// later when final paths are substituted; otherwise everything is returned
// as one allocation in the "Prefix" piece.
func (c *linkerContext) generateSourceMapForChunk(
	results []compileResultForSourceMap,
	chunkAbsDir string,
	dataForSourceMaps []bundler.DataForSourceMap,
	canHaveShifts bool,
) (pieces sourcemap.SourceMapPieces) {
	j := helpers.Joiner{}
	j.AddString("{\n  \"version\": 3")

	// Only write out the sources for a given source index once
	sourceIndexToSourcesIndex := make(map[uint32]int)

	// Generate the "sources" and "sourcesContent" arrays
	type item struct {
		path           logger.Path
		prettyPath     string
		quotedContents []byte
	}
	items := make([]item, 0, len(results))
	nextSourcesIndex := 0
	for _, result := range results {
		if _, ok := sourceIndexToSourcesIndex[result.sourceIndex]; ok {
			continue
		}
		sourceIndexToSourcesIndex[result.sourceIndex] = nextSourcesIndex
		file := &c.graph.Files[result.sourceIndex]

		// Simple case: no nested source map
		if file.InputFile.InputSourceMap == nil {
			var quotedContents []byte
			if !c.options.ExcludeSourcesContent {
				quotedContents = dataForSourceMaps[result.sourceIndex].QuotedContents[0]
			}
			items = append(items, item{
				path:           file.InputFile.Source.KeyPath,
				prettyPath:     file.InputFile.Source.PrettyPath,
				quotedContents: quotedContents,
			})
			nextSourcesIndex++
			continue
		}

		// Complex case: nested source map. Each original source of the nested
		// map becomes its own entry in our "sources" array.
		sm := file.InputFile.InputSourceMap
		for i, source := range sm.Sources {
			path := logger.Path{
				Namespace: file.InputFile.Source.KeyPath.Namespace,
				Text:      source,
			}

			// If this file is in the "file" namespace, change the relative path in
			// the source map into an absolute path using the directory of this file
			if path.Namespace == "file" {
				path.Text = c.fs.Join(c.fs.Dir(file.InputFile.Source.KeyPath.Text), source)
			}

			var quotedContents []byte
			if !c.options.ExcludeSourcesContent {
				quotedContents = dataForSourceMaps[result.sourceIndex].QuotedContents[i]
			}
			items = append(items, item{
				path:           path,
				prettyPath:     source,
				quotedContents: quotedContents,
			})
		}
		nextSourcesIndex += len(sm.Sources)
	}

	// Write the sources
	j.AddString(",\n  \"sources\": [")
	for i, item := range items {
		if i != 0 {
			j.AddString(", ")
		}

		// Modify the absolute path to the original file to be relative to the
		// directory that will contain the output file for this chunk
		if item.path.Namespace == "file" {
			if relPath, ok := c.fs.Rel(chunkAbsDir, item.path.Text); ok {
				// Make sure to always use forward slashes, even on Windows
				item.prettyPath = strings.ReplaceAll(relPath, "\\", "/")
			}
		}

		j.AddBytes(helpers.QuoteForJSON(item.prettyPath, c.options.ASCIIOnly))
	}
	j.AddString("]")

	if c.options.SourceRoot != "" {
		j.AddString(",\n  \"sourceRoot\": ")
		j.AddBytes(helpers.QuoteForJSON(c.options.SourceRoot, c.options.ASCIIOnly))
	}

	// Write the sourcesContent
	if !c.options.ExcludeSourcesContent {
		j.AddString(",\n  \"sourcesContent\": [")
		for i, item := range items {
			if i != 0 {
				j.AddString(", ")
			}
			j.AddBytes(item.quotedContents)
		}
		j.AddString("]")
	}

	j.AddString(",\n  \"mappings\": \"")

	// Write the mappings
	mappingsStart := j.Length()
	prevEndState := sourcemap.SourceMapState{}
	prevColumnOffset := 0
	totalQuotedNameLen := 0
	for _, result := range results {
		chunk := result.sourceMapChunk
		offset := result.generatedOffset
		sourcesIndex := sourceIndexToSourcesIndex[result.sourceIndex]

		// This should have already been checked earlier
		if chunk.ShouldIgnore {
			panic("Internal error")
		}

		// Because each file for the bundle is converted to a source map once,
		// the source maps are shared between all entry points in the bundle.
		// The easiest way of getting this to work is to have all source maps
		// generate as if their source index is 0. We then adjust the source
		// index per entry point by modifying the first source mapping. This
		// is done by AppendSourceMapChunk() using the source index passed
		// here.
		startState := sourcemap.SourceMapState{
			SourceIndex:     sourcesIndex,
			GeneratedLine:   offset.Lines,
			GeneratedColumn: offset.Columns,
			OriginalName:    totalQuotedNameLen,
		}
		if offset.Lines == 0 {
			startState.GeneratedColumn += prevColumnOffset
		}

		// Append the precomputed source map chunk
		sourcemap.AppendSourceMapChunk(&j, prevEndState, startState, chunk.Buffer)

		// Generate the relative offset to start from next time
		prevOriginalName := prevEndState.OriginalName
		prevEndState = chunk.EndState
		prevEndState.SourceIndex += sourcesIndex
		if chunk.Buffer.FirstNameOffset.IsValid() {
			prevEndState.OriginalName += totalQuotedNameLen
		} else {
			// It's possible for a chunk to have mappings but for none of those
			// mappings to have an associated name. The name is optional and is
			// omitted when the mapping is for a non-name token or if the final
			// and original names are the same. In that case we need to restore
			// the previous original name end state since it wasn't modified after
			// all. If we don't do this, then files after this will adjust their
			// name offsets assuming that the previous generated mapping has this
			// file's offset, which is wrong.
			prevEndState.OriginalName = prevOriginalName
		}
		prevColumnOffset = chunk.FinalGeneratedColumn
		totalQuotedNameLen += len(chunk.QuotedNames)

		// If this was all one line, include the column offset from the start
		if prevEndState.GeneratedLine == 0 {
			prevEndState.GeneratedColumn += startState.GeneratedColumn
			prevColumnOffset += startState.GeneratedColumn
		}
	}
	mappingsEnd := j.Length()

	// Write the names
	isFirstName := true
	j.AddString("\",\n  \"names\": [")
	for _, result := range results {
		for _, quotedName := range result.sourceMapChunk.QuotedNames {
			if isFirstName {
				isFirstName = false
			} else {
				j.AddString(", ")
			}
			j.AddBytes(quotedName)
		}
	}
	j.AddString("]")

	// Finish the source map
	j.AddString("\n}\n")
	bytes := j.Done()

	if !canHaveShifts {
		// If there cannot be any shifts, then we can avoid doing extra work later
		// on by preserving the source map as a single memory allocation throughout
		// the pipeline. That way we won't need to reallocate it.
		pieces.Prefix = bytes
	} else {
		// Otherwise if there can be shifts, then we need to split this into several
		// slices so that the shifts in the mappings array can be processed. This is
		// more expensive because everything will need to be recombined into a new
		// memory allocation at the end.
		pieces.Prefix = bytes[:mappingsStart]
		pieces.Mappings = bytes[mappingsStart:mappingsEnd]
		pieces.Suffix = bytes[mappingsEnd:]
	}
	return
}
+
// Recover from a panic by logging it as an internal error instead of crashing
func (c *linkerContext) recoverInternalError(waitGroup *sync.WaitGroup, sourceIndex uint32) {
	if r := recover(); r != nil {
		text := fmt.Sprintf("panic: %v", r)
		// Only name the file when it's a real input file (not the runtime)
		if sourceIndex != runtime.SourceIndex {
			text = fmt.Sprintf("%s (while printing %q)", text, c.graph.Files[sourceIndex].InputFile.Source.PrettyPath)
		}
		c.log.AddErrorWithNotes(nil, logger.Range{}, text,
			[]logger.MsgData{{Text: helpers.PrettyPrintedStack()}})
		// NOTE(review): Done is called here because the panic skipped the
		// normal Done call in the worker goroutine — confirm at call sites.
		waitGroup.Done()
	}
}
+
// joinWithPublicPath joins "relPath" onto "publicPath", inserting exactly
// one slash between them. A leading "./" (plus any redundant "/" and "./"
// runs that follow it, i.e. ".///././/x/y" => "x/y") is stripped from
// "relPath" first. When the public path is empty, "." is used so the result
// stays a relative path.
func joinWithPublicPath(publicPath string, relPath string) string {
	if strings.HasPrefix(relPath, "./") {
		relPath = relPath[2:]

		// Drop any further no-op "/" and "./" prefixes
		for len(relPath) > 0 {
			if relPath[0] == '/' {
				relPath = relPath[1:]
			} else if strings.HasPrefix(relPath, "./") {
				relPath = relPath[2:]
			} else {
				break
			}
		}
	}

	// An empty public path means "relative to the output directory"
	base := publicPath
	if base == "" {
		base = "."
	}

	// Join with exactly one slash
	if strings.HasSuffix(base, "/") {
		return base + relPath
	}
	return base + "/" + relPath
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/logger.go b/source/vendor/github.com/evanw/esbuild/internal/logger/logger.go
new file mode 100644
index 0000000..8acb904
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/logger.go
@@ -0,0 +1,2045 @@
+package logger
+
+// Logging is either done to stderr (via "NewStderrLog") or to an in-memory
+// array (via "NewDeferLog"). In-memory arrays are used to capture messages
+// from parsing individual files because during incremental builds, log
+// messages for a given file can be replayed from memory if the file ends up
+// not being reparsed.
+//
+// Errors are streamed asynchronously as they happen, each error contains the
+// contents of the line with the error, and the error count is limited by
+// default.
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+	"runtime"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+)
+
const defaultTerminalWidth = 80

// Log is a bundle of callbacks that collect and report messages. The
// concrete behavior (stream to stderr vs. buffer in memory) is decided by
// whichever constructor built the callbacks (see the package comment).
type Log struct {
	AddMsg    func(Msg)    // record one message
	HasErrors func() bool  // true once any error-kind message was added
	Peek      func() []Msg // snapshot of the messages so far

	Done func() []Msg // finalize the log and return all messages

	Level     LogLevel           // minimum level that is recorded/printed
	Overrides map[MsgID]LogLevel // per-message-ID level overrides
}
+
// LogLevel selects how verbose logging is. Lower values are more verbose;
// comparisons elsewhere use "options.LogLevel <= LevelX" to decide whether
// a message of severity X should be shown.
type LogLevel int8

const (
	LevelNone LogLevel = iota
	LevelVerbose
	LevelDebug
	LevelInfo
	LevelWarning
	LevelError
	LevelSilent
)
+
// MsgKind classifies a log message by severity.
type MsgKind uint8

const (
	Error MsgKind = iota
	Warning
	Info
	Note
	Debug
	Verbose
)

// String returns the upper-case label for this message kind. An unknown
// kind is a programmer error and causes a panic.
func (kind MsgKind) String() string {
	labels := [...]string{
		Error:   "ERROR",
		Warning: "WARNING",
		Info:    "INFO",
		Note:    "NOTE",
		Debug:   "DEBUG",
		Verbose: "VERBOSE",
	}
	if int(kind) < len(labels) {
		return labels[kind]
	}
	panic("Internal error")
}
+
// Icon returns a one-character glyph for this message kind. The legacy
// Windows Command Prompt only renders a restricted character set, so a
// simpler set of glyphs is used there. Unknown kinds panic.
func (kind MsgKind) Icon() string {
	// Special-case Windows command prompt, which only supports a few characters
	if isProbablyWindowsCommandPrompt() {
		switch kind {
		case Error:
			return "X"
		case Warning:
			return "▲"
		case Info:
			return "►"
		case Note:
			return "→"
		case Debug:
			return "●"
		case Verbose:
			return "♦"
		default:
			panic("Internal error")
		}
	}

	switch kind {
	case Error:
		return "✘"
	case Warning:
		return "▲"
	case Info:
		return "▶"
	case Note:
		return "→"
	case Debug:
		return "●"
	case Verbose:
		return "⬥"
	default:
		panic("Internal error")
	}
}
+
+var windowsCommandPrompt struct {
+	mutex         sync.Mutex
+	once          bool
+	isProbablyCMD bool
+}
+
+func isProbablyWindowsCommandPrompt() bool {
+	windowsCommandPrompt.mutex.Lock()
+	defer windowsCommandPrompt.mutex.Unlock()
+
+	if !windowsCommandPrompt.once {
+		windowsCommandPrompt.once = true
+
+		// Assume we are running in Windows Command Prompt if we're on Windows. If
+		// so, we can't use emoji because it won't be supported. Except we can
+		// still use emoji if the WT_SESSION environment variable is present
+		// because that means we're running in the new Windows Terminal instead.
+		if runtime.GOOS == "windows" {
+			windowsCommandPrompt.isProbablyCMD = true
+			if _, ok := os.LookupEnv("WT_SESSION"); ok {
+				windowsCommandPrompt.isProbablyCMD = false
+			}
+		}
+	}
+
+	return windowsCommandPrompt.isProbablyCMD
+}
+
// Msg is one log message together with any attached notes.
type Msg struct {
	Notes      []MsgData // extra explanatory entries shown with the message
	PluginName string    // originating plugin, if any
	Data       MsgData   // the primary message text and location
	Kind       MsgKind
	ID         MsgID
}

// MsgData is the text and (optional) source location of one message entry.
type MsgData struct {
	// Optional user-specified data that is passed through unmodified
	UserDetail interface{}

	Location *MsgLocation // nil when the message has no source location
	Text     string

	DisableMaximumWidth bool
}

// MsgLocation points at a span of text within a named source file.
type MsgLocation struct {
	File       string
	Namespace  string
	LineText   string
	Suggestion string
	Line       int // 1-based
	Column     int // 0-based, in bytes
	Length     int // in bytes
}
+
type Loc struct {
	// This is the 0-based index of this location from the start of the file, in bytes
	Start int32
}

// Range is a [start, start+len) span of bytes within a source file.
type Range struct {
	Loc Loc
	Len int32
}

// End returns the byte offset just past the last byte of the range.
func (r Range) End() int32 {
	return r.Loc.Start + r.Len
}

// ExpandBy grows "a" until it covers both its original span and "b". A
// zero-length "a" is treated as empty and simply becomes "b".
func (a *Range) ExpandBy(b Range) {
	if a.Len == 0 {
		*a = b
		return
	}
	start := a.Loc.Start
	if b.Loc.Start < start {
		start = b.Loc.Start
	}
	end := a.End()
	if bEnd := b.End(); bEnd > end {
		end = bEnd
	}
	a.Loc.Start = start
	a.Len = end - start
}

// Span is a string together with the range it was taken from.
type Span struct {
	Text  string
	Range Range
}
+
// This type is just so we can use Go's native sort function
type SortableMsgs []Msg

func (a SortableMsgs) Len() int          { return len(a) }
func (a SortableMsgs) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }

// Less orders messages by file, then line, then column, then kind, then
// text. Messages without a location sort before messages that have one.
func (a SortableMsgs) Less(i int, j int) bool {
	ai := a[i]
	aj := a[j]
	aiLoc := ai.Data.Location
	ajLoc := aj.Data.Location
	// A nil location sorts first; two nil locations are considered equal
	if aiLoc == nil || ajLoc == nil {
		return aiLoc == nil && ajLoc != nil
	}
	if aiLoc.File != ajLoc.File {
		return aiLoc.File < ajLoc.File
	}
	if aiLoc.Line != ajLoc.Line {
		return aiLoc.Line < ajLoc.Line
	}
	if aiLoc.Column != ajLoc.Column {
		return aiLoc.Column < ajLoc.Column
	}
	if ai.Kind != aj.Kind {
		return ai.Kind < aj.Kind
	}
	return ai.Data.Text < aj.Data.Text
}
+
// This is used to represent both file system paths (Namespace == "file") and
// abstract module paths (Namespace != "file"). Abstract module paths represent
// "virtual modules" when used for an input file and "package paths" when used
// to represent an external module.
type Path struct {
	Text      string
	Namespace string

	// This feature was added to support ancient CSS libraries that append things
	// like "?#iefix" and "#icons" to some of their import paths as a hack for IE6.
	// The intent is for these suffix parts to be ignored but passed through to
	// the output. This is supported by other bundlers, so we also support this.
	IgnoredSuffix string

	// Import attributes (the "with" keyword after an import) can affect path
	// resolution. In other words, two paths in the same file that are otherwise
	// equal but that have different import attributes may resolve to different
	// paths.
	ImportAttributes ImportAttributes

	// Bit flags for this path; see PathFlags for the possible values
	Flags PathFlags
}
+
// We rely on paths as map keys. Go doesn't support custom hash codes and
// only implements hash codes for certain types. In particular, hash codes
// are implemented for strings but not for arrays of strings. So we have to
// pack these import attributes into a string.
type ImportAttributes struct {
	packedData string
}

// ImportAttribute is one decoded key/value pair.
type ImportAttribute struct {
	Key   string
	Value string
}

// DecodeIntoArray unpacks the attributes as a slice ordered by key (the
// encoder sorts keys). Returning a sorted array instead of a map makes
// determinism easier. Returns nil when there are no attributes.
func (attrs ImportAttributes) DecodeIntoArray() (result []ImportAttribute) {
	remaining := attrs.packedData
	for remaining != "" {
		// Each key and value is preceded by its 4-byte little-endian length
		keyEnd := 4 + binary.LittleEndian.Uint32([]byte(remaining[:4]))
		key := remaining[4:keyEnd]
		remaining = remaining[keyEnd:]
		valueEnd := 4 + binary.LittleEndian.Uint32([]byte(remaining[:4]))
		value := remaining[4:valueEnd]
		remaining = remaining[valueEnd:]
		result = append(result, ImportAttribute{Key: key, Value: value})
	}
	return result
}

// DecodeIntoMap unpacks the attributes as a map, or nil when empty.
func (attrs ImportAttributes) DecodeIntoMap() map[string]string {
	array := attrs.DecodeIntoArray()
	if len(array) == 0 {
		return nil
	}
	result := make(map[string]string, len(array))
	for _, attr := range array {
		result[attr.Key] = attr.Value
	}
	return result
}

// EncodeImportAttributes packs a map of attributes into the deterministic
// string form described above: keys are sorted, and each key and value is
// preceded by its 4-byte little-endian length.
func EncodeImportAttributes(value map[string]string) ImportAttributes {
	if len(value) == 0 {
		return ImportAttributes{}
	}
	keys := make([]string, 0, len(value))
	for k := range value {
		keys = append(keys, k)
	}
	sort.Strings(keys)
	var sb strings.Builder
	var n [4]byte
	writePrefixed := func(s string) {
		binary.LittleEndian.PutUint32(n[:], uint32(len(s)))
		sb.Write(n[:])
		sb.WriteString(s)
	}
	for _, k := range keys {
		writePrefixed(k)
		writePrefixed(value[k])
	}
	return ImportAttributes{packedData: sb.String()}
}
+
// PathFlags is a bit set of boolean properties attached to a Path.
type PathFlags uint8

const (
	// This corresponds to a value of "false" in the "browser" package.json field
	PathDisabled PathFlags = 1 << iota
)

// IsDisabled reports whether this path was disabled (see PathDisabled).
func (p Path) IsDisabled() bool {
	return (p.Flags & PathDisabled) != 0
}
+
+var noColorResult bool
+var noColorOnce sync.Once
+
+func hasNoColorEnvironmentVariable() bool {
+	noColorOnce.Do(func() {
+		// Read "NO_COLOR" from the environment. This is a convention that some
+		// software follows. See https://no-color.org/ for more information.
+		if _, ok := os.LookupEnv("NO_COLOR"); ok {
+			noColorResult = true
+		}
+	})
+	return noColorResult
+}
+
// PlatformIndependentPathDirBaseExt splits "path" into its directory, base
// name, and extension, treating both "/" and "\" as separators and ignoring
// trailing separators. The root separator of an absolute path ("/" on Unix,
// "X:/" or "X:\" on Windows) is kept as part of the returned directory.
// For ".module.css" files the double extension is returned as a unit.
//
// This has a custom implementation instead of using "filepath.Dir/Base/Ext"
// because it should work the same on Unix and Windows. These names end up in
// the generated output and the generated output should not depend on the OS.
func PlatformIndependentPathDirBaseExt(path string) (dir string, base string, ext string) {
	absRootSlash := -1

	// Make sure we don't strip off the slash for the root of the file system
	if len(path) > 0 && (path[0] == '/' || path[0] == '\\') {
		absRootSlash = 0 // Unix
	} else if len(path) > 2 && path[1] == ':' && (path[2] == '/' || path[2] == '\\') {
		// Fix: this previously used "c < 'z'", which wrongly excluded the
		// drive letter "z:" from being recognized as a Windows root
		if c := path[0]; (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
			absRootSlash = 2 // Windows
		}
	}

	for {
		i := strings.LastIndexAny(path, "/\\")

		// Stop if there are no more slashes
		if i < 0 {
			base = path
			break
		}

		// Stop if we found a non-trailing slash
		if i == absRootSlash {
			// Keep the root slash itself as part of the directory
			dir, base = path[:i+1], path[i+1:]
			break
		}
		if i+1 != len(path) {
			dir, base = path[:i], path[i+1:]
			break
		}

		// Ignore trailing slashes
		path = path[:i]
	}

	// Strip off the extension
	if dot := strings.LastIndexByte(base, '.'); dot >= 0 {
		ext = base[dot:]

		// We default to the "local-css" loader for ".module.css" files. Make sure
		// the string names generated by this don't all have "_module_" in them.
		if ext == ".css" {
			if dot2 := strings.LastIndexByte(base[:dot], '.'); dot2 >= 0 && base[dot2:] == ".module.css" {
				dot = dot2
				ext = base[dot:]
			}
		}

		base = base[:dot]
	}
	return
}
+
// Source is one input file (or virtual module) being processed.
type Source struct {
	// This is used for error messages and the metadata JSON file.
	//
	// This is a mostly platform-independent path. It's relative to the current
	// working directory and always uses standard path separators. Use this for
	// referencing a file in all output data. These paths still use the original
	// case of the path so they may still work differently on file systems that
	// are case-insensitive vs. case-sensitive.
	PrettyPath string

	// An identifier that is mixed in to automatically-generated symbol names to
	// improve readability. For example, if the identifier is "util" then the
	// symbol for an "export default" statement will be called "util_default".
	IdentifierName string

	// The raw text of this file
	Contents string

	// This is used as a unique key to identify this source file. It should never
	// be shown to the user (e.g. never print this to the terminal).
	//
	// If it's marked as an absolute path, it's a platform-dependent path that
	// includes environment-specific things such as Windows backslash path
	// separators and potentially the user's home directory. Only use this for
	// passing to syscalls for reading and writing to the file system. Do not
	// include this in any output data.
	//
	// If it's marked as not an absolute path, it's an opaque string that is used
	// to refer to an automatically-generated module.
	KeyPath Path

	// This source's unique index (presumably its position in the build's file
	// list — confirm at the assignment site)
	Index uint32
}
+
+func (s *Source) TextForRange(r Range) string {
+	return s.Contents[r.Loc.Start : r.Loc.Start+r.Len]
+}
+
+func (s *Source) LocBeforeWhitespace(loc Loc) Loc {
+	for loc.Start > 0 {
+		c, width := utf8.DecodeLastRuneInString(s.Contents[:loc.Start])
+		if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
+			break
+		}
+		loc.Start -= int32(width)
+	}
+	return loc
+}
+
+func (s *Source) RangeOfOperatorBefore(loc Loc, op string) Range {
+	text := s.Contents[:loc.Start]
+	index := strings.LastIndex(text, op)
+	if index >= 0 {
+		return Range{Loc: Loc{Start: int32(index)}, Len: int32(len(op))}
+	}
+	return Range{Loc: loc}
+}
+
+func (s *Source) RangeOfOperatorAfter(loc Loc, op string) Range {
+	text := s.Contents[loc.Start:]
+	index := strings.Index(text, op)
+	if index >= 0 {
+		return Range{Loc: Loc{Start: loc.Start + int32(index)}, Len: int32(len(op))}
+	}
+	return Range{Loc: loc}
+}
+
// RangeOfString returns the range of the string literal starting at "loc",
// including the quotes. Backslash escapes are skipped when searching for the
// closing quote. For template literals only no-substitution templates are
// matched (a "${" aborts the search). Returns a zero-length range at "loc"
// when no complete literal is found.
func (s *Source) RangeOfString(loc Loc) Range {
	text := s.Contents[loc.Start:]
	if len(text) == 0 {
		return Range{Loc: loc, Len: 0}
	}

	quote := text[0]
	if quote == '"' || quote == '\'' {
		// Search for the matching quote character
		for i := 1; i < len(text); i++ {
			c := text[i]
			if c == quote {
				return Range{Loc: loc, Len: int32(i + 1)}
			} else if c == '\\' {
				// Skip the escaped character
				i += 1
			}
		}
	}

	if quote == '`' {
		// Search for the matching quote character
		for i := 1; i < len(text); i++ {
			c := text[i]
			if c == quote {
				return Range{Loc: loc, Len: int32(i + 1)}
			} else if c == '\\' {
				i += 1
			} else if c == '$' && i+1 < len(text) && text[i+1] == '{' {
				break // Only return the range for no-substitution template literals
			}
		}
	}

	return Range{Loc: loc, Len: 0}
}
+
// RangeOfNumber returns the range of the numeric literal starting at "loc":
// a leading decimal digit followed by any run of alphanumerics, "." or "_"
// (which covers hex/binary prefixes, exponents, and digit separators).
// Returns a zero-length range when "loc" doesn't start with a digit.
func (s *Source) RangeOfNumber(loc Loc) (r Range) {
	text := s.Contents[loc.Start:]
	r = Range{Loc: loc, Len: 0}

	if len(text) > 0 {
		if c := text[0]; c >= '0' && c <= '9' {
			r.Len = 1
			for int(r.Len) < len(text) {
				c := text[r.Len]
				if (c < '0' || c > '9') && (c < 'a' || c > 'z') && (c < 'A' || c > 'Z') && c != '.' && c != '_' {
					break
				}
				r.Len++
			}
		}
	}
	return
}

// RangeOfLegacyOctalEscape returns the range of the legacy octal escape
// sequence starting at "loc": a backslash followed by up to three decimal
// digits (total length 2-4). Returns a zero-length range when "loc" doesn't
// start with a backslash.
func (s *Source) RangeOfLegacyOctalEscape(loc Loc) (r Range) {
	text := s.Contents[loc.Start:]
	r = Range{Loc: loc, Len: 0}

	if len(text) >= 2 && text[0] == '\\' {
		r.Len = 2
		for r.Len < 4 && int(r.Len) < len(text) {
			c := text[r.Len]
			if c < '0' || c > '9' {
				break
			}
			r.Len++
		}
	}
	return
}
+
// CommentTextWithoutIndent returns the text of the "/* ... */" comment at
// "r" with the comment's own leading indentation stripped from every line
// after the first, so the comment can be re-emitted at a different indent
// level. Non-block comments are returned unchanged. Lines are rejoined with
// "\n" regardless of the original newline style.
func (s *Source) CommentTextWithoutIndent(r Range) string {
	text := s.Contents[r.Loc.Start:r.End()]
	if len(text) < 2 || !strings.HasPrefix(text, "/*") {
		return text
	}
	prefix := s.Contents[:r.Loc.Start]

	// Figure out the initial indent (the comment's starting column, counted
	// in runes back to the previous line break)
	indent := 0
seekBackwardToNewline:
	for len(prefix) > 0 {
		c, size := utf8.DecodeLastRuneInString(prefix)
		switch c {
		case '\r', '\n', '\u2028', '\u2029':
			break seekBackwardToNewline
		}
		prefix = prefix[:len(prefix)-size]
		indent++
	}

	// Split the comment into lines
	var lines []string
	start := 0
	for i, c := range text {
		switch c {
		case '\r', '\n':
			// Don't double-append for Windows style "\r\n" newlines
			if start <= i {
				lines = append(lines, text[start:i])
			}

			start = i + 1

			// Ignore the second part of Windows style "\r\n" newlines
			if c == '\r' && start < len(text) && text[start] == '\n' {
				start++
			}

		case '\u2028', '\u2029':
			// These separators are 3 bytes long in UTF-8
			lines = append(lines, text[start:i])
			start = i + 3
		}
	}
	lines = append(lines, text[start:])

	// Find the minimum indent over all lines after the first line
	for _, line := range lines[1:] {
		lineIndent := 0
		for _, c := range line {
			if c != ' ' && c != '\t' {
				break
			}
			lineIndent++
		}
		if indent > lineIndent {
			indent = lineIndent
		}
	}

	// Trim the indent off of all lines after the first line
	for i, line := range lines {
		if i > 0 {
			lines[i] = line[indent:]
		}
	}
	return strings.Join(lines, "\n")
}
+
// plural formats "count" with a pluralized "prefix", e.g. "1 error" or
// "2 errors". When only "shown" of them were displayed the text becomes
// "N of M errors"; when other messages were hidden but this category was
// complete it becomes "all M errors".
func plural(prefix string, count int, shown int, someAreMissing bool) string {
	suffix := "s"
	if count == 1 {
		suffix = ""
	}
	text := fmt.Sprintf("%d %s%s", count, prefix, suffix)
	switch {
	case shown < count:
		return fmt.Sprintf("%d of %s", shown, text)
	case someAreMissing && count > 1:
		return "all " + text
	default:
		return text
	}
}

// errorAndWarningSummary builds the "N warnings and M errors" suffix used
// in log summaries, omitting whichever category is empty.
func errorAndWarningSummary(errors int, warnings int, shownErrors int, shownWarnings int) string {
	someAreMissing := shownWarnings < warnings || shownErrors < errors
	if errors == 0 {
		return plural("warning", warnings, shownWarnings, someAreMissing)
	}
	if warnings == 0 {
		return plural("error", errors, shownErrors, someAreMissing)
	}
	return fmt.Sprintf("%s and %s",
		plural("warning", warnings, shownWarnings, someAreMissing),
		plural("error", errors, shownErrors, someAreMissing))
}
+
// APIKind identifies which public API surface invoked this build.
type APIKind uint8

const (
	GoAPI APIKind = iota
	CLIAPI
	JSAPI
)

// This can be used to customize error messages for the current API kind
var API APIKind

// TerminalInfo describes the capabilities of the output terminal.
type TerminalInfo struct {
	IsTTY           bool
	UseColorEscapes bool
	Width           int
	Height          int
}
+
// NewStderrLog returns a Log that writes formatted messages to stderr as
// they arrive. It honors the message limit in "options": once the limit is
// reached messages are counted but not printed, and some warnings are
// deferred so that a failing build's output always has room for at least
// one error. "Done" prints any remaining deferred warnings plus a summary.
func NewStderrLog(options OutputOptions) Log {
	var mutex sync.Mutex
	var msgs SortableMsgs
	terminalInfo := GetTerminalInfo(os.Stderr)
	errors := 0
	warnings := 0
	shownErrors := 0
	shownWarnings := 0
	hasErrors := false
	// A limit of 0 means "unlimited"
	remainingMessagesBeforeLimit := options.MessageLimit
	if remainingMessagesBeforeLimit == 0 {
		remainingMessagesBeforeLimit = 0x7FFFFFFF
	}
	var deferredWarnings []Msg

	finalizeLog := func() {
		// Print the deferred warning now if there was no error after all
		for remainingMessagesBeforeLimit > 0 && len(deferredWarnings) > 0 {
			shownWarnings++
			writeStringWithColor(os.Stderr, deferredWarnings[0].String(options, terminalInfo))
			deferredWarnings = deferredWarnings[1:]
			remainingMessagesBeforeLimit--
		}

		// Print out a summary
		if options.MessageLimit > 0 && errors+warnings > options.MessageLimit {
			writeStringWithColor(os.Stderr, fmt.Sprintf("%s shown (disable the message limit with --log-limit=0)\n",
				errorAndWarningSummary(errors, warnings, shownErrors, shownWarnings)))
		} else if options.LogLevel <= LevelInfo && (warnings != 0 || errors != 0) {
			writeStringWithColor(os.Stderr, fmt.Sprintf("%s\n",
				errorAndWarningSummary(errors, warnings, shownErrors, shownWarnings)))
		}
	}

	switch options.Color {
	case ColorNever:
		terminalInfo.UseColorEscapes = false
	case ColorAlways:
		terminalInfo.UseColorEscapes = SupportsColorEscapes
	}

	return Log{
		Level:     options.LogLevel,
		Overrides: options.Overrides,

		AddMsg: func(msg Msg) {
			mutex.Lock()
			defer mutex.Unlock()
			msgs = append(msgs, msg)

			// First pass: count and immediately print the kinds that are not
			// subject to the message limit
			switch msg.Kind {
			case Verbose:
				if options.LogLevel <= LevelVerbose {
					writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
				}

			case Debug:
				if options.LogLevel <= LevelDebug {
					writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
				}

			case Info:
				if options.LogLevel <= LevelInfo {
					writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
				}

			case Error:
				hasErrors = true
				if options.LogLevel <= LevelError {
					errors++
				}

			case Warning:
				if options.LogLevel <= LevelWarning {
					warnings++
				}
			}

			// Be silent if we're past the limit so we don't flood the terminal
			if remainingMessagesBeforeLimit == 0 {
				return
			}

			// Second pass: print errors and warnings within the limit
			switch msg.Kind {
			case Error:
				if options.LogLevel <= LevelError {
					shownErrors++
					writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
					remainingMessagesBeforeLimit--
				}

			case Warning:
				if options.LogLevel <= LevelWarning {
					if remainingMessagesBeforeLimit > (options.MessageLimit+1)/2 {
						shownWarnings++
						writeStringWithColor(os.Stderr, msg.String(options, terminalInfo))
						remainingMessagesBeforeLimit--
					} else {
						// If we have less than half of the slots left, wait for potential
						// future errors instead of using up all of the slots with warnings.
						// We want the log for a failed build to always have at least one
						// error in it.
						deferredWarnings = append(deferredWarnings, msg)
					}
				}
			}
		},

		HasErrors: func() bool {
			mutex.Lock()
			defer mutex.Unlock()
			return hasErrors
		},

		Peek: func() []Msg {
			mutex.Lock()
			defer mutex.Unlock()
			sort.Stable(msgs)
			return append([]Msg{}, msgs...)
		},

		Done: func() []Msg {
			mutex.Lock()
			defer mutex.Unlock()
			finalizeLog()
			sort.Stable(msgs)
			return msgs
		},
	}
}
+
+func PrintErrorToStderr(osArgs []string, text string) {
+	PrintMessageToStderr(osArgs, Msg{Kind: Error, Data: MsgData{Text: text}})
+}
+
+func PrintErrorWithNoteToStderr(osArgs []string, text string, note string) {
+	msg := Msg{
+		Kind: Error,
+		Data: MsgData{Text: text},
+	}
+	if note != "" {
+		msg.Notes = []MsgData{{Text: note}}
+	}
+	PrintMessageToStderr(osArgs, msg)
+}
+
+func OutputOptionsForArgs(osArgs []string) OutputOptions {
+	options := OutputOptions{IncludeSource: true}
+
+	// Implement a mini argument parser so these options always work even if we
+	// haven't yet gotten to the general-purpose argument parsing code
+	for _, arg := range osArgs {
+		switch arg {
+		case "--color=false":
+			options.Color = ColorNever
+		case "--color=true", "--color":
+			options.Color = ColorAlways
+		case "--log-level=info":
+			options.LogLevel = LevelInfo
+		case "--log-level=warning":
+			options.LogLevel = LevelWarning
+		case "--log-level=error":
+			options.LogLevel = LevelError
+		case "--log-level=silent":
+			options.LogLevel = LevelSilent
+		}
+	}
+
+	return options
+}
+
// PrintMessageToStderr renders a single message to stderr using output
// options derived from the command-line arguments. Done() flushes any
// deferred output and the summary line.
func PrintMessageToStderr(osArgs []string, msg Msg) {
	log := NewStderrLog(OutputOptionsForArgs(osArgs))
	log.AddMsg(msg)
	log.Done()
}
+
// Colors is a palette of ANSI escape sequences used when formatting log
// output. The zero value (all empty strings) renders plain uncolored text,
// which is how color output is disabled.
type Colors struct {
	Reset     string
	Bold      string
	Dim       string
	Underline string

	Red   string
	Green string
	Blue  string

	Cyan    string
	Magenta string
	Yellow  string

	// Combined background+foreground pairs used for the "[ERROR]"-style
	// kind badges in log messages
	RedBgRed     string
	RedBgWhite   string
	GreenBgGreen string
	GreenBgWhite string
	BlueBgBlue   string
	BlueBgWhite  string

	CyanBgCyan       string
	CyanBgBlack      string
	MagentaBgMagenta string
	MagentaBgBlack   string
	YellowBgYellow   string
	YellowBgBlack    string
}
+
// TerminalColors is the palette used when the output terminal supports ANSI
// SGR escape sequences.
var TerminalColors = Colors{
	Reset:     "\033[0m",
	Bold:      "\033[1m",
	Dim:       "\033[37m",
	Underline: "\033[4m",

	Red:   "\033[31m",
	Green: "\033[32m",
	Blue:  "\033[34m",

	Cyan:    "\033[36m",
	Magenta: "\033[35m",
	Yellow:  "\033[33m",

	RedBgRed:     "\033[41;31m",
	RedBgWhite:   "\033[41;97m",
	GreenBgGreen: "\033[42;32m",
	GreenBgWhite: "\033[42;97m",
	BlueBgBlue:   "\033[44;34m",
	BlueBgWhite:  "\033[44;97m",

	CyanBgCyan:       "\033[46;36m",
	CyanBgBlack:      "\033[46;30m",
	MagentaBgMagenta: "\033[45;35m",
	MagentaBgBlack:   "\033[45;30m",
	YellowBgYellow:   "\033[43;33m",
	YellowBgBlack:    "\033[43;30m",
}
+
+func PrintText(file *os.File, level LogLevel, osArgs []string, callback func(Colors) string) {
+	options := OutputOptionsForArgs(osArgs)
+
+	// Skip logging these if these logs are disabled
+	if options.LogLevel > level {
+		return
+	}
+
+	PrintTextWithColor(file, options.Color, callback)
+}
+
+func PrintTextWithColor(file *os.File, useColor UseColor, callback func(Colors) string) {
+	var useColorEscapes bool
+	switch useColor {
+	case ColorNever:
+		useColorEscapes = false
+	case ColorAlways:
+		useColorEscapes = SupportsColorEscapes
+	case ColorIfTerminal:
+		useColorEscapes = GetTerminalInfo(file).UseColorEscapes
+	}
+
+	var colors Colors
+	if useColorEscapes {
+		colors = TerminalColors
+	}
+	writeStringWithColor(file, callback(colors))
+}
+
// SummaryTableEntry is one output file row in the post-build summary.
type SummaryTableEntry struct {
	Dir         string
	Base        string
	Size        string
	Bytes       int
	IsSourceMap bool
}

// SummaryTable adapts a slice of entries to Go's native sort function.
type SummaryTable []SummaryTableEntry

func (s SummaryTable) Len() int          { return len(s) }
func (s SummaryTable) Swap(i int, j int) { s[i], s[j] = s[j], s[i] }

// Less orders entries: non-source-maps before source maps, then larger files
// first, then alphabetically by directory, then by file name.
func (s SummaryTable) Less(i int, j int) bool {
	a, b := s[i], s[j]

	// Source maps always sort after regular output files
	if a.IsSourceMap != b.IsSourceMap {
		return b.IsSourceMap
	}

	// Larger files come first
	if a.Bytes != b.Bytes {
		return a.Bytes > b.Bytes
	}

	// Break ties alphabetically: directory first, then file name
	if a.Dir != b.Dir {
		return a.Dir < b.Dir
	}
	return a.Base < b.Base
}
+
// Show a warning icon next to output files that are 1mb or larger
const sizeWarningThreshold = 1024 * 1024

// PrintSummary prints the post-build summary to stderr: a size-sorted table
// of output files (truncated and path-elided to fit the terminal) and, when
// start is non-nil, the elapsed build time. Color use follows useColor.
func PrintSummary(useColor UseColor, table SummaryTable, start *time.Time) {
	PrintTextWithColor(os.Stderr, useColor, func(colors Colors) string {
		isProbablyWindowsCommandPrompt := isProbablyWindowsCommandPrompt()
		sb := strings.Builder{}

		if len(table) > 0 {
			info := GetTerminalInfo(os.Stderr)

			// Truncate the table in case it's really long: show at most half
			// the terminal height, clamped to [5, ...] (or 20 rows when the
			// height is unknown)
			maxLength := info.Height / 2
			if info.Height == 0 {
				maxLength = 20
			} else if maxLength < 5 {
				maxLength = 5
			}
			length := len(table)
			sort.Sort(table)
			if length > maxLength {
				table = table[:maxLength]
			}

			// Compute the maximum width of the size column
			spacingBetweenColumns := 2
			hasSizeWarning := false
			maxPath := 0
			maxSize := 0
			for _, entry := range table {
				path := len(entry.Dir) + len(entry.Base)
				size := len(entry.Size) + spacingBetweenColumns
				if path > maxPath {
					maxPath = path
				}
				if size > maxSize {
					maxSize = size
				}
				if !entry.IsSourceMap && entry.Bytes >= sizeWarningThreshold {
					hasSizeWarning = true
				}
			}

			margin := "  "
			layoutWidth := info.Width
			if layoutWidth < 1 {
				layoutWidth = defaultTerminalWidth
			}
			layoutWidth -= 2 * len(margin)
			if hasSizeWarning {
				// Add space for the warning icon
				layoutWidth -= 2
			}
			if layoutWidth > maxPath+maxSize {
				layoutWidth = maxPath + maxSize
			}
			sb.WriteByte('\n')

			for _, entry := range table {
				dir, base := entry.Dir, entry.Base
				pathWidth := layoutWidth - maxSize

				// Truncate the path with "..." to fit on one line
				if len(dir)+len(base) > pathWidth {
					// Trim the directory from the front, leaving the trailing slash
					if len(dir) > 0 {
						n := pathWidth - len(base) - 3
						if n < 1 {
							n = 1
						}
						dir = "..." + dir[len(dir)-n:]
					}

					// Trim the file name from the back
					if len(dir)+len(base) > pathWidth {
						n := pathWidth - len(dir) - 3
						if n < 0 {
							n = 0
						}
						base = base[:n] + "..."
					}
				}

				// Right-align the size column
				spacer := layoutWidth - len(entry.Size) - len(dir) - len(base)
				if spacer < 0 {
					spacer = 0
				}

				// Put a warning next to the size if it's above a certain threshold
				sizeColor := colors.Cyan
				sizeWarning := ""
				if !entry.IsSourceMap && entry.Bytes >= sizeWarningThreshold {
					sizeColor = colors.Yellow

					// Emoji don't work in Windows Command Prompt
					if !isProbablyWindowsCommandPrompt {
						sizeWarning = " ⚠️"
					}
				}

				sb.WriteString(fmt.Sprintf("%s%s%s%s%s%s%s%s%s%s%s%s\n",
					margin,
					colors.Dim,
					dir,
					colors.Reset,
					colors.Bold,
					base,
					colors.Reset,
					strings.Repeat(" ", spacer),
					sizeColor,
					entry.Size,
					sizeWarning,
					colors.Reset,
				))
			}

			// Say how many remaining files are not shown
			if length > maxLength {
				plural := "s"
				if length == maxLength+1 {
					plural = ""
				}
				sb.WriteString(fmt.Sprintf("%s%s...and %d more output file%s...%s\n", margin, colors.Dim, length-maxLength, plural, colors.Reset))
			}
		}
		sb.WriteByte('\n')

		lightningSymbol := "⚡ "

		// Emoji don't work in Windows Command Prompt
		if isProbablyWindowsCommandPrompt {
			lightningSymbol = ""
		}

		// Printing the time taken is optional
		if start != nil {
			sb.WriteString(fmt.Sprintf("%s%sDone in %dms%s\n",
				lightningSymbol,
				colors.Green,
				time.Since(*start).Milliseconds(),
				colors.Reset,
			))
		}

		return sb.String()
	})
}
+
// DeferLogKind controls which message kinds a deferred log records.
type DeferLogKind uint8

const (
	DeferLogAll DeferLogKind = iota // record every message kind
	DeferLogNoVerboseOrDebug        // drop Verbose and Debug messages
)
+
+func NewDeferLog(kind DeferLogKind, overrides map[MsgID]LogLevel) Log {
+	var msgs SortableMsgs
+	var mutex sync.Mutex
+	var hasErrors bool
+
+	return Log{
+		Level:     LevelInfo,
+		Overrides: overrides,
+
+		AddMsg: func(msg Msg) {
+			if kind == DeferLogNoVerboseOrDebug && (msg.Kind == Verbose || msg.Kind == Debug) {
+				return
+			}
+			mutex.Lock()
+			defer mutex.Unlock()
+			if msg.Kind == Error {
+				hasErrors = true
+			}
+			msgs = append(msgs, msg)
+		},
+
+		HasErrors: func() bool {
+			mutex.Lock()
+			defer mutex.Unlock()
+			return hasErrors
+		},
+
+		Peek: func() []Msg {
+			mutex.Lock()
+			defer mutex.Unlock()
+			return append([]Msg{}, msgs...)
+		},
+
+		Done: func() []Msg {
+			mutex.Lock()
+			defer mutex.Unlock()
+			sort.Stable(msgs)
+			return msgs
+		},
+	}
+}
+
// UseColor selects when ANSI color escapes are emitted in log output.
type UseColor uint8

const (
	ColorIfTerminal UseColor = iota // color only when the output is a terminal (default)
	ColorNever
	ColorAlways
)
+
// OutputOptions configures how log messages are rendered.
type OutputOptions struct {
	MessageLimit  int  // maximum messages to print before summarizing; 0 means unlimited
	IncludeSource bool // include the annotated source snippet with each message
	Color         UseColor
	LogLevel      LogLevel
	Overrides     map[MsgID]LogLevel // per-message-ID log level overrides
}
+
// String renders the message, its notes, and (when IncludeSource is set) the
// annotated source snippets as one printable string.
func (msg Msg) String(options OutputOptions, terminalInfo TerminalInfo) string {
	// Format the message
	text := msgString(options.IncludeSource, terminalInfo, msg.ID, msg.Kind, msg.Data, msg.PluginName)

	// Format the notes
	var oldData MsgData
	for i, note := range msg.Notes {
		// Insert a blank line before a note when the previous block rendered
		// a source location or spanned multiple lines
		if options.IncludeSource && (i == 0 || strings.IndexByte(oldData.Text, '\n') >= 0 || oldData.Location != nil) {
			text += "\n"
		}
		text += msgString(options.IncludeSource, terminalInfo, MsgID_None, Note, note, "")
		oldData = note
	}

	// Add extra spacing between messages if source code is present
	if options.IncludeSource {
		text += "\n"
	}
	return text
}
+
+// The number of margin characters in addition to the line number
+const extraMarginChars = 9
+
+func marginWithLineText(maxMargin int, line int) string {
+	number := fmt.Sprintf("%d", line)
+	return fmt.Sprintf("      %s%s │ ", strings.Repeat(" ", maxMargin-len(number)), number)
+}
+
+func emptyMarginText(maxMargin int, isLast bool) string {
+	space := strings.Repeat(" ", maxMargin)
+	if isLast {
+		return fmt.Sprintf("      %s ╵ ", space)
+	}
+	return fmt.Sprintf("      %s │ ", space)
+}
+
// msgString renders a single message (or note) as text. Without source
// inclusion it produces a one-line "file: kind: text" form; with it, a
// colored kind badge, the message text, and the annotated source location.
// Notes are word-wrapped and linkified instead of getting a badge.
func msgString(includeSource bool, terminalInfo TerminalInfo, id MsgID, kind MsgKind, data MsgData, pluginName string) string {
	if !includeSource {
		if loc := data.Location; loc != nil {
			return fmt.Sprintf("%s: %s: %s\n", loc.File, kind.String(), data.Text)
		}
		return fmt.Sprintf("%s: %s\n", kind.String(), data.Text)
	}

	var colors Colors
	if terminalInfo.UseColorEscapes {
		colors = TerminalColors
	}

	var iconColor string
	var kindColorBrackets string
	var kindColorText string

	location := ""

	if data.Location != nil {
		// The margin must be wide enough for the largest line number shown
		maxMargin := len(fmt.Sprintf("%d", data.Location.Line))
		d := detailStruct(data, terminalInfo, maxMargin)

		if d.Suggestion != "" {
			// With a suggestion: an extra gutter row showing the suggested text
			location = fmt.Sprintf("\n    %s:%d:%d:\n%s%s%s%s%s%s\n%s%s%s%s%s\n%s%s%s%s%s\n%s",
				d.Path, d.Line, d.Column,
				colors.Dim, d.SourceBefore, colors.Green, d.SourceMarked, colors.Dim, d.SourceAfter,
				emptyMarginText(maxMargin, false), d.Indent, colors.Green, d.Marker, colors.Dim,
				emptyMarginText(maxMargin, true), d.Indent, colors.Green, d.Suggestion, colors.Reset,
				d.ContentAfter,
			)
		} else {
			location = fmt.Sprintf("\n    %s:%d:%d:\n%s%s%s%s%s%s\n%s%s%s%s%s\n%s",
				d.Path, d.Line, d.Column,
				colors.Dim, d.SourceBefore, colors.Green, d.SourceMarked, colors.Dim, d.SourceAfter,
				emptyMarginText(maxMargin, true), d.Indent, colors.Green, d.Marker, colors.Reset,
				d.ContentAfter,
			)
		}
	}

	switch kind {
	case Verbose:
		iconColor = colors.Cyan
		kindColorBrackets = colors.CyanBgCyan
		kindColorText = colors.CyanBgBlack

	case Debug:
		iconColor = colors.Green
		kindColorBrackets = colors.GreenBgGreen
		kindColorText = colors.GreenBgWhite

	case Info:
		iconColor = colors.Blue
		kindColorBrackets = colors.BlueBgBlue
		kindColorText = colors.BlueBgWhite

	case Error:
		iconColor = colors.Red
		kindColorBrackets = colors.RedBgRed
		kindColorText = colors.RedBgWhite

	case Warning:
		iconColor = colors.Yellow
		kindColorBrackets = colors.YellowBgYellow
		kindColorText = colors.YellowBgBlack

	case Note:
		// Notes render as indented, word-wrapped, linkified paragraphs and
		// return early without the badge formatting below
		sb := strings.Builder{}

		for _, line := range strings.Split(data.Text, "\n") {
			// Special-case word wrapping
			if wrapWidth := terminalInfo.Width; wrapWidth > 2 {
				if !data.DisableMaximumWidth && wrapWidth > 100 {
					wrapWidth = 100 // Enforce a maximum paragraph width for readability
				}
				for _, run := range wrapWordsInString(line, wrapWidth-2) {
					sb.WriteString("  ")
					sb.WriteString(linkifyText(run, colors.Underline, colors.Reset))
					sb.WriteByte('\n')
				}
				continue
			}

			// Otherwise, just write an indented line
			sb.WriteString("  ")
			sb.WriteString(linkifyText(line, colors.Underline, colors.Reset))
			sb.WriteByte('\n')
		}

		sb.WriteString(location)
		return sb.String()
	}

	if pluginName != "" {
		pluginName = fmt.Sprintf(" %s%s[plugin %s]%s", colors.Bold, colors.Magenta, pluginName, colors.Reset)
	}

	msgID := MsgIDToString(id)
	if msgID != "" {
		msgID = fmt.Sprintf(" [%s]", msgID)
	}

	return fmt.Sprintf("%s%s %s[%s%s%s]%s %s%s%s%s%s\n%s",
		iconColor, kind.Icon(),
		kindColorBrackets, kindColorText, kind.String(), kindColorBrackets, colors.Reset,
		colors.Bold, data.Text, colors.Reset, pluginName, msgID,
		location,
	)
}
+
// linkifyText wraps every "https://..." URL in text with the given underline
// and reset escape sequences. A URL runs to the next space, minus one
// trailing punctuation character. When underline is empty (colors disabled)
// the text is returned unchanged.
func linkifyText(text string, underline string, reset string) string {
	// Fast path: nothing to do, avoid allocating a builder
	if underline == "" || !strings.Contains(text, "https://") {
		return text
	}

	var b strings.Builder
	for {
		start := strings.Index(text, "https://")
		if start < 0 {
			break
		}

		// The URL ends at the next space, or at the end of the text
		end := len(text)
		if sp := strings.IndexByte(text[start:], ' '); sp >= 0 {
			end = start + sp
		}

		// Remove trailing punctuation
		if end > start {
			switch text[end-1] {
			case '.', ',', '?', '!', ')', ']', '}':
				end--
			}
		}

		b.WriteString(text[:start])
		b.WriteString(underline)
		b.WriteString(text[start:end])
		b.WriteString(reset)
		text = text[end:]
	}

	b.WriteString(text)
	return b.String()
}
+
// wrapWordsInString greedily word-wraps text into runs of roughly at most
// width columns, naively counting one column per code point. Whole words are
// never split; a word that is alone on a run may exceed the width. Always
// returns at least one run (possibly empty).
func wrapWordsInString(text string, width int) []string {
	runs := []string{}

outer:
	for text != "" {
		i := 0       // byte index into text
		x := 0       // estimated column position
		wordEndI := 0 // byte index just past the last complete word

		// Skip over any leading spaces
		for i < len(text) && text[i] == ' ' {
			i++
			x++
		}

		// Find out how many words will fit in this run
		for i < len(text) {
			oldWordEndI := wordEndI
			wordStartI := i

			// Find the end of the word
			for i < len(text) {
				c, width := utf8.DecodeRuneInString(text[i:])
				if c == ' ' {
					break
				}
				i += width
				x += 1 // Naively assume that each unicode code point is a single column
			}
			wordEndI = i

			// Split into a new run if this isn't the first word in the run and the end is past the width
			if wordStartI > 0 && x > width {
				runs = append(runs, text[:oldWordEndI])
				text = text[wordStartI:]
				continue outer
			}

			// Skip over any spaces after the word
			for i < len(text) && text[i] == ' ' {
				i++
				x++
			}
		}

		// If we get here, this is the last run (i.e. everything fits)
		break
	}

	// Remove any trailing spaces on the last run
	for len(text) > 0 && text[len(text)-1] == ' ' {
		text = text[:len(text)-1]
	}
	runs = append(runs, text)
	return runs
}
+
// MsgDetail is the pre-rendered layout of one annotated source line, as
// produced by detailStruct.
type MsgDetail struct {
	SourceBefore string // gutter margin plus line text before the marked range
	SourceMarked string // the marked range of the line itself
	SourceAfter  string // line text after the marked range

	Indent     string // spaces that align the marker under the marked range
	Marker     string // "^" or a run of "~" spanning the marked range
	Suggestion string

	ContentAfter string // lines after the first, newline-terminated when present

	Path   string
	Line   int
	Column int
}
+
// It's not common for large files to have many warnings. But when it happens,
// we want to make sure that it's not too slow. Source code locations are
// represented as byte offsets for compactness but transforming these to
// line/column locations for warning messages requires scanning through the
// file. A naive approach for this would cause O(n^2) scanning time for n
// warnings distributed throughout the file.
//
// Warnings are typically generated sequentially as the file is scanned. So
// one way of optimizing this is to just start scanning from where we left
// off last time instead of always starting from the beginning of the file.
// That's what this object does.
//
// Another option could be to eagerly populate an array of line/column offsets
// and then use binary search for each query. This might slow down the common
// case of a file with only at most a few warnings though, so think before
// optimizing too much. Performance in the zero or one warning case is by far
// the most important.
type LineColumnTracker struct {
	contents     string
	prettyPath   string
	offset       int32 // byte offset that the cached line state describes
	line         int32 // 0-based line number at offset
	lineStart    int32 // byte offset of the current line's start, valid when hasLineStart
	lineEnd      int32 // byte offset of the current line's end, valid when hasLineEnd
	hasLineStart bool
	hasLineEnd   bool
	hasSource    bool // false when constructed from a nil source; queries return nil
}
+
+func MakeLineColumnTracker(source *Source) LineColumnTracker {
+	if source == nil {
+		return LineColumnTracker{
+			hasSource: false,
+		}
+	}
+
+	return LineColumnTracker{
+		contents:     source.Contents,
+		prettyPath:   source.PrettyPath,
+		hasLineStart: true,
+		hasSource:    true,
+	}
+}
+
// MsgData pairs the message text with the resolved source location for the
// given range (nil location when the tracker has no source).
func (tracker *LineColumnTracker) MsgData(r Range, text string) MsgData {
	return MsgData{
		Text:     text,
		Location: tracker.MsgLocationOrNil(r),
	}
}
+
// scanTo moves the tracker's cached line state to the given byte offset,
// counting line breaks incrementally forward or backward from the previous
// position instead of rescanning from the start of the file.
func (t *LineColumnTracker) scanTo(offset int32) {
	contents := t.contents
	i := t.offset

	// Scan forward
	if i < offset {
		for {
			r, size := utf8.DecodeRuneInString(contents[i:])
			i += int32(size)

			switch r {
			case '\n':
				t.hasLineStart = true
				t.hasLineEnd = false
				t.lineStart = i
				// Only count the '\n' of a CRLF pair once: the '\r' already
				// incremented the line counter
				if i == int32(size) || contents[i-int32(size)-1] != '\r' {
					t.line++
				}

			case '\r', '\u2028', '\u2029':
				t.hasLineStart = true
				t.hasLineEnd = false
				t.lineStart = i
				t.line++
			}

			if i >= offset {
				t.offset = i
				return
			}
		}
	}

	// Scan backward
	if i > offset {
		for {
			r, size := utf8.DecodeLastRuneInString(contents[:i])
			i -= int32(size)

			switch r {
			case '\n':
				t.hasLineStart = false
				t.hasLineEnd = true
				t.lineEnd = i
				// Mirror of the forward CRLF handling above
				if i == 0 || contents[i-1] != '\r' {
					t.line--
				}

			case '\r', '\u2028', '\u2029':
				t.hasLineStart = false
				t.hasLineEnd = true
				t.lineEnd = i
				t.line--
			}

			if i <= offset {
				t.offset = i
				return
			}
		}
	}
}
+
// computeLineAndColumn returns the 0-based line number, the byte column
// within that line, and the byte offsets of the start and end of the line
// containing offset. It fills in whichever line boundary scanTo left unknown.
func (t *LineColumnTracker) computeLineAndColumn(offset int) (lineCount int, columnCount int, lineStart int, lineEnd int) {
	t.scanTo(int32(offset))

	// Scan for the start of the line
	if !t.hasLineStart {
		contents := t.contents
		i := t.offset
		for i > 0 {
			r, size := utf8.DecodeLastRuneInString(contents[:i])
			if r == '\n' || r == '\r' || r == '\u2028' || r == '\u2029' {
				break
			}
			i -= int32(size)
		}
		t.hasLineStart = true
		t.lineStart = i
	}

	// Scan for the end of the line
	if !t.hasLineEnd {
		contents := t.contents
		i := t.offset
		n := int32(len(contents))
		for i < n {
			r, size := utf8.DecodeRuneInString(contents[i:])
			if r == '\n' || r == '\r' || r == '\u2028' || r == '\u2029' {
				break
			}
			i += int32(size)
		}
		t.hasLineEnd = true
		t.lineEnd = i
	}

	return int(t.line), offset - int(t.lineStart), int(t.lineStart), int(t.lineEnd)
}
+
// MsgLocationOrNil resolves a byte range into a message location with a
// 1-based line number and the full text of the containing line. Returns nil
// when the tracker is nil or was built without a source.
func (tracker *LineColumnTracker) MsgLocationOrNil(r Range) *MsgLocation {
	if tracker == nil || !tracker.hasSource {
		return nil
	}

	// Convert the index into a line and column number
	lineCount, columnCount, lineStart, lineEnd := tracker.computeLineAndColumn(int(r.Loc.Start))

	return &MsgLocation{
		File:     tracker.prettyPath,
		Line:     lineCount + 1, // 0-based to 1-based
		Column:   columnCount,
		Length:   int(r.Len),
		LineText: tracker.contents[lineStart:lineEnd],
	}
}
+
// detailStruct splits the located source line into the pieces needed to
// render a caret/tilde annotation under it: the text before, inside, and
// after the marked range, the marker and its indentation, and any
// suggestion, all clipped and elided to fit the terminal width.
func detailStruct(data MsgData, terminalInfo TerminalInfo, maxMargin int) MsgDetail {
	// Only highlight the first line of the line text
	loc := *data.Location
	endOfFirstLine := len(loc.LineText)

	// Note: This uses "IndexByte" because Go implements this with SIMD, which
	// can matter a lot for really long lines. Some people pass huge >100mb
	// minified files as line text for the log message.
	if i := strings.IndexByte(loc.LineText, '\n'); i >= 0 {
		endOfFirstLine = i
	}

	firstLine := loc.LineText[:endOfFirstLine]
	afterFirstLine := loc.LineText[endOfFirstLine:]
	if afterFirstLine != "" && !strings.HasSuffix(afterFirstLine, "\n") {
		afterFirstLine += "\n"
	}

	// Clamp values in range
	if loc.Line < 0 {
		loc.Line = 0
	}
	if loc.Column < 0 {
		loc.Column = 0
	}
	if loc.Length < 0 {
		loc.Length = 0
	}
	if loc.Column > endOfFirstLine {
		loc.Column = endOfFirstLine
	}
	if loc.Length > endOfFirstLine-loc.Column {
		loc.Length = endOfFirstLine - loc.Column
	}

	// Marker positions are byte indices into the tab-expanded line text
	spacesPerTab := 2
	lineText := renderTabStops(firstLine, spacesPerTab)
	textUpToLoc := renderTabStops(firstLine[:loc.Column], spacesPerTab)
	markerStart := len(textUpToLoc)
	markerEnd := markerStart
	indent := strings.Repeat(" ", estimateWidthInTerminal(textUpToLoc))
	marker := "^"

	// Extend markers to cover the full range of the error
	if loc.Length > 0 {
		markerEnd = len(renderTabStops(firstLine[:loc.Column+loc.Length], spacesPerTab))
	}

	// Clip the marker to the bounds of the line
	if markerStart > len(lineText) {
		markerStart = len(lineText)
	}
	if markerEnd > len(lineText) {
		markerEnd = len(lineText)
	}
	if markerEnd < markerStart {
		markerEnd = markerStart
	}

	// Trim the line to fit the terminal width
	width := terminalInfo.Width
	if width < 1 {
		width = defaultTerminalWidth
	}
	width -= maxMargin + extraMarginChars
	if width < 1 {
		width = 1
	}
	if loc.Column == endOfFirstLine {
		// If the marker is at the very end of the line, the marker will be a "^"
		// character that extends one column past the end of the line. In this case
		// we should reserve a column at the end so the marker doesn't wrap.
		width -= 1
	}
	if len(lineText) > width {
		// Try to center the error
		sliceStart := (markerStart + markerEnd - width) / 2
		if sliceStart > markerStart-width/5 {
			sliceStart = markerStart - width/5
		}
		if sliceStart < 0 {
			sliceStart = 0
		}
		if sliceStart > len(lineText)-width {
			sliceStart = len(lineText) - width
		}
		sliceEnd := sliceStart + width

		// Slice the line
		slicedLine := lineText[sliceStart:sliceEnd]
		markerStart -= sliceStart
		markerEnd -= sliceStart
		if markerStart < 0 {
			markerStart = 0
		}
		if markerEnd > len(slicedLine) {
			markerEnd = len(slicedLine)
		}

		// Truncate the ends with "..."
		if len(slicedLine) > 3 && sliceStart > 0 {
			slicedLine = "..." + slicedLine[3:]
			if markerStart < 3 {
				markerStart = 3
			}
		}
		if len(slicedLine) > 3 && sliceEnd < len(lineText) {
			slicedLine = slicedLine[:len(slicedLine)-3] + "..."
			if markerEnd > len(slicedLine)-3 {
				markerEnd = len(slicedLine) - 3
			}
			if markerEnd < markerStart {
				markerEnd = markerStart
			}
		}

		// Now we can compute the indent
		lineText = slicedLine
		indent = strings.Repeat(" ", estimateWidthInTerminal(lineText[:markerStart]))
	}

	// If marker is still multi-character after clipping, make the marker wider
	if markerEnd-markerStart > 1 {
		marker = strings.Repeat("~", estimateWidthInTerminal(lineText[markerStart:markerEnd]))
	}

	// Put a margin before the marker indent
	margin := marginWithLineText(maxMargin, loc.Line)

	return MsgDetail{
		Path:   loc.File,
		Line:   loc.Line,
		Column: loc.Column,

		SourceBefore: margin + lineText[:markerStart],
		SourceMarked: lineText[markerStart:markerEnd],
		SourceAfter:  lineText[markerEnd:],

		Indent:     indent,
		Marker:     marker,
		Suggestion: loc.Suggestion,

		ContentAfter: afterFirstLine,
	}
}
+
// estimateWidthInTerminal approximates how many terminal columns the text
// occupies: one column per code point (less wrong than one per code unit,
// though still an approximation), except the zero-width no-break space
// U+FEFF (the UTF-8 BOM), which counts as zero.
func estimateWidthInTerminal(text string) int {
	columns := 0
	for _, r := range text {
		if r != '\uFEFF' {
			columns++
		}
	}
	return columns
}
+
// renderTabStops expands each tab in withTabs with spaces up to the next tab
// stop, where tab stops fall every spacesPerTab columns. Returns the input
// unchanged (without allocating) when it contains no tabs.
func renderTabStops(withTabs string, spacesPerTab int) string {
	if !strings.ContainsRune(withTabs, '\t') {
		return withTabs
	}

	withoutTabs := strings.Builder{}
	count := 0 // current column, counting expanded tabs

	for _, c := range withTabs {
		if c == '\t' {
			// Advance to the next multiple of spacesPerTab. Emitting the run
			// with strings.Repeat avoids a WriteRune call per space.
			spaces := spacesPerTab - count%spacesPerTab
			withoutTabs.WriteString(strings.Repeat(" ", spaces))
			count += spaces
		} else {
			withoutTabs.WriteRune(c)
			count++
		}
	}

	return withoutTabs.String()
}
+
// AddError records an error message located at range r in the tracked source.
func (log Log) AddError(tracker *LineColumnTracker, r Range, text string) {
	log.AddMsg(Msg{
		Kind: Error,
		Data: tracker.MsgData(r, text),
	})
}
+
// AddID records a message with the given ID and kind, applying any log level
// override registered for that ID (which may change the kind or silence the
// message entirely).
func (log Log) AddID(id MsgID, kind MsgKind, tracker *LineColumnTracker, r Range, text string) {
	if override, ok := allowOverride(log.Overrides, id, kind); ok {
		log.AddMsg(Msg{
			ID:   id,
			Kind: override,
			Data: tracker.MsgData(r, text),
		})
	}
}
+
// AddErrorWithNotes records an error message with attached explanatory notes.
func (log Log) AddErrorWithNotes(tracker *LineColumnTracker, r Range, text string, notes []MsgData) {
	log.AddMsg(Msg{
		Kind:  Error,
		Data:  tracker.MsgData(r, text),
		Notes: notes,
	})
}
+
// AddIDWithNotes is AddID with attached explanatory notes.
func (log Log) AddIDWithNotes(id MsgID, kind MsgKind, tracker *LineColumnTracker, r Range, text string, notes []MsgData) {
	if override, ok := allowOverride(log.Overrides, id, kind); ok {
		log.AddMsg(Msg{
			ID:    id,
			Kind:  override,
			Data:  tracker.MsgData(r, text),
			Notes: notes,
		})
	}
}
+
// AddMsgID stamps msg with id, applies any log level override registered for
// that id, and records the message unless the override silences it.
func (log Log) AddMsgID(id MsgID, msg Msg) {
	if override, ok := allowOverride(log.Overrides, id, msg.Kind); ok {
		msg.ID = id
		msg.Kind = override
		log.AddMsg(msg)
	}
}
+
+func allowOverride(overrides map[MsgID]LogLevel, id MsgID, kind MsgKind) (MsgKind, bool) {
+	if logLevel, ok := overrides[id]; ok {
+		switch logLevel {
+		case LevelVerbose:
+			return Verbose, true
+		case LevelDebug:
+			return Debug, true
+		case LevelInfo:
+			return Info, true
+		case LevelWarning:
+			return Warning, true
+		case LevelError:
+			return Error, true
+		default:
+			// Setting the log level to "silent" silences this log message
+			return MsgKind(0), false
+		}
+	}
+	return kind, true
+}
+
// StringInJSTableEntry maps a position inside contents embedded in a JS
// string literal to the corresponding location in the enclosing JS file.
// Runs of columns with the same offset delta are run-length compressed into
// a single entry.
type StringInJSTableEntry struct {
	innerLine   int32 // 1-based line within the embedded contents
	innerColumn int32 // byte column within that line
	innerLoc    Loc   // byte offset within the embedded contents
	outerLoc    Loc   // byte offset within the outer JS source
}
+
// For Yarn PnP we sometimes parse JSON embedded in a JS string. This generates
// a table that remaps locations inside the embedded JSON string literal into
// locations in the actual JS file, which makes them easier to understand.
// The outer walk assumes the string literal's escape syntax is already valid.
func GenerateStringInJSTable(outerContents string, outerStringLiteralLoc Loc, innerContents string) (table []StringInJSTableEntry) {
	i := int32(0)
	n := int32(len(innerContents))
	line := int32(1)
	column := int32(0)
	loc := Loc{Start: outerStringLiteralLoc.Start + 1} // skip the opening quote

	for i < n {
		// Ignore line continuations. A line continuation is not an escaped newline.
		for {
			if c, _ := utf8.DecodeRuneInString(outerContents[loc.Start:]); c != '\\' {
				break
			}
			c, width := utf8.DecodeRuneInString(outerContents[loc.Start+1:])
			switch c {
			case '\n', '\r', '\u2028', '\u2029':
				loc.Start += 1 + int32(width)
				if c == '\r' && outerContents[loc.Start] == '\n' {
					// Make sure Windows CRLF counts as a single newline
					loc.Start++
				}
				continue
			}
			break
		}

		c, width := utf8.DecodeRuneInString(innerContents[i:])

		// Compress the table using run-length encoding
		table = append(table, StringInJSTableEntry{innerLine: line, innerColumn: column, innerLoc: Loc{Start: i}, outerLoc: loc})
		if len(table) > 1 {
			// Drop the new entry when it continues the previous entry's run
			// (same line, same inner-to-outer offset delta)
			if last := table[len(table)-2]; line == last.innerLine && loc.Start-column == last.outerLoc.Start-last.innerColumn {
				table = table[:len(table)-1]
			}
		}

		// Advance the inner line/column
		switch c {
		case '\n', '\r', '\u2028', '\u2029':
			line++
			column = 0

			// Handle newlines on Windows
			if c == '\r' && i+1 < n && innerContents[i+1] == '\n' {
				i++
			}

		default:
			column += int32(width)
		}
		i += int32(width)

		// Advance the outer loc, assuming the string syntax is already valid
		c, width = utf8.DecodeRuneInString(outerContents[loc.Start:])
		if c == '\r' && outerContents[loc.Start+1] == '\n' {
			// Handle newlines on Windows in template literal strings
			loc.Start += 2
		} else if c != '\\' {
			loc.Start += int32(width)
		} else {
			// Handle an escape sequence
			c, width = utf8.DecodeRuneInString(outerContents[loc.Start+1:])
			switch c {
			case 'x':
				// 2-digit hexadecimal
				loc.Start += 1 + 2

			case 'u':
				loc.Start++
				if outerContents[loc.Start] == '{' {
					// Variable-length
					for outerContents[loc.Start] != '}' {
						loc.Start++
					}
					loc.Start++
				} else {
					// Fixed-length
					loc.Start += 4
				}

			case '\n', '\r', '\u2028', '\u2029':
				// This will be handled by the next iteration
				break

			default:
				loc.Start += 1 + int32(width)
			}
		}
	}

	return
}
+
// RemapStringInJSLoc converts a byte offset inside the embedded contents
// into the corresponding offset in the outer JS file, using the table built
// by GenerateStringInJSTable. The table must be non-empty.
func RemapStringInJSLoc(table []StringInJSTableEntry, innerLoc Loc) Loc {
	count := len(table)
	index := 0

	// Binary search to find the previous entry
	for count > 0 {
		step := count / 2
		i := index + step
		if i+1 < len(table) {
			if entry := table[i+1]; entry.innerLoc.Start < innerLoc.Start {
				index = i + 1
				count -= step + 1
				continue
			}
		}
		count = step
	}

	entry := table[index]
	entry.outerLoc.Start += innerLoc.Start - entry.innerLoc.Start // Undo run-length compression
	return entry.outerLoc
}
+
// NewStringInJSLog wraps log so that message locations expressed in the
// embedded contents' line/column coordinates are remapped (via table) into
// locations in the outer JS source before being forwarded. Note lists are
// remapped in place.
func NewStringInJSLog(log Log, outerTracker *LineColumnTracker, table []StringInJSTableEntry) Log {
	oldAddMsg := log.AddMsg

	remapLineAndColumnToLoc := func(line int32, column int32) Loc {
		count := len(table)
		index := 0

		// Binary search to find the previous entry
		for count > 0 {
			step := count / 2
			i := index + step
			if i+1 < len(table) {
				if entry := table[i+1]; entry.innerLine < line || (entry.innerLine == line && entry.innerColumn < column) {
					index = i + 1
					count -= step + 1
					continue
				}
			}
			count = step
		}

		entry := table[index]
		entry.outerLoc.Start += column - entry.innerColumn // Undo run-length compression
		return entry.outerLoc
	}

	remapData := func(data MsgData) MsgData {
		if data.Location == nil {
			return data
		}

		// Generate a range in the outer source using the line/column/length in the inner source
		r := Range{Loc: remapLineAndColumnToLoc(int32(data.Location.Line), int32(data.Location.Column))}
		if data.Location.Length != 0 {
			r.Len = remapLineAndColumnToLoc(int32(data.Location.Line), int32(data.Location.Column+data.Location.Length)).Start - r.Loc.Start
		}

		// Use that range to look up the line in the outer source
		location := outerTracker.MsgData(r, data.Text).Location
		location.Suggestion = data.Location.Suggestion
		data.Location = location
		return data
	}

	log.AddMsg = func(msg Msg) {
		msg.Data = remapData(msg.Data)
		for i, note := range msg.Notes {
			msg.Notes[i] = remapData(note)
		}
		oldAddMsg(msg)
	}

	return log
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/logger_darwin.go b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_darwin.go
new file mode 100644
index 0000000..dac2a04
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_darwin.go
@@ -0,0 +1,34 @@
+//go:build darwin
+// +build darwin
+
+package logger
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Darwin terminals handle ANSI color escapes natively.
+const SupportsColorEscapes = true
+
+// GetTerminalInfo reports whether "file" refers to a terminal and, if so,
+// fills in the window size and whether color escapes should be emitted.
+func GetTerminalInfo(file *os.File) (info TerminalInfo) {
+	fd := file.Fd()
+
+	// Is this file descriptor a terminal? (TIOCGETA is the Darwin termios get)
+	if _, err := unix.IoctlGetTermios(int(fd), unix.TIOCGETA); err == nil {
+		info.IsTTY = true
+		info.UseColorEscapes = !hasNoColorEnvironmentVariable()
+
+		// Get the width of the window
+		if w, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ); err == nil {
+			info.Width = int(w.Col)
+			info.Height = int(w.Row)
+		}
+	}
+
+	return
+}
+
+// writeStringWithColor writes "text" verbatim; any ANSI escape sequences it
+// contains are passed straight through to the terminal.
+func writeStringWithColor(file *os.File, text string) {
+	file.WriteString(text)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/logger_linux.go b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_linux.go
new file mode 100644
index 0000000..b825cbb
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_linux.go
@@ -0,0 +1,34 @@
+//go:build linux
+// +build linux
+
+package logger
+
+import (
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+// Linux terminals handle ANSI color escapes natively.
+const SupportsColorEscapes = true
+
+// GetTerminalInfo reports whether "file" refers to a terminal and, if so,
+// fills in the window size and whether color escapes should be emitted.
+func GetTerminalInfo(file *os.File) (info TerminalInfo) {
+	fd := file.Fd()
+
+	// Is this file descriptor a terminal? (TCGETS is the Linux termios get)
+	if _, err := unix.IoctlGetTermios(int(fd), unix.TCGETS); err == nil {
+		info.IsTTY = true
+		info.UseColorEscapes = !hasNoColorEnvironmentVariable()
+
+		// Get the width of the window
+		if w, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ); err == nil {
+			info.Width = int(w.Col)
+			info.Height = int(w.Row)
+		}
+	}
+
+	return
+}
+
+// writeStringWithColor writes "text" verbatim; any ANSI escape sequences it
+// contains are passed straight through to the terminal.
+func writeStringWithColor(file *os.File, text string) {
+	file.WriteString(text)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/logger_other.go b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_other.go
new file mode 100644
index 0000000..7da8a85
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_other.go
@@ -0,0 +1,16 @@
+//go:build !darwin && !linux && !windows
+// +build !darwin,!linux,!windows
+
+package logger
+
+import "os"
+
+// Fallback for platforms without terminal detection: no color support.
+const SupportsColorEscapes = false
+
+// GetTerminalInfo always reports a non-TTY with zero size on this platform.
+func GetTerminalInfo(*os.File) TerminalInfo {
+	return TerminalInfo{}
+}
+
+// writeStringWithColor writes "text" verbatim with no color handling.
+func writeStringWithColor(file *os.File, text string) {
+	file.WriteString(text)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/logger_windows.go b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_windows.go
new file mode 100644
index 0000000..f2383ff
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/logger_windows.go
@@ -0,0 +1,136 @@
+//go:build windows
+// +build windows
+
+package logger
+
+import (
+	"os"
+	"strings"
+	"syscall"
+	"unsafe"
+)
+
+const SupportsColorEscapes = true
+
+// Console API entry points are resolved lazily from kernel32.dll at first use.
+var kernel32 = syscall.NewLazyDLL("kernel32.dll")
+var getConsoleMode = kernel32.NewProc("GetConsoleMode")
+var setConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute")
+var getConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo")
+
+// consoleScreenBufferInfo mirrors the memory layout of the Win32
+// CONSOLE_SCREEN_BUFFER_INFO struct so it can be filled in directly by
+// GetConsoleScreenBufferInfo via an unsafe pointer.
+type consoleScreenBufferInfo struct {
+	dwSizeX              int16
+	dwSizeY              int16
+	dwCursorPositionX    int16
+	dwCursorPositionY    int16
+	wAttributes          uint16
+	srWindowLeft         int16
+	srWindowTop          int16
+	srWindowRight        int16
+	srWindowBottom       int16
+	dwMaximumWindowSizeX int16
+	dwMaximumWindowSizeY int16
+}
+
+// GetTerminalInfo reports whether "file" is attached to a Windows console
+// and returns its size and whether color output should be used.
+func GetTerminalInfo(file *os.File) TerminalInfo {
+	fd := file.Fd()
+
+	// Is this file descriptor a terminal? GetConsoleMode succeeds (non-zero)
+	// only for console handles.
+	var unused uint32
+	isTTY, _, _ := syscall.Syscall(getConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&unused)), 0)
+
+	// Get the width of the window
+	var info consoleScreenBufferInfo
+	syscall.Syscall(getConsoleScreenBufferInfo.Addr(), 2, fd, uintptr(unsafe.Pointer(&info)), 0)
+
+	// NOTE(review): width/height leave one cell of slack (the "- 1") —
+	// presumably to avoid the console auto-wrapping at the last column;
+	// confirm against upstream esbuild.
+	return TerminalInfo{
+		IsTTY:           isTTY != 0,
+		Width:           int(info.dwSizeX) - 1,
+		Height:          int(info.dwSizeY) - 1,
+		UseColorEscapes: !hasNoColorEnvironmentVariable(),
+	}
+}
+
+// These mirror the Win32 console character attribute bits (1, 2, 4, 8, ...)
+// used with SetConsoleTextAttribute.
+const (
+	FOREGROUND_BLUE uint8 = 1 << iota
+	FOREGROUND_GREEN
+	FOREGROUND_RED
+	FOREGROUND_INTENSITY
+	BACKGROUND_BLUE
+	BACKGROUND_GREEN
+	BACKGROUND_RED
+	BACKGROUND_INTENSITY
+)
+
+// windowsEscapeSequenceMap translates the ANSI escape strings used elsewhere
+// in this package into equivalent Win32 console attribute bit sets. Escape
+// sequences not present in this map are left in the output unchanged.
+var windowsEscapeSequenceMap = map[string]uint8{
+	TerminalColors.Reset: FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE,
+	TerminalColors.Dim:   FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE,
+	TerminalColors.Bold:  FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | FOREGROUND_INTENSITY,
+
+	// Apparently underlines only work with the CJK locale on Windows :(
+	TerminalColors.Underline: FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE,
+
+	TerminalColors.Red:   FOREGROUND_RED,
+	TerminalColors.Green: FOREGROUND_GREEN,
+	TerminalColors.Blue:  FOREGROUND_BLUE,
+
+	TerminalColors.Cyan:    FOREGROUND_GREEN | FOREGROUND_BLUE,
+	TerminalColors.Magenta: FOREGROUND_RED | FOREGROUND_BLUE,
+	TerminalColors.Yellow:  FOREGROUND_RED | FOREGROUND_GREEN,
+
+	TerminalColors.RedBgRed:     FOREGROUND_RED | BACKGROUND_RED,
+	TerminalColors.RedBgWhite:   FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | BACKGROUND_RED,
+	TerminalColors.GreenBgGreen: FOREGROUND_GREEN | BACKGROUND_GREEN,
+	TerminalColors.GreenBgWhite: FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | BACKGROUND_GREEN,
+	TerminalColors.BlueBgBlue:   FOREGROUND_BLUE | BACKGROUND_BLUE,
+	TerminalColors.BlueBgWhite:  FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | BACKGROUND_BLUE,
+
+	TerminalColors.CyanBgCyan:       FOREGROUND_GREEN | FOREGROUND_BLUE | BACKGROUND_GREEN | BACKGROUND_BLUE,
+	TerminalColors.CyanBgBlack:      BACKGROUND_GREEN | BACKGROUND_BLUE,
+	TerminalColors.MagentaBgMagenta: FOREGROUND_RED | FOREGROUND_BLUE | BACKGROUND_RED | BACKGROUND_BLUE,
+	TerminalColors.MagentaBgBlack:   BACKGROUND_RED | BACKGROUND_BLUE,
+	TerminalColors.YellowBgYellow:   FOREGROUND_RED | FOREGROUND_GREEN | BACKGROUND_RED | BACKGROUND_GREEN,
+	TerminalColors.YellowBgBlack:    BACKGROUND_RED | BACKGROUND_GREEN,
+}
+
+// writeStringWithColor writes "text" to the console, translating recognized
+// ANSI escape sequences into SetConsoleTextAttribute calls. Unrecognized
+// escape sequences are written through unchanged.
+func writeStringWithColor(file *os.File, text string) {
+	fd := file.Fd()
+	i := 0
+
+	for i < len(text) {
+		// Find the escape (033 octal == 0x1B == ESC)
+		if text[i] != 033 {
+			i++
+			continue
+		}
+
+		// Find the 'm' that terminates the sequence, looking at most 8 bytes
+		// ahead (all sequences in windowsEscapeSequenceMap are short)
+		window := text[i:]
+		if len(window) > 8 {
+			window = window[:8]
+		}
+		m := strings.IndexByte(window, 'm')
+		if m == -1 {
+			i++
+			continue
+		}
+		m += i + 1
+
+		// Find the escape sequence; skip over it if it isn't one we translate
+		attributes, ok := windowsEscapeSequenceMap[text[i:m]]
+		if !ok {
+			i++
+			continue
+		}
+
+		// Write out the text before the escape sequence
+		file.WriteString(text[:i])
+
+		// Apply the escape sequence, then restart the scan on the remainder
+		text = text[m:]
+		i = 0
+		setConsoleTextAttribute.Call(fd, uintptr(attributes))
+	}
+
+	// Write out the remaining text
+	file.WriteString(text)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/logger/msg_ids.go b/source/vendor/github.com/evanw/esbuild/internal/logger/msg_ids.go
new file mode 100644
index 0000000..2e1e305
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/logger/msg_ids.go
@@ -0,0 +1,371 @@
+package logger
+
+// Most non-error log messages are given a message ID that can be used to set
+// the log level for that message. Errors do not get a message ID because you
+// cannot turn errors into non-errors (otherwise the build would incorrectly
+// succeed). Some internal log messages do not get a message ID because they
+// are part of verbose and/or internal debugging output. These messages use
+// "MsgID_None" instead.
+type MsgID = uint8
+
+// The FIRST/LAST markers below delimit contiguous ID ranges that are
+// addressed as a group by name (see StringToMsgIDs/MsgIDToString), so the
+// declaration order within each marked section must be preserved.
+const (
+	MsgID_None MsgID = iota
+
+	// JavaScript
+	MsgID_JS_AssertToWith
+	MsgID_JS_AssertTypeJSON
+	MsgID_JS_AssignToConstant
+	MsgID_JS_AssignToDefine
+	MsgID_JS_AssignToImport
+	MsgID_JS_CallImportNamespace
+	MsgID_JS_ClassNameWillThrow
+	MsgID_JS_CommonJSVariableInESM
+	MsgID_JS_DeleteSuperProperty
+	MsgID_JS_DirectEval
+	MsgID_JS_DuplicateCase
+	MsgID_JS_DuplicateClassMember
+	MsgID_JS_DuplicateObjectKey
+	MsgID_JS_EmptyImportMeta
+	MsgID_JS_EqualsNaN
+	MsgID_JS_EqualsNegativeZero
+	MsgID_JS_EqualsNewObject
+	MsgID_JS_HTMLCommentInJS
+	MsgID_JS_ImpossibleTypeof
+	MsgID_JS_IndirectRequire
+	MsgID_JS_PrivateNameWillThrow
+	MsgID_JS_SemicolonAfterReturn
+	MsgID_JS_SuspiciousBooleanNot
+	MsgID_JS_SuspiciousDefine
+	MsgID_JS_SuspiciousLogicalOperator
+	MsgID_JS_SuspiciousNullishCoalescing
+	MsgID_JS_ThisIsUndefinedInESM
+	MsgID_JS_UnsupportedDynamicImport
+	MsgID_JS_UnsupportedJSXComment
+	MsgID_JS_UnsupportedRegExp
+	MsgID_JS_UnsupportedRequireCall
+
+	// CSS
+	MsgID_CSS_CSSSyntaxError
+	MsgID_CSS_InvalidAtCharset
+	MsgID_CSS_InvalidAtImport
+	MsgID_CSS_InvalidAtLayer
+	MsgID_CSS_InvalidCalc
+	MsgID_CSS_JSCommentInCSS
+	MsgID_CSS_UndefinedComposesFrom
+	MsgID_CSS_UnsupportedAtCharset
+	MsgID_CSS_UnsupportedAtNamespace
+	MsgID_CSS_UnsupportedCSSProperty
+	MsgID_CSS_UnsupportedCSSNesting
+
+	// Bundler
+	MsgID_Bundler_AmbiguousReexport
+	MsgID_Bundler_DifferentPathCase
+	MsgID_Bundler_EmptyGlob
+	MsgID_Bundler_IgnoredBareImport
+	MsgID_Bundler_IgnoredDynamicImport
+	MsgID_Bundler_ImportIsUndefined
+	MsgID_Bundler_RequireResolveNotExternal
+
+	// Source maps
+	MsgID_SourceMap_InvalidSourceMappings
+	MsgID_SourceMap_SectionsInSourceMap
+	MsgID_SourceMap_MissingSourceMap
+	MsgID_SourceMap_UnsupportedSourceMapComment
+
+	// package.json
+	MsgID_PackageJSON_FIRST // Keep this first
+	MsgID_PackageJSON_DeadCondition
+	MsgID_PackageJSON_InvalidBrowser
+	MsgID_PackageJSON_InvalidImportsOrExports
+	MsgID_PackageJSON_InvalidSideEffects
+	MsgID_PackageJSON_InvalidType
+	MsgID_PackageJSON_LAST // Keep this last
+
+	// tsconfig.json
+	MsgID_TSConfigJSON_FIRST // Keep this first
+	MsgID_TSConfigJSON_Cycle
+	MsgID_TSConfigJSON_InvalidImportsNotUsedAsValues
+	MsgID_TSConfigJSON_InvalidJSX
+	MsgID_TSConfigJSON_InvalidPaths
+	MsgID_TSConfigJSON_InvalidTarget
+	MsgID_TSConfigJSON_InvalidTopLevelOption
+	MsgID_TSConfigJSON_Missing
+	MsgID_TSConfigJSON_LAST // Keep this last
+
+	MsgID_END // Keep this at the end (used only for tests)
+)
+
+// StringToMsgIDs records the log-level override for the message ID (or ID
+// range) named by "str" into "overrides". The group names "package.json" and
+// "tsconfig.json" set every ID in their FIRST..LAST range. Unrecognized
+// names are silently ignored.
+func StringToMsgIDs(str string, logLevel LogLevel, overrides map[MsgID]LogLevel) {
+	switch str {
+	// JS
+	case "assert-to-with":
+		overrides[MsgID_JS_AssertToWith] = logLevel
+	case "assert-type-json":
+		overrides[MsgID_JS_AssertTypeJSON] = logLevel
+	case "assign-to-constant":
+		overrides[MsgID_JS_AssignToConstant] = logLevel
+	case "assign-to-define":
+		overrides[MsgID_JS_AssignToDefine] = logLevel
+	case "assign-to-import":
+		overrides[MsgID_JS_AssignToImport] = logLevel
+	case "call-import-namespace":
+		overrides[MsgID_JS_CallImportNamespace] = logLevel
+	case "class-name-will-throw":
+		overrides[MsgID_JS_ClassNameWillThrow] = logLevel
+	case "commonjs-variable-in-esm":
+		overrides[MsgID_JS_CommonJSVariableInESM] = logLevel
+	case "delete-super-property":
+		overrides[MsgID_JS_DeleteSuperProperty] = logLevel
+	case "direct-eval":
+		overrides[MsgID_JS_DirectEval] = logLevel
+	case "duplicate-case":
+		overrides[MsgID_JS_DuplicateCase] = logLevel
+	case "duplicate-class-member":
+		overrides[MsgID_JS_DuplicateClassMember] = logLevel
+	case "duplicate-object-key":
+		overrides[MsgID_JS_DuplicateObjectKey] = logLevel
+	case "empty-import-meta":
+		overrides[MsgID_JS_EmptyImportMeta] = logLevel
+	case "equals-nan":
+		overrides[MsgID_JS_EqualsNaN] = logLevel
+	case "equals-negative-zero":
+		overrides[MsgID_JS_EqualsNegativeZero] = logLevel
+	case "equals-new-object":
+		overrides[MsgID_JS_EqualsNewObject] = logLevel
+	case "html-comment-in-js":
+		overrides[MsgID_JS_HTMLCommentInJS] = logLevel
+	case "impossible-typeof":
+		overrides[MsgID_JS_ImpossibleTypeof] = logLevel
+	case "indirect-require":
+		overrides[MsgID_JS_IndirectRequire] = logLevel
+	case "private-name-will-throw":
+		overrides[MsgID_JS_PrivateNameWillThrow] = logLevel
+	case "semicolon-after-return":
+		overrides[MsgID_JS_SemicolonAfterReturn] = logLevel
+	case "suspicious-boolean-not":
+		overrides[MsgID_JS_SuspiciousBooleanNot] = logLevel
+	case "suspicious-define":
+		overrides[MsgID_JS_SuspiciousDefine] = logLevel
+	case "suspicious-logical-operator":
+		overrides[MsgID_JS_SuspiciousLogicalOperator] = logLevel
+	case "suspicious-nullish-coalescing":
+		overrides[MsgID_JS_SuspiciousNullishCoalescing] = logLevel
+	case "this-is-undefined-in-esm":
+		overrides[MsgID_JS_ThisIsUndefinedInESM] = logLevel
+	case "unsupported-dynamic-import":
+		overrides[MsgID_JS_UnsupportedDynamicImport] = logLevel
+	case "unsupported-jsx-comment":
+		overrides[MsgID_JS_UnsupportedJSXComment] = logLevel
+	case "unsupported-regexp":
+		overrides[MsgID_JS_UnsupportedRegExp] = logLevel
+	case "unsupported-require-call":
+		overrides[MsgID_JS_UnsupportedRequireCall] = logLevel
+
+	// CSS
+	case "css-syntax-error":
+		overrides[MsgID_CSS_CSSSyntaxError] = logLevel
+	case "invalid-@charset":
+		overrides[MsgID_CSS_InvalidAtCharset] = logLevel
+	case "invalid-@import":
+		overrides[MsgID_CSS_InvalidAtImport] = logLevel
+	case "invalid-@layer":
+		overrides[MsgID_CSS_InvalidAtLayer] = logLevel
+	case "invalid-calc":
+		overrides[MsgID_CSS_InvalidCalc] = logLevel
+	case "js-comment-in-css":
+		overrides[MsgID_CSS_JSCommentInCSS] = logLevel
+	case "undefined-composes-from":
+		overrides[MsgID_CSS_UndefinedComposesFrom] = logLevel
+	case "unsupported-@charset":
+		overrides[MsgID_CSS_UnsupportedAtCharset] = logLevel
+	case "unsupported-@namespace":
+		overrides[MsgID_CSS_UnsupportedAtNamespace] = logLevel
+	case "unsupported-css-property":
+		overrides[MsgID_CSS_UnsupportedCSSProperty] = logLevel
+	case "unsupported-css-nesting":
+		overrides[MsgID_CSS_UnsupportedCSSNesting] = logLevel
+
+	// Bundler
+	case "ambiguous-reexport":
+		overrides[MsgID_Bundler_AmbiguousReexport] = logLevel
+	case "different-path-case":
+		overrides[MsgID_Bundler_DifferentPathCase] = logLevel
+	case "empty-glob":
+		overrides[MsgID_Bundler_EmptyGlob] = logLevel
+	case "ignored-bare-import":
+		overrides[MsgID_Bundler_IgnoredBareImport] = logLevel
+	case "ignored-dynamic-import":
+		overrides[MsgID_Bundler_IgnoredDynamicImport] = logLevel
+	case "import-is-undefined":
+		overrides[MsgID_Bundler_ImportIsUndefined] = logLevel
+	case "require-resolve-not-external":
+		overrides[MsgID_Bundler_RequireResolveNotExternal] = logLevel
+
+	// Source maps
+	case "invalid-source-mappings":
+		overrides[MsgID_SourceMap_InvalidSourceMappings] = logLevel
+	case "sections-in-source-map":
+		overrides[MsgID_SourceMap_SectionsInSourceMap] = logLevel
+	case "missing-source-map":
+		overrides[MsgID_SourceMap_MissingSourceMap] = logLevel
+	case "unsupported-source-map-comment":
+		overrides[MsgID_SourceMap_UnsupportedSourceMapComment] = logLevel
+
+	case "package.json":
+		for i := MsgID_PackageJSON_FIRST; i <= MsgID_PackageJSON_LAST; i++ {
+			overrides[i] = logLevel
+		}
+
+	case "tsconfig.json":
+		for i := MsgID_TSConfigJSON_FIRST; i <= MsgID_TSConfigJSON_LAST; i++ {
+			overrides[i] = logLevel
+		}
+
+	default:
+		// Ignore invalid entries since this message id may have
+		// been renamed/removed since when this code was written
+	}
+}
+
+// MsgIDToString is the inverse of StringToMsgIDs: it returns the external
+// name for "id", or "" if the ID has no external name. Every ID in the
+// package.json and tsconfig.json ranges maps back to its group name.
+func MsgIDToString(id MsgID) string {
+	switch id {
+	// JS
+	case MsgID_JS_AssertToWith:
+		return "assert-to-with"
+	case MsgID_JS_AssertTypeJSON:
+		return "assert-type-json"
+	case MsgID_JS_AssignToConstant:
+		return "assign-to-constant"
+	case MsgID_JS_AssignToDefine:
+		return "assign-to-define"
+	case MsgID_JS_AssignToImport:
+		return "assign-to-import"
+	case MsgID_JS_CallImportNamespace:
+		return "call-import-namespace"
+	case MsgID_JS_ClassNameWillThrow:
+		return "class-name-will-throw"
+	case MsgID_JS_CommonJSVariableInESM:
+		return "commonjs-variable-in-esm"
+	case MsgID_JS_DeleteSuperProperty:
+		return "delete-super-property"
+	case MsgID_JS_DirectEval:
+		return "direct-eval"
+	case MsgID_JS_DuplicateCase:
+		return "duplicate-case"
+	case MsgID_JS_DuplicateClassMember:
+		return "duplicate-class-member"
+	case MsgID_JS_DuplicateObjectKey:
+		return "duplicate-object-key"
+	case MsgID_JS_EmptyImportMeta:
+		return "empty-import-meta"
+	case MsgID_JS_EqualsNaN:
+		return "equals-nan"
+	case MsgID_JS_EqualsNegativeZero:
+		return "equals-negative-zero"
+	case MsgID_JS_EqualsNewObject:
+		return "equals-new-object"
+	case MsgID_JS_HTMLCommentInJS:
+		return "html-comment-in-js"
+	case MsgID_JS_ImpossibleTypeof:
+		return "impossible-typeof"
+	case MsgID_JS_IndirectRequire:
+		return "indirect-require"
+	case MsgID_JS_PrivateNameWillThrow:
+		return "private-name-will-throw"
+	case MsgID_JS_SemicolonAfterReturn:
+		return "semicolon-after-return"
+	case MsgID_JS_SuspiciousBooleanNot:
+		return "suspicious-boolean-not"
+	case MsgID_JS_SuspiciousDefine:
+		return "suspicious-define"
+	case MsgID_JS_SuspiciousLogicalOperator:
+		return "suspicious-logical-operator"
+	case MsgID_JS_SuspiciousNullishCoalescing:
+		return "suspicious-nullish-coalescing"
+	case MsgID_JS_ThisIsUndefinedInESM:
+		return "this-is-undefined-in-esm"
+	case MsgID_JS_UnsupportedDynamicImport:
+		return "unsupported-dynamic-import"
+	case MsgID_JS_UnsupportedJSXComment:
+		return "unsupported-jsx-comment"
+	case MsgID_JS_UnsupportedRegExp:
+		return "unsupported-regexp"
+	case MsgID_JS_UnsupportedRequireCall:
+		return "unsupported-require-call"
+
+	// CSS
+	case MsgID_CSS_CSSSyntaxError:
+		return "css-syntax-error"
+	case MsgID_CSS_InvalidAtCharset:
+		return "invalid-@charset"
+	case MsgID_CSS_InvalidAtImport:
+		return "invalid-@import"
+	case MsgID_CSS_InvalidAtLayer:
+		return "invalid-@layer"
+	case MsgID_CSS_InvalidCalc:
+		return "invalid-calc"
+	case MsgID_CSS_JSCommentInCSS:
+		return "js-comment-in-css"
+	case MsgID_CSS_UndefinedComposesFrom:
+		return "undefined-composes-from"
+	case MsgID_CSS_UnsupportedAtCharset:
+		return "unsupported-@charset"
+	case MsgID_CSS_UnsupportedAtNamespace:
+		return "unsupported-@namespace"
+	case MsgID_CSS_UnsupportedCSSProperty:
+		return "unsupported-css-property"
+	case MsgID_CSS_UnsupportedCSSNesting:
+		return "unsupported-css-nesting"
+
+	// Bundler
+	case MsgID_Bundler_AmbiguousReexport:
+		return "ambiguous-reexport"
+	case MsgID_Bundler_DifferentPathCase:
+		return "different-path-case"
+	case MsgID_Bundler_EmptyGlob:
+		return "empty-glob"
+	case MsgID_Bundler_IgnoredBareImport:
+		return "ignored-bare-import"
+	case MsgID_Bundler_IgnoredDynamicImport:
+		return "ignored-dynamic-import"
+	case MsgID_Bundler_ImportIsUndefined:
+		return "import-is-undefined"
+	case MsgID_Bundler_RequireResolveNotExternal:
+		return "require-resolve-not-external"
+
+	// Source maps
+	case MsgID_SourceMap_InvalidSourceMappings:
+		return "invalid-source-mappings"
+	case MsgID_SourceMap_SectionsInSourceMap:
+		return "sections-in-source-map"
+	case MsgID_SourceMap_MissingSourceMap:
+		return "missing-source-map"
+	case MsgID_SourceMap_UnsupportedSourceMapComment:
+		return "unsupported-source-map-comment"
+
+	default:
+		// Range-based IDs collapse to their group name
+		if id >= MsgID_PackageJSON_FIRST && id <= MsgID_PackageJSON_LAST {
+			return "package.json"
+		}
+		if id >= MsgID_TSConfigJSON_FIRST && id <= MsgID_TSConfigJSON_LAST {
+			return "tsconfig.json"
+		}
+	}
+
+	return ""
+}
+
+// Some message IDs are more diverse internally than externally (in case we
+// want to expand the set of them later on). So just map these to the largest
+// one arbitrarily since you can't tell the difference externally anyway.
+func StringToMaximumMsgID(id string) MsgID {
+	overrides := make(map[MsgID]LogLevel)
+	maxID := MsgID_None
+	// LevelInfo is arbitrary here — only the keys of "overrides" are used
+	StringToMsgIDs(id, LevelInfo, overrides)
+	for id := range overrides {
+		if id > maxID {
+			maxID = id
+		}
+	}
+	return maxID
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/renamer/renamer.go b/source/vendor/github.com/evanw/esbuild/internal/renamer/renamer.go
new file mode 100644
index 0000000..15a9bf6
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/renamer/renamer.go
@@ -0,0 +1,662 @@
+package renamer
+
+import (
+	"fmt"
+	"sort"
+	"strconv"
+	"sync"
+	"sync/atomic"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+)
+
+// ComputeReservedNames builds the set of names that renamers must never
+// generate: all JS keywords, all strict-mode reserved words, and the original
+// names of unbound or must-not-be-renamed symbols reachable from the given
+// module scopes. The map values are always 1 (the map is used as a set).
+func ComputeReservedNames(moduleScopes []*js_ast.Scope, symbols ast.SymbolMap) map[string]uint32 {
+	names := make(map[string]uint32)
+
+	// All keywords and strict mode reserved words are reserved names
+	for k := range js_lexer.Keywords {
+		names[k] = 1
+	}
+	for k := range js_lexer.StrictModeReservedWords {
+		names[k] = 1
+	}
+
+	// All unbound symbols must be reserved names
+	for _, scope := range moduleScopes {
+		computeReservedNamesForScope(scope, symbols, names)
+	}
+
+	return names
+}
+
+// computeReservedNamesForScope adds the names of unbound and pinned symbols
+// in "scope" (members and generated symbols) to the "names" set, recursing
+// into child scopes only along paths that contain a direct "eval".
+func computeReservedNamesForScope(scope *js_ast.Scope, symbols ast.SymbolMap, names map[string]uint32) {
+	for _, member := range scope.Members {
+		symbol := symbols.Get(member.Ref)
+		if symbol.Kind == ast.SymbolUnbound || symbol.Flags.Has(ast.MustNotBeRenamed) {
+			names[symbol.OriginalName] = 1
+		}
+	}
+	for _, ref := range scope.Generated {
+		symbol := symbols.Get(ref)
+		if symbol.Kind == ast.SymbolUnbound || symbol.Flags.Has(ast.MustNotBeRenamed) {
+			names[symbol.OriginalName] = 1
+		}
+	}
+
+	// If there's a direct "eval" somewhere inside the current scope, continue
+	// traversing down the scope tree until we find it to get all reserved names
+	if scope.ContainsDirectEval {
+		for _, child := range scope.Children {
+			if child.ContainsDirectEval {
+				computeReservedNamesForScope(child, symbols, names)
+			}
+		}
+	}
+}
+
+// Renamer maps symbol references to the names they should be printed as.
+type Renamer interface {
+	NameForSymbol(ref ast.Ref) string
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// noOpRenamer
+
+// noOpRenamer performs no renaming: every symbol keeps its original name.
+type noOpRenamer struct {
+	symbols ast.SymbolMap
+}
+
+// NewNoOpRenamer returns a Renamer that resolves symbol links but never
+// changes any names.
+func NewNoOpRenamer(symbols ast.SymbolMap) Renamer {
+	return &noOpRenamer{
+		symbols: symbols,
+	}
+}
+
+// NameForSymbol returns the original name of the symbol "ref" links to.
+func (r *noOpRenamer) NameForSymbol(ref ast.Ref) string {
+	ref = ast.FollowSymbols(r.symbols, ref)
+	return r.symbols.Get(ref).OriginalName
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// MinifyRenamer
+
+// symbolSlot is one minified-name slot: the assigned name plus its total use
+// count, which determines how short a name the slot deserves.
+type symbolSlot struct {
+	name               string
+	count              uint32
+	needsCapitalForJSX uint32 // This is really a bool but needs to be atomic
+}
+
+// MinifyRenamer assigns the shortest names to the most frequently used
+// symbols. Slots are grouped into one array per slot namespace (the [4]).
+type MinifyRenamer struct {
+	reservedNames        map[string]uint32
+	slots                [4][]symbolSlot
+	topLevelSymbolToSlot map[ast.Ref]uint32
+	symbols              ast.SymbolMap
+}
+
+// NewMinifyRenamer creates a MinifyRenamer whose per-namespace slot arrays
+// are pre-sized from "firstTopLevelSlots" and which never emits a name in
+// "reservedNames".
+func NewMinifyRenamer(symbols ast.SymbolMap, firstTopLevelSlots ast.SlotCounts, reservedNames map[string]uint32) *MinifyRenamer {
+	return &MinifyRenamer{
+		symbols:       symbols,
+		reservedNames: reservedNames,
+		slots: [4][]symbolSlot{
+			make([]symbolSlot, firstTopLevelSlots[0]),
+			make([]symbolSlot, firstTopLevelSlots[1]),
+			make([]symbolSlot, firstTopLevelSlots[2]),
+			make([]symbolSlot, firstTopLevelSlots[3]),
+		},
+		topLevelSymbolToSlot: make(map[ast.Ref]uint32),
+	}
+}
+
+// NameForSymbol returns the minified name assigned to the slot for "ref",
+// or the original name if the symbol is pinned or was never counted.
+func (r *MinifyRenamer) NameForSymbol(ref ast.Ref) string {
+	// Follow links to get to the underlying symbol
+	ref = ast.FollowSymbols(r.symbols, ref)
+	symbol := r.symbols.Get(ref)
+
+	// Skip this symbol if the name is pinned
+	ns := symbol.SlotNamespace()
+	if ns == ast.SlotMustNotBeRenamed {
+		return symbol.OriginalName
+	}
+
+	// Check if it's a nested scope symbol
+	i := symbol.NestedScopeSlot
+
+	// If it's not (i.e. it's in a top-level scope), look up the slot
+	if !i.IsValid() {
+		index, ok := r.topLevelSymbolToSlot[ref]
+		if !ok {
+			// If we get here, then we're printing a symbol that never had any
+			// recorded uses. This is odd but can happen in certain scenarios.
+			// For example, code in a branch with dead control flow won't mark
+			// any uses but may still be printed. In that case it doesn't matter
+			// what name we use since it's dead code.
+			return symbol.OriginalName
+		}
+		i = ast.MakeIndex32(index)
+	}
+
+	return r.slots[ns][i.GetIndex()].name
+}
+
+// The InnerIndex should be stable because the parser for a single file is
+// single-threaded and deterministically assigns out InnerIndex values
+// sequentially. But the SourceIndex should be unstable because the main thread
+// assigns out source index values sequentially to newly-discovered dependencies
+// in a multi-threaded producer/consumer relationship. So instead we use the
+// index of the source in the DFS order over all entry points for stability.
+type StableSymbolCount struct {
+	StableSourceIndex uint32
+	Ref               ast.Ref
+	Count             uint32
+}
+
+// This type is just so we can use Go's native sort function
+type StableSymbolCountArray []StableSymbolCount
+
+func (a StableSymbolCountArray) Len() int          { return len(a) }
+func (a StableSymbolCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less orders by descending use count, then by ascending stable source index
+// and inner index so that the sort is fully deterministic across builds.
+func (a StableSymbolCountArray) Less(i int, j int) bool {
+	ai, aj := a[i], a[j]
+	if ai.Count > aj.Count {
+		return true
+	}
+	if ai.Count < aj.Count {
+		return false
+	}
+	if ai.StableSourceIndex < aj.StableSourceIndex {
+		return true
+	}
+	if ai.StableSourceIndex > aj.StableSourceIndex {
+		return false
+	}
+	return ai.Ref.InnerIndex < aj.Ref.InnerIndex
+}
+
+// AccumulateSymbolUseCounts feeds every symbol use in "symbolUses" into
+// AccumulateSymbolCount (see that method for the top-level/nested split).
+func (r *MinifyRenamer) AccumulateSymbolUseCounts(
+	topLevelSymbols *StableSymbolCountArray,
+	symbolUses map[ast.Ref]js_ast.SymbolUse,
+	stableSourceIndices []uint32,
+) {
+	// NOTE: This function is run in parallel. Make sure to avoid data races.
+
+	for ref, use := range symbolUses {
+		r.AccumulateSymbolCount(topLevelSymbols, ref, use.CountEstimate, stableSourceIndices)
+	}
+}
+
+// AccumulateSymbolCount adds "count" uses of "ref" to its slot. Nested-scope
+// symbols are counted immediately with atomics; top-level symbols are
+// appended to "topLevelSymbols" for later serial slot allocation.
+func (r *MinifyRenamer) AccumulateSymbolCount(
+	topLevelSymbols *StableSymbolCountArray,
+	ref ast.Ref,
+	count uint32,
+	stableSourceIndices []uint32,
+) {
+	// NOTE: This function is run in parallel. Make sure to avoid data races.
+
+	// Follow links to get to the underlying symbol, including through any
+	// namespace aliases (counting the namespace symbol instead)
+	ref = ast.FollowSymbols(r.symbols, ref)
+	symbol := r.symbols.Get(ref)
+	for symbol.NamespaceAlias != nil {
+		ref = ast.FollowSymbols(r.symbols, symbol.NamespaceAlias.NamespaceRef)
+		symbol = r.symbols.Get(ref)
+	}
+
+	// Skip this symbol if the name is pinned
+	ns := symbol.SlotNamespace()
+	if ns == ast.SlotMustNotBeRenamed {
+		return
+	}
+
+	// Check if it's a nested scope symbol
+	if i := symbol.NestedScopeSlot; i.IsValid() {
+		// If it is, accumulate the count using a parallel-safe atomic increment
+		slot := &r.slots[ns][i.GetIndex()]
+		atomic.AddUint32(&slot.count, count)
+		if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
+			atomic.StoreUint32(&slot.needsCapitalForJSX, 1)
+		}
+		return
+	}
+
+	// If it's a top-level symbol, defer it to later since we have
+	// to allocate slots for these in serial instead of in parallel
+	*topLevelSymbols = append(*topLevelSymbols, StableSymbolCount{
+		StableSourceIndex: stableSourceIndices[ref.SourceIndex],
+		Ref:               ref,
+		Count:             count,
+	})
+}
+
+// The parallel part of the symbol count accumulation algorithm above processes
+// nested symbols and generates an array of top-level symbols to process later.
+// After the parallel part has finished, that array of top-level symbols is passed
+// to this function which processes them in serial.
+func (r *MinifyRenamer) AllocateTopLevelSymbolSlots(topLevelSymbols StableSymbolCountArray) {
+	for _, stable := range topLevelSymbols {
+		symbol := r.symbols.Get(stable.Ref)
+		slots := &r.slots[symbol.SlotNamespace()]
+		if i, ok := r.topLevelSymbolToSlot[stable.Ref]; ok {
+			// Existing slot: fold this count into it
+			slot := &(*slots)[i]
+			slot.count += stable.Count
+			if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
+				slot.needsCapitalForJSX = 1
+			}
+		} else {
+			// First time seeing this symbol: append a fresh slot
+			needsCapitalForJSX := uint32(0)
+			if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
+				needsCapitalForJSX = 1
+			}
+			i = uint32(len(*slots))
+			*slots = append(*slots, symbolSlot{
+				count:              stable.Count,
+				needsCapitalForJSX: needsCapitalForJSX,
+			})
+			r.topLevelSymbolToSlot[stable.Ref] = i
+		}
+	}
+}
+
+// AssignNamesByFrequency gives each slot a minified name, handing out the
+// shortest names to the slots with the highest use counts. Reserved names,
+// keywords (for labels), and lowercase names for JSX-capitalized symbols are
+// skipped; private names get a "#" prefix.
+func (r *MinifyRenamer) AssignNamesByFrequency(minifier *ast.NameMinifier) {
+	for ns, slots := range r.slots {
+		// Sort symbols by count
+		sorted := make(slotAndCountArray, len(slots))
+		for i, item := range slots {
+			sorted[i] = slotAndCount{slot: uint32(i), count: item.count}
+		}
+		sort.Sort(sorted)
+
+		// Assign names to symbols
+		nextName := 0
+		for _, data := range sorted {
+			slot := &slots[data.slot]
+			name := minifier.NumberToMinifiedName(nextName)
+			nextName++
+
+			// Make sure we never generate a reserved name. We only have to worry
+			// about collisions with reserved identifiers for normal symbols, and we
+			// only have to worry about collisions with keywords for labels. We do
+			// not have to worry about either for private names because they start
+			// with a "#" character.
+			switch ast.SlotNamespace(ns) {
+			case ast.SlotDefault:
+				for r.reservedNames[name] != 0 {
+					name = minifier.NumberToMinifiedName(nextName)
+					nextName++
+				}
+
+				// Make sure names of symbols used in JSX elements start with a capital letter
+				if slot.needsCapitalForJSX != 0 {
+					for name[0] >= 'a' && name[0] <= 'z' {
+						name = minifier.NumberToMinifiedName(nextName)
+						nextName++
+					}
+				}
+
+			case ast.SlotLabel:
+				for js_lexer.Keywords[name] != 0 {
+					name = minifier.NumberToMinifiedName(nextName)
+					nextName++
+				}
+			}
+
+			// Private names must be prefixed with "#"
+			if ast.SlotNamespace(ns) == ast.SlotPrivateName {
+				name = "#" + name
+			}
+
+			slot.name = name
+		}
+	}
+}
+
+// AssignNestedScopeSlots assigns a nested-scope slot to every renamable
+// symbol below the module scope and returns the number of nested slots used
+// per namespace. Top-level symbols themselves are left without a slot.
+func AssignNestedScopeSlots(moduleScope *js_ast.Scope, symbols []ast.Symbol) (slotCounts ast.SlotCounts) {
+	// Temporarily set the nested scope slots of top-level symbols to valid so
+	// they aren't renamed in nested scopes. This prevents us from accidentally
+	// assigning nested scope slots to variables declared using "var" in a nested
+	// scope that are actually hoisted up to the module scope to become a top-
+	// level symbol.
+	validSlot := ast.MakeIndex32(1)
+	for _, member := range moduleScope.Members {
+		symbols[member.Ref.InnerIndex].NestedScopeSlot = validSlot
+	}
+	for _, ref := range moduleScope.Generated {
+		symbols[ref.InnerIndex].NestedScopeSlot = validSlot
+	}
+
+	// Assign nested scope slots independently for each nested scope
+	for _, child := range moduleScope.Children {
+		slotCounts.UnionMax(assignNestedScopeSlotsHelper(child, symbols, ast.SlotCounts{}))
+	}
+
+	// Then set the nested scope slots of top-level symbols back to zero. Top-
+	// level symbols are not supposed to have nested scope slots.
+	for _, member := range moduleScope.Members {
+		symbols[member.Ref.InnerIndex].NestedScopeSlot = ast.Index32{}
+	}
+	for _, ref := range moduleScope.Generated {
+		symbols[ref.InnerIndex].NestedScopeSlot = ast.Index32{}
+	}
+	return
+}
+
+// assignNestedScopeSlotsHelper assigns slots to the symbols of "scope" and
+// recurses into its children. Sibling scopes restart from the same "slot"
+// counts (their symbols can share names); the return value is the element-
+// wise maximum slot count over this whole subtree.
+func assignNestedScopeSlotsHelper(scope *js_ast.Scope, symbols []ast.Symbol, slot ast.SlotCounts) ast.SlotCounts {
+	// Sort member map keys for determinism
+	sortedMembers := make([]int, 0, len(scope.Members))
+	for _, member := range scope.Members {
+		sortedMembers = append(sortedMembers, int(member.Ref.InnerIndex))
+	}
+	sort.Ints(sortedMembers)
+
+	// Assign slots for this scope's symbols. Only do this if the slot is
+	// not already assigned. Nested scopes have copies of symbols from parent
+	// scopes and we want to use the slot from the parent scope, not child scopes.
+	for _, innerIndex := range sortedMembers {
+		symbol := &symbols[innerIndex]
+		if ns := symbol.SlotNamespace(); ns != ast.SlotMustNotBeRenamed && !symbol.NestedScopeSlot.IsValid() {
+			symbol.NestedScopeSlot = ast.MakeIndex32(slot[ns])
+			slot[ns]++
+		}
+	}
+	for _, ref := range scope.Generated {
+		symbol := &symbols[ref.InnerIndex]
+		if ns := symbol.SlotNamespace(); ns != ast.SlotMustNotBeRenamed && !symbol.NestedScopeSlot.IsValid() {
+			symbol.NestedScopeSlot = ast.MakeIndex32(slot[ns])
+			slot[ns]++
+		}
+	}
+
+	// Labels are always declared in a nested scope, so we don't need to check.
+	if scope.Label.Ref != ast.InvalidRef {
+		symbol := &symbols[scope.Label.Ref.InnerIndex]
+		symbol.NestedScopeSlot = ast.MakeIndex32(slot[ast.SlotLabel])
+		slot[ast.SlotLabel]++
+	}
+
+	// Assign slots for the symbols of child scopes
+	slotCounts := slot
+	for _, child := range scope.Children {
+		slotCounts.UnionMax(assignNestedScopeSlotsHelper(child, symbols, slot))
+	}
+	return slotCounts
+}
+
// slotAndCount pairs a slot index with how many times that slot was used.
type slotAndCount struct {
	slot  uint32
	count uint32
}

// slotAndCountArray exists only so we can use Go's native sort function.
// Ordering: higher counts first, ties broken by the lower slot index.
type slotAndCountArray []slotAndCount

func (a slotAndCountArray) Len() int          { return len(a) }
func (a slotAndCountArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
func (a slotAndCountArray) Less(i int, j int) bool {
	left, right := a[i], a[j]
	if left.count != right.count {
		return left.count > right.count
	}
	return left.slot < right.slot
}
+
////////////////////////////////////////////////////////////////////////////////
// NumberRenamer

// NumberRenamer picks collision-free names by appending a number to the
// original name whenever it clashes with a name already used in an enclosing
// scope (see findUnusedName).
type NumberRenamer struct {
	// All symbols, addressed by source index then inner index
	symbols ast.SymbolMap

	// The top-level scope; seeded with externally reserved names
	root numberScope

	// Assigned names: names[sourceIndex][innerIndex]. Inner slices are
	// allocated lazily per source file (see assignName for why this is safe).
	names [][]string
}
+
// NewNumberRenamer creates a NumberRenamer over "symbols". The names in
// "reservedNames" (mapped to their collision counts) are treated as already
// taken at the top level and will never be handed out.
func NewNumberRenamer(symbols ast.SymbolMap, reservedNames map[string]uint32) *NumberRenamer {
	return &NumberRenamer{
		symbols: symbols,
		names:   make([][]string, len(symbols.SymbolsForSource)),
		root:    numberScope{nameCounts: reservedNames},
	}
}
+
// NameForSymbol returns the name assigned to "ref", following symbol links
// first so merged symbols resolve to one name. Symbols that were never
// renamed keep their original name.
func (r *NumberRenamer) NameForSymbol(ref ast.Ref) string {
	ref = ast.FollowSymbols(r.symbols, ref)
	if inner := r.names[ref.SourceIndex]; inner != nil {
		if name := inner[ref.InnerIndex]; name != "" {
			return name
		}
	}
	return r.symbols.Get(ref).OriginalName
}
+
// AddTopLevelSymbol assigns a name to "ref" in the renamer's top-level scope.
func (r *NumberRenamer) AddTopLevelSymbol(ref ast.Ref) {
	r.assignName(&r.root, ref)
}
+
// assignName computes and records a collision-free name for "ref" within
// "scope". Symbol links are followed first so that merged symbols share a
// single name. Symbols that were already renamed, or whose namespace is
// neither "default" nor "private name" (e.g. unbound symbols, reserved names,
// labels), are left untouched.
func (r *NumberRenamer) assignName(scope *numberScope, ref ast.Ref) {
	ref = ast.FollowSymbols(r.symbols, ref)

	// Don't rename the same symbol more than once
	inner := r.names[ref.SourceIndex]
	if inner != nil && inner[ref.InnerIndex] != "" {
		return
	}

	// Don't rename unbound symbols, symbols marked as reserved names, labels, or private names
	symbol := r.symbols.Get(ref)
	ns := symbol.SlotNamespace()
	if ns != ast.SlotDefault && ns != ast.SlotPrivateName {
		return
	}

	// Make sure names of symbols used in JSX elements start with a capital letter
	originalName := symbol.OriginalName
	if symbol.Flags.Has(ast.MustStartWithCapitalLetterForJSX) {
		// NOTE(review): assumes the original name is non-empty — confirm upstream
		if first := rune(originalName[0]); first >= 'a' && first <= 'z' {
			originalName = fmt.Sprintf("%c%s", first+('A'-'a'), originalName[1:])
		}
	}

	// Compute a new name
	name := scope.findUnusedName(originalName, ns)

	// Store the new name
	if inner == nil {
		// Note: This should not be a data race even though this method is run from
		// multiple threads. The parallel part only looks at symbols defined in
		// nested scopes, and those can only ever be accessed from within the file.
		// References to those symbols should never spread across files.
		//
		// While we could avoid the data race by densely preallocating the entire
		// "names" array ahead of time, that will waste a lot more memory for
		// builds that make heavy use of code splitting and have many chunks. Doing
		// things lazily like this means we use less memory but still stay safe.
		inner = make([]string, len(r.symbols.SymbolsForSource[ref.SourceIndex]))
		r.names[ref.SourceIndex] = inner
	}
	inner[ref.InnerIndex] = name
}
+
// assignNamesInScope allocates a fresh child numberScope under "parent" and
// assigns names to every member and generated symbol declared in "scope".
// The "sorted" buffer is shared across calls to avoid per-scope allocations.
func (r *NumberRenamer) assignNamesInScope(scope *js_ast.Scope, sourceIndex uint32, parent *numberScope, sorted *[]int) *numberScope {
	s := &numberScope{parent: parent, nameCounts: make(map[string]uint32)}

	if len(scope.Members) > 0 {
		// Sort member map keys for determinism, reusing a shared memory buffer
		*sorted = (*sorted)[:0]
		for _, member := range scope.Members {
			*sorted = append(*sorted, int(member.Ref.InnerIndex))
		}
		sort.Ints(*sorted)

		// Rename all user-defined symbols in this scope
		for _, innerIndex := range *sorted {
			r.assignName(s, ast.Ref{SourceIndex: sourceIndex, InnerIndex: uint32(innerIndex)})
		}
	}

	// Also rename all generated symbols in this scope
	for _, ref := range scope.Generated {
		r.assignName(s, ref)
	}

	return s
}
+
// assignNamesRecursive walks "scope" and all of its descendants, assigning
// names to every symbol they declare. Scopes with no declarations share their
// parent's numberScope instead of allocating a new one.
func (r *NumberRenamer) assignNamesRecursive(scope *js_ast.Scope, sourceIndex uint32, parent *numberScope, sorted *[]int) {
	// For performance in extreme cases (e.g. 10,000 nested scopes), traversing
	// through singly-nested scopes uses iteration instead of recursion
	for {
		if len(scope.Members) > 0 || len(scope.Generated) > 0 {
			// For performance in extreme cases (e.g. 10,000 nested scopes), only
			// allocate a scope when it's necessary. I'm not quite sure why allocating
			// one scope per level is so much overhead. It's not that many objects.
			// Or at least there are already that many objects for the AST that we're
			// traversing, so I don't know why 80% of the time in these extreme cases
			// is taken by this function (if we don't avoid this allocation).
			parent = r.assignNamesInScope(scope, sourceIndex, parent, sorted)
		}
		if children := scope.Children; len(children) == 1 {
			scope = children[0]
		} else {
			break
		}
	}

	// Symbols in child scopes may also have to be renamed to avoid conflicts
	for _, child := range scope.Children {
		r.assignNamesRecursive(child, sourceIndex, parent, sorted)
	}
}
+
// AssignNamesByScope renames the symbols in all given nested scopes, keyed by
// source index, handling each source file on its own goroutine. See the note
// in assignName for why the concurrent writes to r.names are safe.
func (r *NumberRenamer) AssignNamesByScope(nestedScopes map[uint32][]*js_ast.Scope) {
	waitGroup := sync.WaitGroup{}
	waitGroup.Add(len(nestedScopes))

	// Rename nested scopes from separate files in parallel. The loop variables
	// are passed as arguments so each goroutine gets its own copies.
	for sourceIndex, scopes := range nestedScopes {
		go func(sourceIndex uint32, scopes []*js_ast.Scope) {
			var sorted []int
			for _, scope := range scopes {
				r.assignNamesRecursive(scope, sourceIndex, &r.root, &sorted)
			}
			waitGroup.Done()
		}(sourceIndex, scopes)
	}

	waitGroup.Wait()
}
+
// numberScope is one link in a chain of naming scopes. Lookups walk from the
// innermost scope out to the root, so a name is "taken" if any enclosing
// scope has used it.
type numberScope struct {
	parent *numberScope

	// This is used as a set of used names in this scope. This also maps the name
	// to the number of times the name has experienced a collision. When a name
	// collides with an already-used name, we need to rename it. This is done by
	// incrementing a number at the end until the name is unused. We save the
	// count here so that subsequent collisions can start counting from where the
	// previous collision ended instead of having to start counting from 1.
	nameCounts map[string]uint32
}

// nameUse describes where (if anywhere) a candidate name is already taken.
type nameUse uint8

const (
	nameUnused nameUse = iota
	nameUsed
	nameUsedInSameScope
)

// findNameUse reports whether "name" is unused, used in some enclosing
// scope, or used directly in this scope.
func (s *numberScope) findNameUse(name string) nameUse {
	for scope := s; scope != nil; scope = scope.parent {
		if _, taken := scope.nameCounts[name]; taken {
			if scope == s {
				return nameUsedInSameScope
			}
			return nameUsed
		}
	}
	return nameUnused
}
+
// findUnusedName returns a valid identifier derived from "name" that is not
// used in this scope or any enclosing scope, appending a numeric suffix on
// collisions ("name" collides into "name2", then "name3", ...). The chosen
// name is recorded in this scope's nameCounts so later lookups see it as
// taken.
func (s *numberScope) findUnusedName(name string, ns ast.SlotNamespace) string {
	// We may not have a valid identifier if this is an internally-constructed name
	if ns == ast.SlotPrivateName {
		// Private names keep their leading "#"; validate only the rest
		if id := name[1:]; !js_ast.IsIdentifier(id) {
			name = js_ast.ForceValidIdentifier("#", id)
		}
	} else {
		if !js_ast.IsIdentifier(name) {
			name = js_ast.ForceValidIdentifier("", name)
		}
	}

	if use := s.findNameUse(name); use != nameUnused {
		// If the name is already in use, generate a new name by appending a number
		tries := uint32(1)
		if use == nameUsedInSameScope {
			// To avoid O(n^2) behavior, the number must start off being the number
			// that we used last time there was a collision with this name. Otherwise
			// if there are many collisions with the same name, each name collision
			// would have to increment the counter past all previous name collisions
			// which is a O(n^2) time algorithm. Only do this if this symbol comes
			// from the same scope as the previous one since sibling scopes can reuse
			// the same name without problems.
			tries = s.nameCounts[name]
		}
		prefix := name

		// Keep incrementing the number until the name is unused
		for {
			tries++
			name = prefix + strconv.Itoa(int(tries))

			// Make sure this new name is unused
			if s.findNameUse(name) == nameUnused {
				// Store the count so we can start here next time instead of starting
				// from 1. This means we avoid O(n^2) behavior.
				if use == nameUsedInSameScope {
					s.nameCounts[prefix] = tries
				}
				break
			}
		}
	}

	// Each name starts off with a count of 1 so that the first collision with
	// "name" is called "name2"
	s.nameCounts[name] = 1
	return name
}
+
////////////////////////////////////////////////////////////////////////////////
// ExportRenamer

// ExportRenamer hands out unique export names, either derived from a
// requested name or from a minified-name counter.
type ExportRenamer struct {
	used  map[string]uint32
	count int
}

// NextRenamedName returns "name" if it has not been handed out before.
// Otherwise it appends an increasing numeric suffix ("name2", "name3", ...)
// until it finds a name that is still free, records it, and returns it.
func (r *ExportRenamer) NextRenamedName(name string) string {
	if r.used == nil {
		r.used = make(map[string]uint32)
	}

	tries, seen := r.used[name]
	if !seen {
		// First time this name was requested
		r.used[name] = 1
		return name
	}

	// Resume counting from where the previous collision left off
	prefix := name
	for {
		tries++
		candidate := prefix + strconv.Itoa(int(tries))
		if _, taken := r.used[candidate]; !taken {
			r.used[candidate] = tries
			return candidate
		}
	}
}
+
// NextMinifiedName returns the next short name generated from an
// incrementing counter using the default JS name minifier.
func (r *ExportRenamer) NextMinifiedName() string {
	name := ast.DefaultNameMinifierJS.NumberToMinifiedName(r.count)
	r.count++
	return name
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/dataurl.go b/source/vendor/github.com/evanw/esbuild/internal/resolver/dataurl.go
new file mode 100644
index 0000000..c46e607
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/dataurl.go
@@ -0,0 +1,76 @@
+package resolver
+
+import (
+	"encoding/base64"
+	"fmt"
+	"net/url"
+	"strings"
+)
+
// DataURL holds the pieces of a parsed "data:" URL: the MIME type before the
// comma, the still-encoded payload after it, and whether that payload is
// base64-encoded.
type DataURL struct {
	mimeType string
	data     string
	isBase64 bool
}

// ParseDataURL splits a "data:" URL into its MIME type and payload. It
// reports ok == false when the input is not a data URL or lacks the comma
// separating the header from the payload.
func ParseDataURL(url string) (parsed DataURL, ok bool) {
	if !strings.HasPrefix(url, "data:") {
		return DataURL{}, false
	}

	comma := strings.IndexByte(url, ',')
	if comma == -1 {
		return DataURL{}, false
	}

	mimeType := url[len("data:"):comma]
	isBase64 := strings.HasSuffix(mimeType, ";base64")
	if isBase64 {
		mimeType = mimeType[:len(mimeType)-len(";base64")]
	}

	return DataURL{
		mimeType: mimeType,
		data:     url[comma+1:],
		isBase64: isBase64,
	}, true
}
+
// MIMEType classifies a data URL's MIME type into one of the few content
// types that have dedicated loaders (see DecodeMIMEType).
type MIMEType uint8

const (
	MIMETypeUnsupported MIMEType = iota
	MIMETypeTextCSS
	MIMETypeTextJavaScript
	MIMETypeApplicationJSON
)
+
+func (parsed DataURL) DecodeMIMEType() MIMEType {
+	// Remove things like ";charset=utf-8"
+	mimeType := parsed.mimeType
+	if semicolon := strings.IndexByte(mimeType, ';'); semicolon != -1 {
+		mimeType = mimeType[:semicolon]
+	}
+
+	// Hard-code a few supported types
+	switch mimeType {
+	case "text/css":
+		return MIMETypeTextCSS
+	case "text/javascript":
+		return MIMETypeTextJavaScript
+	case "application/json":
+		return MIMETypeApplicationJSON
+	default:
+		return MIMETypeUnsupported
+	}
+}
+
+func (parsed DataURL) DecodeData() (string, error) {
+	// Try to read base64 data
+	if parsed.isBase64 {
+		bytes, err := base64.StdEncoding.DecodeString(parsed.data)
+		if err != nil {
+			return "", fmt.Errorf("could not decode base64 data: %s", err.Error())
+		}
+		return string(bytes), nil
+	}
+
+	// Try to read percent-escaped data
+	content, err := url.PathUnescape(parsed.data)
+	if err != nil {
+		return "", fmt.Errorf("could not decode percent-escaped data: %s", err.Error())
+	}
+	return content, nil
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go b/source/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go
new file mode 100644
index 0000000..068acde
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/package_json.go
@@ -0,0 +1,1462 @@
+package resolver
+
+import (
+	"fmt"
+	"net/url"
+	"path"
+	"regexp"
+	"sort"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// packageJSON holds the parts of a parsed "package.json" file that the
// resolver cares about. See parsePackageJSON for how each field is read.
type packageJSON struct {
	// The "name" field
	name string

	// Entry points parsed from "main"-style fields, keyed by field name
	mainFields map[string]mainField

	// CommonJS vs. ESM classification derived from the "type" field
	moduleTypeData js_ast.ModuleTypeData

	// "TypeScript will first check whether package.json contains a "tsconfig"
	// field, and if it does, TypeScript will try to load a configuration file
	// from that field. If neither exists, TypeScript will try to read from a
	// tsconfig.json at the root."
	//
	// See: https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-2.html#tsconfigjson-inheritance-via-nodejs-packages
	tsconfig string

	// Present if the "browser" field is present. This field is intended to be
	// used by bundlers and lets you redirect the paths of certain 3rd-party
	// modules that don't work in the browser to other modules that shim that
	// functionality. That way you don't have to rewrite the code for those 3rd-
	// party modules. For example, you might remap the native "util" node module
	// to something like https://www.npmjs.com/package/util so it works in the
	// browser.
	//
	// This field contains the original mapping object in "package.json". Mapping
	// to a nil path indicates that the module is disabled. As far as I can
	// tell, the official spec is an abandoned GitHub repo hosted by a user account:
	// https://github.com/defunctzombie/package-browser-field-spec. The npm docs
	// say almost nothing: https://docs.npmjs.com/files/package.json.
	//
	// Note that the non-package "browser" map has to be checked twice to match
	// Webpack's behavior: once before resolution and once after resolution. It
	// leads to some unintuitive failure cases that we must emulate around missing
	// file extensions:
	//
	// * Given the mapping "./no-ext": "./no-ext-browser.js" the query "./no-ext"
	//   should match but the query "./no-ext.js" should NOT match.
	//
	// * Given the mapping "./ext.js": "./ext-browser.js" the query "./ext.js"
	//   should match and the query "./ext" should ALSO match.
	//
	browserMap map[string]*string

	// If this is non-nil, each entry in this map is the absolute path of a file
	// with side effects. Any entry not in this map should be considered to have
	// no side effects, which means import statements for these files can be
	// removed if none of the imports are used. This is a convention from Webpack:
	// https://webpack.js.org/guides/tree-shaking/.
	//
	// Note that if a file is included, all statements that can't be proven to be
	// free of side effects must be included. This convention does not say
	// anything about whether any statements within the file have side effects or
	// not.
	sideEffectsMap     map[string]bool
	sideEffectsRegexps []*regexp.Regexp
	sideEffectsData    *SideEffectsData

	// This represents the "imports" field in this package.json file.
	importsMap *pjMap

	// This represents the "exports" field in this package.json file.
	exportsMap *pjMap

	// The contents and path of the "package.json" file itself
	source logger.Source
}
+
// mainField records a single "main"-style entry point from package.json
// along with the location of its key (used for diagnostics).
type mainField struct {
	relPath string
	keyLoc  logger.Loc
}
+
// browserPathKind says whether the path being looked up in the "browser"
// map is an absolute file path or a package path.
type browserPathKind uint8

const (
	absolutePathKind browserPathKind = iota
	packagePathKind
)
+
// checkBrowserMap consults the "browser" map of the nearest enclosing
// package.json to see whether "inputPath" should be remapped or disabled. It
// only applies when bundling for the browser platform. On a hit, "ok" is true
// and "remapped" is either the replacement path or nil; a nil "remapped" with
// ok == true means the module was explicitly disabled with "false" in the map.
func (r resolverQuery) checkBrowserMap(resolveDirInfo *dirInfo, inputPath string, kind browserPathKind) (remapped *string, ok bool) {
	// This only applies if the current platform is "browser"
	if r.options.Platform != config.PlatformBrowser {
		return nil, false
	}

	// There must be an enclosing directory with a "package.json" file with a "browser" map
	if resolveDirInfo.enclosingBrowserScope == nil {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("No \"browser\" map found in directory %q", resolveDirInfo.absPath))
		}
		return nil, false
	}

	packageJSON := resolveDirInfo.enclosingBrowserScope.packageJSON
	browserMap := packageJSON.browserMap

	type implicitExtensions uint8

	const (
		includeImplicitExtensions implicitExtensions = iota
		skipImplicitExtensions
	)

	// checkPath tries "pathToCheck" against the browser map, optionally also
	// trying implicit extensions and an implicit "/index" suffix. On a match
	// it writes the result into the captured "remapped", "ok", and
	// "inputPath" variables and returns true.
	checkPath := func(pathToCheck string, implicitExtensions implicitExtensions) bool {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Checking for %q in the \"browser\" map in %q",
				pathToCheck, packageJSON.source.KeyPath.Text))
		}

		// Check for equality
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", pathToCheck))
		}
		remapped, ok = browserMap[pathToCheck]
		if ok {
			inputPath = pathToCheck
			return true
		}

		// If that failed, try adding implicit extensions
		if implicitExtensions == includeImplicitExtensions {
			for _, ext := range r.options.ExtensionOrder {
				extPath := pathToCheck + ext
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", extPath))
				}
				remapped, ok = browserMap[extPath]
				if ok {
					inputPath = extPath
					return true
				}
			}
		}

		// If that failed, try assuming this is a directory and looking for an "index" file
		indexPath := path.Join(pathToCheck, "index")
		if IsPackagePath(indexPath) && !IsPackagePath(pathToCheck) {
			indexPath = "./" + indexPath
		}

		// Check for equality
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", indexPath))
		}
		remapped, ok = browserMap[indexPath]
		if ok {
			inputPath = indexPath
			return true
		}

		// If that failed, try adding implicit extensions
		if implicitExtensions == includeImplicitExtensions {
			for _, ext := range r.options.ExtensionOrder {
				extPath := indexPath + ext
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("  Checking for %q", extPath))
				}
				remapped, ok = browserMap[extPath]
				if ok {
					inputPath = extPath
					return true
				}
			}
		}

		return false
	}

	// Turn absolute paths into paths relative to the "browser" map location
	if kind == absolutePathKind {
		relPath, ok := r.fs.Rel(resolveDirInfo.enclosingBrowserScope.absPath, inputPath)
		if !ok {
			return nil, false
		}
		// Normalize Windows-style separators so map keys match
		inputPath = strings.ReplaceAll(relPath, "\\", "/")
	}

	if inputPath == "." {
		// No bundler supports remapping ".", so we don't either
		return nil, false
	}

	// First try the import path as a package path
	if !checkPath(inputPath, includeImplicitExtensions) && IsPackagePath(inputPath) {
		// If a package path didn't work, try the import path as a relative path
		switch kind {
		case absolutePathKind:
			checkPath("./"+inputPath, includeImplicitExtensions)

		case packagePathKind:
			// Browserify allows a browser map entry of "./pkg" to override a package
			// path of "require('pkg')". This is weird, and arguably a bug. But we
			// replicate this bug for compatibility. However, Browserify only allows
			// this within the same package. It does not allow such an entry in a
			// parent package to override this in a child package. So this behavior
			// is disallowed if there is a "node_modules" folder in between the child
			// package and the parent package.
			isInSamePackage := true
			for info := resolveDirInfo; info != nil && info != resolveDirInfo.enclosingBrowserScope; info = info.parent {
				if info.isNodeModules {
					isInSamePackage = false
					break
				}
			}
			if isInSamePackage {
				relativePathPrefix := "./"

				// Use the relative path from the file containing the import path to the
				// enclosing package.json file. This includes any subdirectories within the
				// package if there are any.
				if relPath, ok := r.fs.Rel(resolveDirInfo.enclosingBrowserScope.absPath, resolveDirInfo.absPath); ok && relPath != "." {
					relativePathPrefix += strings.ReplaceAll(relPath, "\\", "/") + "/"
				}

				// Browserify lets "require('pkg')" match "./pkg" but not "./pkg.js".
				// So don't add implicit extensions specifically in this place so we
				// match Browserify's behavior.
				checkPath(relativePathPrefix+inputPath, skipImplicitExtensions)
			}
		}
	}

	if r.debugLogs != nil {
		if ok {
			if remapped == nil {
				r.debugLogs.addNote(fmt.Sprintf("Found %q marked as disabled", inputPath))
			} else {
				r.debugLogs.addNote(fmt.Sprintf("Found %q mapping to %q", inputPath, *remapped))
			}
		} else {
			r.debugLogs.addNote(fmt.Sprintf("Failed to find %q", inputPath))
		}
	}
	return
}
+
// parsePackageJSON reads and parses the "package.json" file in the directory
// "inputPath". It extracts only the fields the resolver cares about: "name",
// "type", "tsconfig", the configured main fields, "browser", "sideEffects",
// "imports", and "exports". It returns nil (after logging an error) when the
// file cannot be read or its JSON cannot be parsed.
func (r resolverQuery) parsePackageJSON(inputPath string) *packageJSON {
	packageJSONPath := r.fs.Join(inputPath, "package.json")
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, packageJSONPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", packageJSONPath, originalError.Error()))
	}
	if err != nil {
		r.log.AddError(nil, logger.Range{},
			fmt.Sprintf("Cannot read file %q: %s",
				PrettyPath(r.fs, logger.Path{Text: packageJSONPath, Namespace: "file"}), err.Error()))
		return nil
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", packageJSONPath))
	}

	keyPath := logger.Path{Text: packageJSONPath, Namespace: "file"}
	jsonSource := logger.Source{
		KeyPath:    keyPath,
		PrettyPath: PrettyPath(r.fs, keyPath),
		Contents:   contents,
	}
	tracker := logger.MakeLineColumnTracker(&jsonSource)

	json, ok := r.caches.JSONCache.Parse(r.log, jsonSource, js_parser.JSONOptions{})
	if !ok {
		return nil
	}

	packageJSON := &packageJSON{
		source:     jsonSource,
		mainFields: make(map[string]mainField),
	}

	// Read the "name" field
	if nameJSON, _, ok := getProperty(json, "name"); ok {
		if nameValue, ok := getString(nameJSON); ok {
			packageJSON.name = nameValue
		}
	}

	// Read the "type" field
	if typeJSON, typeKeyLoc, ok := getProperty(json, "type"); ok {
		if typeValue, ok := getString(typeJSON); ok {
			switch typeValue {
			case "commonjs":
				packageJSON.moduleTypeData = js_ast.ModuleTypeData{
					Type:   js_ast.ModuleCommonJS_PackageJSON,
					Source: &packageJSON.source,
					Range:  jsonSource.RangeOfString(typeJSON.Loc),
				}
			case "module":
				packageJSON.moduleTypeData = js_ast.ModuleTypeData{
					Type:   js_ast.ModuleESM_PackageJSON,
					Source: &packageJSON.source,
					Range:  jsonSource.RangeOfString(typeJSON.Loc),
				}
			default:
				notes := []logger.MsgData{{Text: "The \"type\" field must be set to either \"commonjs\" or \"module\"."}}
				kind := logger.Warning

				// If someone does something like "type": "./index.d.ts" then they
				// likely meant "types" instead of "type". Customize the message
				// for this and hide it if it's inside a published npm package.
				if strings.HasSuffix(typeValue, ".d.ts") {
					notes[0] = tracker.MsgData(jsonSource.RangeOfString(typeKeyLoc),
						"TypeScript type declarations use the \"types\" field, not the \"type\" field:")
					notes[0].Location.Suggestion = "\"types\""
					if helpers.IsInsideNodeModules(jsonSource.KeyPath.Text) {
						kind = logger.Debug
					}
				}

				r.log.AddIDWithNotes(logger.MsgID_PackageJSON_InvalidType, kind, &tracker, jsonSource.RangeOfString(typeJSON.Loc),
					fmt.Sprintf("%q is not a valid value for the \"type\" field", typeValue),
					notes)
			}
		} else {
			r.log.AddID(logger.MsgID_PackageJSON_InvalidType, logger.Warning, &tracker, logger.Range{Loc: typeJSON.Loc},
				"The value for \"type\" must be a string")
		}
	}

	// Read the "tsconfig" field
	if tsconfigJSON, _, ok := getProperty(json, "tsconfig"); ok {
		if tsconfigValue, ok := getString(tsconfigJSON); ok {
			packageJSON.tsconfig = tsconfigValue
		}
	}

	// Read the "main" fields
	mainFields := r.options.MainFields
	if mainFields == nil {
		mainFields = defaultMainFields[r.options.Platform]
	}
	for _, field := range mainFields {
		if mainJSON, mainLoc, ok := getProperty(json, field); ok {
			if main, ok := getString(mainJSON); ok && main != "" {
				packageJSON.mainFields[field] = mainField{keyLoc: mainLoc, relPath: main}
			}
		}
	}
	// Also record fallback fields not already present (used for error reporting)
	for _, field := range mainFieldsForFailure {
		if _, ok := packageJSON.mainFields[field]; !ok {
			if mainJSON, mainLoc, ok := getProperty(json, field); ok {
				if main, ok := getString(mainJSON); ok && main != "" {
					packageJSON.mainFields[field] = mainField{keyLoc: mainLoc, relPath: main}
				}
			}
		}
	}

	// Read the "browser" property, but only when targeting the browser
	if browserJSON, _, ok := getProperty(json, "browser"); ok && r.options.Platform == config.PlatformBrowser {
		// We both want the ability to have the option of CJS vs. ESM and the
		// option of having node vs. browser. The way to do this is to use the
		// object literal form of the "browser" field like this:
		//
		//   "main": "dist/index.node.cjs.js",
		//   "module": "dist/index.node.esm.js",
		//   "browser": {
		//     "./dist/index.node.cjs.js": "./dist/index.browser.cjs.js",
		//     "./dist/index.node.esm.js": "./dist/index.browser.esm.js"
		//   },
		//
		if browser, ok := browserJSON.Data.(*js_ast.EObject); ok {
			// The value is an object
			browserMap := make(map[string]*string)

			// Remap all files in the browser field
			for _, prop := range browser.Properties {
				if key, ok := getString(prop.Key); ok && prop.ValueOrNil.Data != nil {
					if value, ok := getString(prop.ValueOrNil); ok {
						// If this is a string, it's a replacement package
						browserMap[key] = &value
					} else if value, ok := getBool(prop.ValueOrNil); ok {
						// If this is false, it means the package is disabled
						if !value {
							browserMap[key] = nil
						}
					} else {
						r.log.AddID(logger.MsgID_PackageJSON_InvalidBrowser, logger.Warning, &tracker, logger.Range{Loc: prop.ValueOrNil.Loc},
							"Each \"browser\" mapping must be a string or a boolean")
					}
				}
			}

			packageJSON.browserMap = browserMap
		}
	}

	// Read the "sideEffects" property
	if sideEffectsJSON, sideEffectsLoc, ok := getProperty(json, "sideEffects"); ok {
		switch data := sideEffectsJSON.Data.(type) {
		case *js_ast.EBoolean:
			if !data.Value {
				// Make an empty map for "sideEffects: false", which indicates all
				// files in this module can be considered to not have side effects.
				packageJSON.sideEffectsMap = make(map[string]bool)
				packageJSON.sideEffectsData = &SideEffectsData{
					IsSideEffectsArrayInJSON: false,
					Source:                   &jsonSource,
					Range:                    jsonSource.RangeOfString(sideEffectsLoc),
				}
			}

		case *js_ast.EArray:
			// The "sideEffects: []" format means all files in this module but not in
			// the array can be considered to not have side effects.
			packageJSON.sideEffectsMap = make(map[string]bool)
			packageJSON.sideEffectsData = &SideEffectsData{
				IsSideEffectsArrayInJSON: true,
				Source:                   &jsonSource,
				Range:                    jsonSource.RangeOfString(sideEffectsLoc),
			}
			for _, itemJSON := range data.Items {
				item, ok := itemJSON.Data.(*js_ast.EString)
				if !ok || item.Value == nil {
					r.log.AddID(logger.MsgID_PackageJSON_InvalidSideEffects, logger.Warning, &tracker, logger.Range{Loc: itemJSON.Loc},
						"Expected string in array for \"sideEffects\"")
					continue
				}

				// Reference: https://github.com/webpack/webpack/blob/ed175cd22f89eb9fecd0a70572a3fd0be028e77c/lib/optimize/SideEffectsFlagPlugin.js
				pattern := helpers.UTF16ToString(item.Value)
				if !strings.ContainsRune(pattern, '/') {
					pattern = "**/" + pattern
				}
				absPattern := r.fs.Join(inputPath, pattern)
				absPattern = strings.ReplaceAll(absPattern, "\\", "/") // Avoid problems with Windows-style slashes
				re, hadWildcard := globstarToEscapedRegexp(absPattern)

				// Wildcard patterns require more expensive matching
				if hadWildcard {
					packageJSON.sideEffectsRegexps = append(packageJSON.sideEffectsRegexps, regexp.MustCompile(re))
					continue
				}

				// Normal strings can be matched with a map lookup
				packageJSON.sideEffectsMap[absPattern] = true
			}

		default:
			r.log.AddID(logger.MsgID_PackageJSON_InvalidSideEffects, logger.Warning, &tracker, logger.Range{Loc: sideEffectsJSON.Loc},
				"The value for \"sideEffects\" must be a boolean or an array")
		}
	}

	// Read the "imports" map
	if importsJSON, importsLoc, ok := getProperty(json, "imports"); ok {
		if importsMap := parseImportsExportsMap(jsonSource, r.log, importsJSON, "imports", importsLoc); importsMap != nil {
			if importsMap.root.kind != pjObject {
				r.log.AddID(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, importsMap.root.firstToken,
					"The value for \"imports\" must be an object")
			}
			packageJSON.importsMap = importsMap
		}
	}

	// Read the "exports" map
	if exportsJSON, exportsLoc, ok := getProperty(json, "exports"); ok {
		if exportsMap := parseImportsExportsMap(jsonSource, r.log, exportsJSON, "exports", exportsLoc); exportsMap != nil {
			packageJSON.exportsMap = exportsMap
		}
	}

	return packageJSON
}
+
+// Reference: https://github.com/fitzgen/glob-to-regexp/blob/2abf65a834259c6504ed3b80e85f893f8cd99127/index.js
+func globstarToEscapedRegexp(glob string) (string, bool) {
+	sb := strings.Builder{}
+	sb.WriteByte('^')
+	hadWildcard := false
+	n := len(glob)
+
+	for i := 0; i < n; i++ {
+		c := glob[i]
+		switch c {
+		case '\\', '^', '$', '.', '+', '|', '(', ')', '[', ']', '{', '}':
+			sb.WriteByte('\\')
+			sb.WriteByte(c)
+
+		case '?':
+			sb.WriteByte('.')
+			hadWildcard = true
+
+		case '*':
+			// Move over all consecutive "*"'s.
+			// Also store the previous and next characters
+			prevChar := -1
+			if i > 0 {
+				prevChar = int(glob[i-1])
+			}
+			starCount := 1
+			for i+1 < n && glob[i+1] == '*' {
+				starCount++
+				i++
+			}
+			nextChar := -1
+			if i+1 < n {
+				nextChar = int(glob[i+1])
+			}
+
+			// Determine if this is a globstar segment
+			isGlobstar := starCount > 1 && // multiple "*"'s
+				(prevChar == '/' || prevChar == -1) && // from the start of the segment
+				(nextChar == '/' || nextChar == -1) // to the end of the segment
+
+			if isGlobstar {
+				// It's a globstar, so match zero or more path segments
+				sb.WriteString("(?:[^/]*(?:/|$))*")
+				i++ // Move over the "/"
+			} else {
+				// It's not a globstar, so only match one path segment
+				sb.WriteString("[^/]*")
+			}
+
+			hadWildcard = true
+
+		default:
+			sb.WriteByte(c)
+		}
+	}
+
+	sb.WriteByte('$')
+	return sb.String(), hadWildcard
+}
+
// pjMap represents a parsed "imports" or "exports" field from package.json.
//
// Reference: https://nodejs.org/api/esm.html#esm_resolver_algorithm_specification
type pjMap struct {
	root           pjEntry
	propertyKey    string
	propertyKeyLoc logger.Loc
}
+
// pjKind classifies the JSON value stored in a pjEntry.
type pjKind uint8

const (
	pjNull pjKind = iota
	pjString
	pjArray
	pjObject
	pjInvalid
)
+
// pjEntry is one node of a parsed "imports"/"exports" value. Exactly one of
// the data fields is meaningful depending on "kind".
type pjEntry struct {
	strData       string
	arrData       []pjEntry
	mapData       []pjMapEntry // Can't be a "map" because order matters
	expansionKeys expansionKeysArray
	firstToken    logger.Range
	kind          pjKind
}
+
// pjMapEntry is a single key/value property of a pjObject entry, keeping the
// key's source range for diagnostics.
type pjMapEntry struct {
	key      string
	value    pjEntry
	keyRange logger.Range
}
+
// This type is just so we can use Go's native sort function
type expansionKeysArray []pjMapEntry

func (a expansionKeysArray) Len() int          { return len(a) }
func (a expansionKeysArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+func (a expansionKeysArray) Less(i int, j int) bool {
+	// Assert: keyA ends with "/" or contains only a single "*".
+	// Assert: keyB ends with "/" or contains only a single "*".
+	keyA := a[i].key
+	keyB := a[j].key
+
+	// Let baseLengthA be the index of "*" in keyA plus one, if keyA contains "*", or the length of keyA otherwise.
+	// Let baseLengthB be the index of "*" in keyB plus one, if keyB contains "*", or the length of keyB otherwise.
+	starA := strings.IndexByte(keyA, '*')
+	starB := strings.IndexByte(keyB, '*')
+	var baseLengthA int
+	var baseLengthB int
+	if starA >= 0 {
+		baseLengthA = starA
+	} else {
+		baseLengthA = len(keyA)
+	}
+	if starB >= 0 {
+		baseLengthB = starB
+	} else {
+		baseLengthB = len(keyB)
+	}
+
+	// If baseLengthA is greater than baseLengthB, return -1.
+	// If baseLengthB is greater than baseLengthA, return 1.
+	if baseLengthA > baseLengthB {
+		return true
+	}
+	if baseLengthB > baseLengthA {
+		return false
+	}
+
+	// If keyA does not contain "*", return 1.
+	// If keyB does not contain "*", return -1.
+	if starA < 0 {
+		return false
+	}
+	if starB < 0 {
+		return true
+	}
+
+	// If the length of keyA is greater than the length of keyB, return -1.
+	// If the length of keyB is greater than the length of keyA, return 1.
+	if len(keyA) > len(keyB) {
+		return true
+	}
+	if len(keyB) > len(keyA) {
+		return false
+	}
+
+	return false
+}
+
+func (entry pjEntry) valueForKey(key string) (pjEntry, bool) {
+	for _, item := range entry.mapData {
+		if item.key == key {
+			return item.value, true
+		}
+	}
+	return pjEntry{}, false
+}
+
// parseImportsExportsMap parses the JSON value of a package.json "imports" or
// "exports" property ("propertyKey") into a pjMap tree. Malformed values are
// reported as warnings on "log" and become entries of kind pjInvalid. A
// top-level "null" value returns nil.
func parseImportsExportsMap(source logger.Source, log logger.Log, json js_ast.Expr, propertyKey string, propertyKeyLoc logger.Loc) *pjMap {
	var visit func(expr js_ast.Expr) pjEntry
	tracker := logger.MakeLineColumnTracker(&source)

	// Recursively convert the JSON AST into pjEntry nodes
	visit = func(expr js_ast.Expr) pjEntry {
		var firstToken logger.Range

		switch e := expr.Data.(type) {
		case *js_ast.ENull:
			return pjEntry{
				kind:       pjNull,
				firstToken: js_lexer.RangeOfIdentifier(source, expr.Loc),
			}

		case *js_ast.EString:
			return pjEntry{
				kind:       pjString,
				firstToken: source.RangeOfString(expr.Loc),
				strData:    helpers.UTF16ToString(e.Value),
			}

		case *js_ast.EArray:
			arrData := make([]pjEntry, len(e.Items))
			for i, item := range e.Items {
				arrData[i] = visit(item)
			}
			return pjEntry{
				kind:       pjArray,
				firstToken: logger.Range{Loc: expr.Loc, Len: 1},
				arrData:    arrData,
			}

		case *js_ast.EObject:
			mapData := make([]pjMapEntry, len(e.Properties))
			expansionKeys := make(expansionKeysArray, 0, len(e.Properties))
			firstToken := logger.Range{Loc: expr.Loc, Len: 1}
			// "Conditional sugar" means the object maps conditions directly
			// (keys don't start with ".") instead of mapping subpaths
			isConditionalSugar := false

			type DeadCondition struct {
				reason string
				ranges []logger.Range
				notes  []logger.MsgData
			}
			var foundDefault logger.Range
			var foundImport logger.Range
			var foundRequire logger.Range
			var deadCondition DeadCondition

			for i, property := range e.Properties {
				// JSON object keys are always strings, so this type assertion
				// is expected to succeed for JSON input
				keyStr, _ := property.Key.Data.(*js_ast.EString)
				key := helpers.UTF16ToString(keyStr.Value)
				keyRange := source.RangeOfString(property.Key.Loc)

				// If exports is an Object with both a key starting with "." and a key
				// not starting with ".", throw an Invalid Package Configuration error.
				curIsConditionalSugar := !strings.HasPrefix(key, ".")
				if i == 0 {
					isConditionalSugar = curIsConditionalSugar
				} else if isConditionalSugar != curIsConditionalSugar {
					prevEntry := mapData[i-1]
					log.AddIDWithNotes(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, keyRange,
						"This object cannot contain keys that both start with \".\" and don't start with \".\"",
						[]logger.MsgData{tracker.MsgData(prevEntry.keyRange,
							fmt.Sprintf("The key %q is incompatible with the previous key %q:", key, prevEntry.key))})
					return pjEntry{
						kind:       pjInvalid,
						firstToken: firstToken,
					}
				}

				// Track "dead" conditional branches that can never be reached
				if foundDefault.Len != 0 || (foundImport.Len != 0 && foundRequire.Len != 0) {
					deadCondition.ranges = append(deadCondition.ranges, keyRange)
					// Note: Don't warn about the "default" condition as it's supposed to be a catch-all condition
					if deadCondition.reason == "" && key != "default" {
						if foundDefault.Len != 0 {
							deadCondition.reason = "\"default\""
							deadCondition.notes = []logger.MsgData{
								tracker.MsgData(foundDefault, "The \"default\" condition comes earlier and will always be chosen:"),
							}
						} else {
							deadCondition.reason = "both \"import\" and \"require\""
							deadCondition.notes = []logger.MsgData{
								tracker.MsgData(foundImport, "The \"import\" condition comes earlier and will be used for all \"import\" statements:"),
								tracker.MsgData(foundRequire, "The \"require\" condition comes earlier and will be used for all \"require\" calls:"),
							}
						}
					}
				} else {
					switch key {
					case "default":
						foundDefault = keyRange
					case "import":
						foundImport = keyRange
					case "require":
						foundRequire = keyRange
					}
				}

				entry := pjMapEntry{
					key:      key,
					keyRange: keyRange,
					value:    visit(property.ValueOrNil),
				}

				// Keys ending in "/" or containing "*" participate in
				// expansion-key (pattern) matching later on
				if strings.HasSuffix(key, "/") || strings.IndexByte(key, '*') >= 0 {
					expansionKeys = append(expansionKeys, entry)
				}

				mapData[i] = entry
			}

			// Let expansionKeys be the list of keys of matchObj either ending in "/"
			// or containing only a single "*", sorted by the sorting function
			// PATTERN_KEY_COMPARE which orders in descending order of specificity.
			// A stable sort preserves the declaration order of equal keys.
			sort.Stable(expansionKeys)

			// Warn about "dead" conditional branches that can never be reached
			if deadCondition.reason != "" {
				kind := logger.Warning
				if helpers.IsInsideNodeModules(source.KeyPath.Text) {
					kind = logger.Debug
				}
				var conditions string
				conditionWord := "condition"
				itComesWord := "it comes"
				if len(deadCondition.ranges) > 1 {
					conditionWord = "conditions"
					itComesWord = "they come"
				}
				for i, r := range deadCondition.ranges {
					if i > 0 {
						conditions += " and "
					}
					conditions += source.TextForRange(r)
				}
				log.AddIDWithNotes(logger.MsgID_PackageJSON_DeadCondition, kind, &tracker, deadCondition.ranges[0],
					fmt.Sprintf("The %s %s here will never be used as %s after %s", conditionWord, conditions, itComesWord, deadCondition.reason),
					deadCondition.notes)
			}

			return pjEntry{
				kind:          pjObject,
				firstToken:    firstToken,
				mapData:       mapData,
				expansionKeys: expansionKeys,
			}

		case *js_ast.EBoolean:
			firstToken = js_lexer.RangeOfIdentifier(source, expr.Loc)

		case *js_ast.ENumber:
			firstToken = source.RangeOfNumber(expr.Loc)

		default:
			firstToken.Loc = expr.Loc
		}

		// Booleans, numbers, and anything else are invalid in this map
		log.AddID(logger.MsgID_PackageJSON_InvalidImportsOrExports, logger.Warning, &tracker, firstToken,
			"This value must be a string, an object, an array, or null")
		return pjEntry{
			kind:       pjInvalid,
			firstToken: firstToken,
		}
	}

	root := visit(json)

	if root.kind == pjNull {
		return nil
	}

	return &pjMap{
		root:           root,
		propertyKey:    propertyKey,
		propertyKeyLoc: propertyKeyLoc,
	}
}
+
+func (entry pjEntry) keysStartWithDot() bool {
+	return len(entry.mapData) > 0 && strings.HasPrefix(entry.mapData[0].key, ".")
+}
+
// pjStatus is the outcome of running node's ESM resolution algorithm over an
// "imports"/"exports" map.
type pjStatus uint8

const (
	pjStatusUndefined                  pjStatus = iota
	pjStatusUndefinedNoConditionsMatch          // A more friendly error message for when no conditions are matched
	pjStatusNull
	pjStatusExact
	pjStatusExactEndsWithStar // Like pjStatusExact, but the target's substituted "*" was at the very end
	pjStatusInexact        // This means we may need to try CommonJS-style extension suffixes
	pjStatusPackageResolve // Need to re-run package resolution on the result

	// Module specifier is an invalid URL, package name or package subpath specifier.
	pjStatusInvalidModuleSpecifier

	// package.json configuration is invalid or contains an invalid configuration.
	pjStatusInvalidPackageConfiguration

	// Package exports or imports define a target module for the package that is an invalid type or string target.
	pjStatusInvalidPackageTarget

	// Package exports do not define or permit a target subpath in the package for the given module.
	pjStatusPackagePathNotExported

	// Package imports do not define the specified specifier.
	pjStatusPackageImportNotDefined

	// The package or module requested does not exist.
	pjStatusModuleNotFound
	pjStatusModuleNotFoundMissingExtension // The user just needs to add the missing extension

	// The resolved path corresponds to a directory, which is not a supported target for module imports.
	pjStatusUnsupportedDirectoryImport
	pjStatusUnsupportedDirectoryImportMissingIndex // The user just needs to add the missing "/index.js" suffix
)
+
+func (status pjStatus) isUndefined() bool {
+	return status == pjStatusUndefined || status == pjStatusUndefinedNoConditionsMatch
+}
+
// pjDebug carries extra context alongside a pjStatus for constructing
// detailed resolution error messages.
type pjDebug struct {
	// If the status is "pjStatusInvalidPackageTarget" or "pjStatusInvalidModuleSpecifier",
	// then this is the reason. It always starts with " because".
	invalidBecause string

	// If the status is "pjStatusUndefinedNoConditionsMatch", this is the set of
	// conditions that didn't match, in the order that they were found in the file.
	// This information is used for error messages.
	unmatchedConditions []logger.Span

	// This is the range of the token to use for error messages
	token logger.Range

	// If true, the token is a "null" literal
	isBecauseOfNullLiteral bool
}
+
+func (r resolverQuery) esmHandlePostConditions(
+	resolved string,
+	status pjStatus,
+	debug pjDebug,
+) (string, pjStatus, pjDebug) {
+	if status != pjStatusExact && status != pjStatusExactEndsWithStar && status != pjStatusInexact {
+		return resolved, status, debug
+	}
+
+	// If resolved contains any percent encodings of "/" or "\" ("%2f" and "%5C"
+	// respectively), then throw an Invalid Module Specifier error.
+	resolvedPath, err := url.PathUnescape(resolved)
+	if err != nil {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The path %q contains invalid URL escapes: %s", resolved, err.Error()))
+		}
+		return resolved, pjStatusInvalidModuleSpecifier, debug
+	}
+	var found string
+	if strings.Contains(resolved, "%2f") {
+		found = "%2f"
+	} else if strings.Contains(resolved, "%2F") {
+		found = "%2F"
+	} else if strings.Contains(resolved, "%5c") {
+		found = "%5c"
+	} else if strings.Contains(resolved, "%5C") {
+		found = "%5C"
+	}
+	if found != "" {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The path %q is not allowed to contain %q", resolved, found))
+		}
+		return resolved, pjStatusInvalidModuleSpecifier, debug
+	}
+
+	// If the file at resolved is a directory, then throw an Unsupported Directory
+	// Import error.
+	if strings.HasSuffix(resolvedPath, "/") || strings.HasSuffix(resolvedPath, "\\") {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The path %q is not allowed to end with a slash", resolved))
+		}
+		return resolved, pjStatusUnsupportedDirectoryImport, debug
+	}
+
+	// Set resolved to the real path of resolved.
+	return resolvedPath, status, debug
+}
+
+func (r resolverQuery) esmPackageImportsResolve(
+	specifier string,
+	imports pjEntry,
+	conditions map[string]bool,
+) (string, pjStatus, pjDebug) {
+	// ALGORITHM DEVIATION: Provide a friendly error message if "imports" is not an object
+	if imports.kind != pjObject {
+		return "", pjStatusInvalidPackageConfiguration, pjDebug{token: imports.firstToken}
+	}
+
+	resolved, status, debug := r.esmPackageImportsExportsResolve(specifier, imports, "/", true, conditions)
+	if status != pjStatusNull && status != pjStatusUndefined {
+		return resolved, status, debug
+	}
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("The package import %q is not defined", specifier))
+	}
+	return specifier, pjStatusPackageImportNotDefined, pjDebug{token: imports.firstToken}
+}
+
// esmPackageExportsResolve implements the PACKAGE_EXPORTS_RESOLVE step of
// node's ESM resolution algorithm for a package's "exports" map. "subpath"
// is "." for the package's main entry point or "./..." for a sub-entry.
func (r resolverQuery) esmPackageExportsResolve(
	packageURL string,
	subpath string,
	exports pjEntry,
	conditions map[string]bool,
) (string, pjStatus, pjDebug) {
	if exports.kind == pjInvalid {
		if r.debugLogs != nil {
			r.debugLogs.addNote("Invalid package configuration")
		}
		return "", pjStatusInvalidPackageConfiguration, pjDebug{token: exports.firstToken}
	}

	debugToReturn := pjDebug{token: exports.firstToken}
	if subpath == "." {
		// The main entry point: "exports" may be a string, an array, a
		// condition object (keys without "."), or a subpath object with a "." key
		mainExport := pjEntry{kind: pjNull}
		if exports.kind == pjString || exports.kind == pjArray || (exports.kind == pjObject && !exports.keysStartWithDot()) {
			mainExport = exports
		} else if exports.kind == pjObject {
			if dot, ok := exports.valueForKey("."); ok {
				if r.debugLogs != nil {
					r.debugLogs.addNote("Using the entry for \".\"")
				}
				mainExport = dot
			}
		}
		if mainExport.kind != pjNull {
			resolved, status, debug := r.esmPackageTargetResolve(packageURL, mainExport, "", false, false, conditions)
			if status != pjStatusNull && status != pjStatusUndefined {
				return resolved, status, debug
			} else {
				debugToReturn = debug
			}
		}
	} else if exports.kind == pjObject && exports.keysStartWithDot() {
		// A sub-entry: look up the subpath in the subpath map
		resolved, status, debug := r.esmPackageImportsExportsResolve(subpath, exports, packageURL, false, conditions)
		if status != pjStatusNull && status != pjStatusUndefined {
			return resolved, status, debug
		} else {
			debugToReturn = debug
		}
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The path %q is not exported", subpath))
	}
	return "", pjStatusPackagePathNotExported, debugToReturn
}
+
// esmPackageImportsExportsResolve implements the PACKAGE_IMPORTS_EXPORTS_RESOLVE
// step of node's ESM resolution algorithm: "matchKey" is matched against the
// keys of "matchObj", first exactly and then against the pre-sorted expansion
// keys ("*" patterns and trailing-"/" prefixes).
func (r resolverQuery) esmPackageImportsExportsResolve(
	matchKey string,
	matchObj pjEntry,
	packageURL string,
	isImports bool,
	conditions map[string]bool,
) (string, pjStatus, pjDebug) {
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("Checking object path map for %q", matchKey))
	}

	// If matchKey is a key of matchObj and does not end in "/" or contain "*", then
	if !strings.HasSuffix(matchKey, "/") && strings.IndexByte(matchKey, '*') < 0 {
		if target, ok := matchObj.valueForKey(matchKey); ok {
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("Found exact match for %q", matchKey))
			}
			return r.esmPackageTargetResolve(packageURL, target, "", false, isImports, conditions)
		}
	}

	// Expansion keys are sorted in descending order of specificity, so the
	// first match wins
	for _, expansion := range matchObj.expansionKeys {
		// If expansionKey contains "*", set patternBase to the substring of
		// expansionKey up to but excluding the first "*" character
		if star := strings.IndexByte(expansion.key, '*'); star >= 0 {
			patternBase := expansion.key[:star]

			// If patternBase is not null and matchKey starts with but is not equal
			// to patternBase, then
			if strings.HasPrefix(matchKey, patternBase) {
				// Let patternTrailer be the substring of expansionKey from the index
				// after the first "*" character.
				patternTrailer := expansion.key[star+1:]

				// If patternTrailer has zero length, or if matchKey ends with
				// patternTrailer and the length of matchKey is greater than or
				// equal to the length of expansionKey, then
				if patternTrailer == "" || (strings.HasSuffix(matchKey, patternTrailer) && len(matchKey) >= len(expansion.key)) {
					target := expansion.value
					// The part of matchKey that the "*" matched
					subpath := matchKey[len(patternBase) : len(matchKey)-len(patternTrailer)]
					if r.debugLogs != nil {
						r.debugLogs.addNote(fmt.Sprintf("The key %q matched with %q left over", expansion.key, subpath))
					}
					return r.esmPackageTargetResolve(packageURL, target, subpath, true, isImports, conditions)
				}
			}
		} else {
			// Otherwise if patternBase is null and matchKey starts with
			// expansionKey, then
			if strings.HasPrefix(matchKey, expansion.key) {
				target := expansion.value
				subpath := matchKey[len(expansion.key):]
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("The key %q matched with %q left over", expansion.key, subpath))
				}
				result, status, debug := r.esmPackageTargetResolve(packageURL, target, subpath, false, isImports, conditions)
				if status == pjStatusExact || status == pjStatusExactEndsWithStar {
					// Return the object { resolved, exact: false }.
					status = pjStatusInexact
				}
				return result, status, debug
			}
		}

		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The key %q did not match", expansion.key))
		}
	}

	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("No keys matched %q", matchKey))
	}
	return "", pjStatusNull, pjDebug{token: matchObj.firstToken}
}
+
// findInvalidSegment splits "path" on "/" or "\" and returns the first ".",
// ".." or "node_modules" segment found after the first segment, or "" if
// there is none. The first segment is always exempt.
func findInvalidSegment(path string) string {
	// Skip everything up to and including the first separator; a path with no
	// separator has only its (exempt) first segment
	sep := strings.IndexAny(path, "/\\")
	if sep < 0 {
		return ""
	}
	rest := path[sep+1:]

	for rest != "" {
		segment := rest
		if k := strings.IndexAny(rest, "/\\"); k >= 0 {
			segment, rest = rest[:k], rest[k+1:]
		} else {
			rest = ""
		}
		switch segment {
		case ".", "..", "node_modules":
			return segment
		}
	}
	return ""
}
+
// esmPackageTargetResolve implements the PACKAGE_TARGET_RESOLVE step of node's
// ESM resolution algorithm. "target" is a value from the "exports"/"imports"
// map, "subpath" is the remaining unmatched part of the import path, "pattern"
// is true when the matched key contained a "*" (so "*" in the target is
// substituted), and "internal" is true when resolving "imports" (whose targets
// may be bare package names that need another round of package resolution).
func (r resolverQuery) esmPackageTargetResolve(
	packageURL string,
	target pjEntry,
	subpath string,
	pattern bool,
	internal bool,
	conditions map[string]bool,
) (string, pjStatus, pjDebug) {
	switch target.kind {
	case pjString:
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Checking path %q against target %q", subpath, target.strData))
			r.debugLogs.increaseIndent()
			defer r.debugLogs.decreaseIndent()
		}

		// If pattern is false, subpath has non-zero length and target
		// does not end with "/", throw an Invalid Module Specifier error.
		if !pattern && subpath != "" && !strings.HasSuffix(target.strData, "/") {
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The target %q is invalid because it doesn't end in \"/\"", target.strData))
			}
			return target.strData, pjStatusInvalidModuleSpecifier, pjDebug{
				token:          target.firstToken,
				invalidBecause: " because it doesn't end in \"/\"",
			}
		}

		// If target does not start with "./", then...
		if !strings.HasPrefix(target.strData, "./") {
			// An "imports" target that isn't a relative or absolute path is
			// treated as a bare specifier and re-enters package resolution
			if internal && !strings.HasPrefix(target.strData, "../") && !strings.HasPrefix(target.strData, "/") {
				if pattern {
					result := strings.ReplaceAll(target.strData, "*", subpath)
					if r.debugLogs != nil {
						r.debugLogs.addNote(fmt.Sprintf("Substituted %q for \"*\" in %q to get %q", subpath, target.strData, result))
					}
					return result, pjStatusPackageResolve, pjDebug{token: target.firstToken}
				}
				result := target.strData + subpath
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("Joined %q to %q to get %q", target.strData, subpath, result))
				}
				return result, pjStatusPackageResolve, pjDebug{token: target.firstToken}
			}
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The target %q is invalid because it doesn't start with \"./\"", target.strData))
			}
			return target.strData, pjStatusInvalidPackageTarget, pjDebug{
				token:          target.firstToken,
				invalidBecause: " because it doesn't start with \"./\"",
			}
		}

		// If target split on "/" or "\" contains any ".", ".." or "node_modules"
		// segments after the first segment, throw an Invalid Package Target error.
		if invalidSegment := findInvalidSegment(target.strData); invalidSegment != "" {
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The target %q is invalid because it contains invalid segment %q", target.strData, invalidSegment))
			}
			return target.strData, pjStatusInvalidPackageTarget, pjDebug{
				token:          target.firstToken,
				invalidBecause: fmt.Sprintf(" because it contains invalid segment %q", invalidSegment),
			}
		}

		// Let resolvedTarget be the URL resolution of the concatenation of packageURL and target.
		// Note: path.Join also collapses "." and ".." segments
		resolvedTarget := path.Join(packageURL, target.strData)

		// If subpath split on "/" or "\" contains any ".", ".." or "node_modules"
		// segments, throw an Invalid Module Specifier error.
		if invalidSegment := findInvalidSegment(subpath); invalidSegment != "" {
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The path %q is invalid because it contains invalid segment %q", subpath, invalidSegment))
			}
			return subpath, pjStatusInvalidModuleSpecifier, pjDebug{
				token:          target.firstToken,
				invalidBecause: fmt.Sprintf(" because it contains invalid segment %q", invalidSegment),
			}
		}

		if pattern {
			// Return the URL resolution of resolvedTarget with every instance of "*" replaced with subpath.
			result := strings.ReplaceAll(resolvedTarget, "*", subpath)
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("Substituted %q for \"*\" in %q to get %q", subpath, "."+resolvedTarget, "."+result))
			}
			// Remember when the target's only "*" was at the very end
			status := pjStatusExact
			if strings.HasSuffix(resolvedTarget, "*") && strings.IndexByte(resolvedTarget, '*') == len(resolvedTarget)-1 {
				status = pjStatusExactEndsWithStar
			}
			return result, status, pjDebug{token: target.firstToken}
		} else {
			// Return the URL resolution of the concatenation of subpath and resolvedTarget.
			result := path.Join(resolvedTarget, subpath)
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("Joined %q to %q to get %q", subpath, "."+resolvedTarget, "."+result))
			}
			return result, pjStatusExact, pjDebug{token: target.firstToken}
		}

	case pjObject:
		// A condition map: try each condition in declaration order
		if r.debugLogs != nil {
			keys := make([]string, 0, len(conditions))
			for key := range conditions {
				keys = append(keys, fmt.Sprintf("%q", key))
			}
			sort.Strings(keys)
			r.debugLogs.addNote(fmt.Sprintf("Checking condition map for one of [%s]", strings.Join(keys, ", ")))
			r.debugLogs.increaseIndent()
			defer r.debugLogs.decreaseIndent()
		}

		// Track the last matching condition whose value still resolved to
		// "undefined" so the error message below can point at its sub-conditions
		var didFindMapEntry bool
		var lastMapEntry pjMapEntry

		for _, p := range target.mapData {
			if p.key == "default" || conditions[p.key] {
				if r.debugLogs != nil {
					r.debugLogs.addNote(fmt.Sprintf("The key %q applies", p.key))
				}
				resolved, status, debug := r.esmPackageTargetResolve(packageURL, p.value, subpath, pattern, internal, conditions)
				if status.isUndefined() {
					didFindMapEntry = true
					lastMapEntry = p
					continue
				}
				return resolved, status, debug
			}
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The key %q does not apply", p.key))
			}
		}

		if r.debugLogs != nil {
			r.debugLogs.addNote("No keys in the map were applicable")
		}

		// ALGORITHM DEVIATION: Provide a friendly error message if no conditions matched
		if len(target.mapData) > 0 && !target.keysStartWithDot() {
			if didFindMapEntry && lastMapEntry.value.kind == pjObject &&
				len(lastMapEntry.value.mapData) > 0 && !lastMapEntry.value.keysStartWithDot() {
				// If a top-level condition did match but no sub-condition matched,
				// complain about the sub-condition instead of the top-level condition.
				// This leads to a less confusing error message. For example:
				//
				//   "exports": {
				//     "node": {
				//       "require": "./dist/bwip-js-node.js"
				//     }
				//   },
				//
				// We want the warning to say this:
				//
				//   note: None of the conditions in the package definition ("require") match any of the
				//         currently active conditions ("default", "import", "node")
				//   14 |       "node": {
				//      |               ^
				//
				// We don't want the warning to say this:
				//
				//   note: None of the conditions in the package definition ("browser", "electron", "node")
				//         match any of the currently active conditions ("default", "import", "node")
				//   7 |   "exports": {
				//     |              ^
				//
				// More information: https://github.com/evanw/esbuild/issues/1484
				target = lastMapEntry.value
			}
			keys := make([]logger.Span, len(target.mapData))
			for i, p := range target.mapData {
				keys[i] = logger.Span{Text: p.key, Range: p.keyRange}
			}
			return "", pjStatusUndefinedNoConditionsMatch, pjDebug{
				token:               target.firstToken,
				unmatchedConditions: keys,
			}
		}

		return "", pjStatusUndefined, pjDebug{token: target.firstToken}

	case pjArray:
		if len(target.arrData) == 0 {
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("The path %q is set to an empty array", subpath))
			}
			return "", pjStatusNull, pjDebug{token: target.firstToken}
		}
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Checking for %q in an array", subpath))
			r.debugLogs.increaseIndent()
			defer r.debugLogs.decreaseIndent()
		}
		// Arrays are fallback lists: the first successful entry wins
		lastException := pjStatusUndefined
		lastDebug := pjDebug{token: target.firstToken}
		for _, targetValue := range target.arrData {
			// Let resolved be the result, continuing the loop on any Invalid Package Target error.
			resolved, status, debug := r.esmPackageTargetResolve(packageURL, targetValue, subpath, pattern, internal, conditions)
			if status == pjStatusInvalidPackageTarget || status == pjStatusNull {
				lastException = status
				lastDebug = debug
				continue
			}
			if status.isUndefined() {
				continue
			}
			return resolved, status, debug
		}

		// Return or throw the last fallback resolution null return or error.
		return "", lastException, lastDebug

	case pjNull:
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("The path %q is set to null", subpath))
		}
		return "", pjStatusNull, pjDebug{token: target.firstToken, isBecauseOfNullLiteral: true}
	}

	// pjInvalid (and any unknown kind) falls through to here
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("Invalid package target for path %q", subpath))
	}
	return "", pjStatusInvalidPackageTarget, pjDebug{token: target.firstToken}
}
+
// esmParsePackageName splits an ESM package specifier into the package name
// and a "."-prefixed subpath. Scoped names ("@scope/name") span two path
// segments. It reports ok == false for empty specifiers, a bare "@scope"
// with no name, or names starting with "." or containing "\" or "%".
func esmParsePackageName(packageSpecifier string) (packageName string, packageSubpath string, ok bool) {
	if packageSpecifier == "" {
		return
	}

	if strings.HasPrefix(packageSpecifier, "@") {
		// Scoped package: the name covers "@scope/name"
		first := strings.IndexByte(packageSpecifier, '/')
		if first == -1 {
			return
		}
		second := strings.IndexByte(packageSpecifier[first+1:], '/')
		if second == -1 {
			second = len(packageSpecifier) - (first + 1)
		}
		packageName = packageSpecifier[:first+1+second]
	} else {
		// Unscoped package: the name is the first path segment
		end := strings.IndexByte(packageSpecifier, '/')
		if end == -1 {
			end = len(packageSpecifier)
		}
		packageName = packageSpecifier[:end]
	}

	if strings.HasPrefix(packageName, ".") || strings.ContainsAny(packageName, "\\%") {
		return
	}

	packageSubpath = "." + packageSpecifier[len(packageName):]
	ok = true
	return
}
+
+func (r resolverQuery) esmPackageExportsReverseResolve(
+	query string,
+	root pjEntry,
+	conditions map[string]bool,
+) (bool, string, logger.Range) {
+	if root.kind == pjObject && root.keysStartWithDot() {
+		if ok, subpath, token := r.esmPackageImportsExportsReverseResolve(query, root, conditions); ok {
+			return true, subpath, token
+		}
+	}
+
+	return false, "", logger.Range{}
+}
+
+func (r resolverQuery) esmPackageImportsExportsReverseResolve(
+	query string,
+	matchObj pjEntry,
+	conditions map[string]bool,
+) (bool, string, logger.Range) {
+	if !strings.HasSuffix(query, "*") {
+		for _, entry := range matchObj.mapData {
+			if ok, subpath, token := r.esmPackageTargetReverseResolve(query, entry.key, entry.value, esmReverseExact, conditions); ok {
+				return true, subpath, token
+			}
+		}
+	}
+
+	for _, expansion := range matchObj.expansionKeys {
+		if strings.HasSuffix(expansion.key, "*") {
+			if ok, subpath, token := r.esmPackageTargetReverseResolve(query, expansion.key, expansion.value, esmReversePattern, conditions); ok {
+				return true, subpath, token
+			}
+		}
+
+		if ok, subpath, token := r.esmPackageTargetReverseResolve(query, expansion.key, expansion.value, esmReversePrefix, conditions); ok {
+			return true, subpath, token
+		}
+	}
+
+	return false, "", logger.Range{}
+}
+
// esmReverseKind selects how esmPackageTargetReverseResolve matches the query
// against a string target.
type esmReverseKind uint8

const (
	esmReverseExact   esmReverseKind = iota // The query must equal the target exactly
	esmReversePattern                       // Match through the target's single "*" wildcard
	esmReversePrefix                        // The target is treated as a prefix of the query
)
+
// esmPackageTargetReverseResolve checks whether "query" could have been
// produced by resolving "key" through "target", recursing into condition
// objects (restricted by "conditions") and fallback arrays. On success it
// returns the reconstructed subpath key and the target's token range.
func (r resolverQuery) esmPackageTargetReverseResolve(
	query string,
	key string,
	target pjEntry,
	kind esmReverseKind,
	conditions map[string]bool,
) (bool, string, logger.Range) {
	switch target.kind {
	case pjString:
		switch kind {
		case esmReverseExact:
			if query == target.strData {
				return true, key, target.firstToken
			}

		case esmReversePrefix:
			// The part of the query after the target prefix is appended to the key
			if strings.HasPrefix(query, target.strData) {
				return true, key + query[len(target.strData):], target.firstToken
			}

		case esmReversePattern:
			star := strings.IndexByte(target.strData, '*')
			keyWithoutTrailingStar := strings.TrimSuffix(key, "*")

			// Handle the case of no "*"
			if star == -1 {
				if query == target.strData {
					return true, keyWithoutTrailingStar, target.firstToken
				}
				break
			}

			// Only support tracing through a single "*"
			prefix := target.strData[0:star]
			suffix := target.strData[star+1:]
			if !strings.ContainsRune(suffix, '*') && strings.HasPrefix(query, prefix) {
				if afterPrefix := query[len(prefix):]; strings.HasSuffix(afterPrefix, suffix) {
					// What the "*" matched gets substituted back into the key
					starData := afterPrefix[:len(afterPrefix)-len(suffix)]
					return true, keyWithoutTrailingStar + starData, target.firstToken
				}
			}
		}

	case pjObject:
		// Recurse into each condition that is active (or "default")
		for _, p := range target.mapData {
			if p.key == "default" || conditions[p.key] {
				if ok, subpath, token := r.esmPackageTargetReverseResolve(query, key, p.value, kind, conditions); ok {
					return true, subpath, token
				}
			}
		}

	case pjArray:
		// Recurse into each fallback entry
		for _, targetValue := range target.arrData {
			if ok, subpath, token := r.esmPackageTargetReverseResolve(query, key, targetValue, kind, conditions); ok {
				return true, subpath, token
			}
		}
	}

	return false, "", logger.Range{}
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go b/source/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go
new file mode 100644
index 0000000..b3f6c8b
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/resolver.go
@@ -0,0 +1,2923 @@
+package resolver
+
+import (
+	"errors"
+	"fmt"
+	"path"
+	"regexp"
+	"sort"
+	"strings"
+	"sync"
+	"syscall"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/cache"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// defaultMainFields gives the "package.json" main fields consulted for each
// platform when the user does not configure "main fields" explicitly.
var defaultMainFields = map[config.Platform][]string{
	// Note that this means if a package specifies "main", "module", and
	// "browser" then "browser" will win out over "module". This is the
	// same behavior as webpack: https://github.com/webpack/webpack/issues/4674.
	//
	// This is deliberate because the presence of the "browser" field is a
	// good signal that the "module" field may have non-browser stuff in it,
	// which will crash or fail to be bundled when targeting the browser.
	config.PlatformBrowser: {"browser", "module", "main"},

	// Note that this means if a package specifies "module" and "main", the ES6
	// module will not be selected. This means tree shaking will not work when
	// targeting node environments.
	//
	// This is unfortunately necessary for compatibility. Some packages
	// incorrectly treat the "module" field as "code for the browser". It
	// actually means "code for ES6 environments" which includes both node
	// and the browser.
	//
	// For example, the package "@firebase/app" prints a warning on startup about
	// the bundler incorrectly using code meant for the browser if the bundler
	// selects the "module" field instead of the "main" field.
	//
	// If you want to enable tree shaking when targeting node, you will have to
	// configure the main fields to be "module" and then "main". Keep in mind
	// that some packages may break if you do this.
	config.PlatformNode: {"main", "module"},

	// The neutral platform is for people that don't want esbuild to try to
	// pick good defaults for their platform. In that case, the list of main
	// fields is empty by default. You must explicitly configure it yourself.
	config.PlatformNeutral: {},
}

// These are the main fields to use when the "main fields" setting is configured
// to something unusual, such as something without the "main" field.
var mainFieldsForFailure = []string{"main", "module"}
+
// Path resolution is a mess. One tricky issue is the "module" override for the
// "main" field in "package.json" files. Bundlers generally prefer "module" over
// "main" but that breaks packages that export a function in "main" for use with
// "require()", since resolving to "module" means an object will be returned. We
// attempt to handle this automatically by having import statements resolve to
// "module" but switch that out later for "main" if "require()" is used too.
type PathPair struct {
	// Either secondary will be empty, or primary will be "module" and secondary
	// will be "main"
	Primary   logger.Path
	Secondary logger.Path

	// True if this path should be treated as external (i.e. not bundled)
	IsExternal bool
}
+
+func (pp *PathPair) iter() []*logger.Path {
+	result := []*logger.Path{&pp.Primary, &pp.Secondary}
+	if !pp.HasSecondary() {
+		result = result[:1]
+	}
+	return result
+}
+
+func (pp *PathPair) HasSecondary() bool {
+	return pp.Secondary.Text != ""
+}
+
// SideEffectsData records where a "sideEffects" annotation came from (a
// "package.json" file or a plugin) so that log messages can point at it.
type SideEffectsData struct {
	Source *logger.Source

	// If non-empty, this false value came from a plugin
	PluginName string

	Range logger.Range

	// If true, "sideEffects" was an array. If false, "sideEffects" was false.
	IsSideEffectsArrayInJSON bool
}
+
// ResolveResult is the successful outcome of a path resolution. It bundles
// the resolved path pair with metadata gathered along the way: "sideEffects"
// information, "tsconfig.json" settings, and the "package.json" module type.
type ResolveResult struct {
	PathPair PathPair

	// If this was resolved by a plugin, the plugin gets to store its data here
	PluginData interface{}

	DifferentCase *fs.DifferentCase

	// If present, any ES6 imports to this file can be considered to have no side
	// effects. This means they should be removed if unused.
	PrimarySideEffectsData *SideEffectsData

	// These are from "tsconfig.json"
	TSConfigJSX    config.TSConfigJSX
	TSConfig       *config.TSConfig
	TSAlwaysStrict *config.TSAlwaysStrict

	// This is the "type" field from "package.json"
	ModuleTypeData js_ast.ModuleTypeData
}
+
// suggestionRange controls where a recorded suggestion note is anchored:
// over the full range of the import path, or only at its last character.
type suggestionRange uint8

const (
	suggestionRangeFull suggestionRange = iota // anchor the note on the whole range
	suggestionRangeEnd                         // anchor the note at the end of the range
)
+
// DebugMeta carries extra diagnostic information gathered during a resolve
// attempt so the caller can produce a helpful error message afterward.
type DebugMeta struct {
	notes              []logger.MsgData // resolver trace notes to attach to the error
	suggestionText     string           // replacement text for the suggested fix, if any
	suggestionMessage  string           // human-readable description of the suggested fix
	suggestionRange    suggestionRange  // where to anchor the suggestion note
	ModifiedImportPath string           // set when a package alias rewrote the import path
}
+
+func (dm DebugMeta) LogErrorMsg(log logger.Log, source *logger.Source, r logger.Range, text string, suggestion string, notes []logger.MsgData) {
+	tracker := logger.MakeLineColumnTracker(source)
+
+	if source != nil && dm.suggestionMessage != "" {
+		suggestionRange := r
+		if dm.suggestionRange == suggestionRangeEnd {
+			suggestionRange = logger.Range{Loc: logger.Loc{Start: r.End() - 1}}
+		}
+		data := tracker.MsgData(suggestionRange, dm.suggestionMessage)
+		data.Location.Suggestion = dm.suggestionText
+		dm.notes = append(dm.notes, data)
+	}
+
+	msg := logger.Msg{
+		Kind:  logger.Error,
+		Data:  tracker.MsgData(r, text),
+		Notes: append(dm.notes, notes...),
+	}
+
+	if msg.Data.Location != nil && suggestion != "" {
+		msg.Data.Location.Suggestion = suggestion
+	}
+
+	log.AddMsg(msg)
+}
+
// Resolver resolves import paths according to the configured platform,
// conditions, and extension orders, caching directory information as it goes.
// One Resolver is shared across a build; see the note on "mutex" below.
type Resolver struct {
	fs     fs.FS
	log    logger.Log
	caches *cache.CacheSet

	// Parsed result of the user-provided "tsconfig.json" override, if any
	tsConfigOverride *TSConfigJSON

	// These are sets that represent various conditions for the "exports" field
	// in package.json.
	esmConditionsDefault map[string]bool
	esmConditionsImport  map[string]bool
	esmConditionsRequire map[string]bool

	// A special filtered import order for CSS "@import" imports.
	//
	// The "resolve extensions" setting determines the order of implicit
	// extensions to try when resolving imports with the extension omitted.
	// Sometimes people create a JavaScript/TypeScript file and a CSS file with
	// the same name when they create a component. At a high level, users expect
	// implicit extensions to resolve to the JS file when being imported from JS
	// and to resolve to the CSS file when being imported from CSS.
	//
	// Different bundlers handle this in different ways. Parcel handles this by
	// having the resolver prefer the same extension as the importing file in
	// front of the configured "resolve extensions" order. Webpack's "css-loader"
	// plugin just explicitly configures a special "resolve extensions" order
	// consisting of only ".css" for CSS files.
	//
	// It's unclear what behavior is best here. What we currently do is to create
	// a special filtered version of the configured "resolve extensions" order
	// for CSS files that filters out any extension that has been explicitly
	// configured with a non-CSS loader. This still gives users control over the
	// order but avoids the scenario where we match an import in a CSS file to a
	// JavaScript-related file. It's probably not perfect with plugins in the
	// picture but it's better than some alternatives and probably pretty good.
	cssExtensionOrder []string

	// A special sorted import order for imports inside packages.
	//
	// The "resolve extensions" setting determines the order of implicit
	// extensions to try when resolving imports with the extension omitted.
	// Sometimes people author a package using TypeScript and publish both the
	// compiled JavaScript and the original TypeScript. The compiled JavaScript
	// depends on the "tsconfig.json" settings that were passed to "tsc" when
	// it was compiled, and we don't know what they are (they may even be
	// unknowable if the "tsconfig.json" file wasn't published).
	//
	// To work around this, we sort TypeScript file extensions after JavaScript
	// file extensions (but only within packages) so that esbuild doesn't load
	// the original source code in these scenarios. Instead we should load the
	// compiled code, which is what will be loaded by node at run-time.
	nodeModulesExtensionOrder []string

	// This cache maps a directory path to information about that directory and
	// all parent directories
	dirCache map[string]*dirInfo

	// Yarn PnP manifest discovery is done lazily on the first resolve
	pnpManifestWasChecked bool
	pnpManifest           *pnpData

	options config.Options

	// This mutex serves two purposes. First of all, it guards access to "dirCache"
	// which is potentially mutated during path resolution. But this mutex is also
	// necessary for performance. The "React admin" benchmark mysteriously runs
	// twice as fast when this mutex is locked around the whole resolve operation
	// instead of around individual accesses to "dirCache". For some reason,
	// reducing parallelism in the resolver helps the rest of the bundler go
	// faster. I'm not sure why this is but please don't change this unless you
	// do a lot of testing with various benchmarks and there aren't any regressions.
	mutex sync.Mutex
}
+
// resolverQuery augments the shared Resolver with per-query state: the kind
// of import being resolved plus optional debug-log and metadata sinks.
type resolverQuery struct {
	*Resolver
	debugMeta *DebugMeta
	debugLogs *debugLogs
	kind      ast.ImportKind
}
+
+func NewResolver(call config.APICall, fs fs.FS, log logger.Log, caches *cache.CacheSet, options *config.Options) *Resolver {
+	// Filter out non-CSS extensions for CSS "@import" imports
+	cssExtensionOrder := make([]string, 0, len(options.ExtensionOrder))
+	for _, ext := range options.ExtensionOrder {
+		if loader, ok := options.ExtensionToLoader[ext]; !ok || loader.IsCSS() {
+			cssExtensionOrder = append(cssExtensionOrder, ext)
+		}
+	}
+
+	// Sort all TypeScript file extensions after all JavaScript file extensions
+	// for imports of files inside of "node_modules" directories. But insert
+	// the TypeScript file extensions right after the last JavaScript file
+	// extension instead of at the end so that they might come before the
+	// first CSS file extension, which is important to people that publish
+	// TypeScript and CSS code to npm with the same file names for both.
+	nodeModulesExtensionOrder := make([]string, 0, len(options.ExtensionOrder))
+	split := 0
+	for i, ext := range options.ExtensionOrder {
+		if loader, ok := options.ExtensionToLoader[ext]; ok && loader == config.LoaderJS || loader == config.LoaderJSX {
+			split = i + 1 // Split after the last JavaScript extension
+		}
+	}
+	if split != 0 { // Only do this if there are any JavaScript extensions
+		for _, ext := range options.ExtensionOrder[:split] { // Non-TypeScript extensions before the split
+			if loader, ok := options.ExtensionToLoader[ext]; !ok || !loader.IsTypeScript() {
+				nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
+			}
+		}
+		for _, ext := range options.ExtensionOrder { // All TypeScript extensions
+			if loader, ok := options.ExtensionToLoader[ext]; ok && loader.IsTypeScript() {
+				nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
+			}
+		}
+		for _, ext := range options.ExtensionOrder[split:] { // Non-TypeScript extensions after the split
+			if loader, ok := options.ExtensionToLoader[ext]; !ok || !loader.IsTypeScript() {
+				nodeModulesExtensionOrder = append(nodeModulesExtensionOrder, ext)
+			}
+		}
+	}
+
+	// Generate the condition sets for interpreting the "exports" field
+	esmConditionsDefault := map[string]bool{"default": true}
+	esmConditionsImport := map[string]bool{"import": true}
+	esmConditionsRequire := map[string]bool{"require": true}
+	for _, condition := range options.Conditions {
+		esmConditionsDefault[condition] = true
+	}
+	switch options.Platform {
+	case config.PlatformBrowser:
+		esmConditionsDefault["browser"] = true
+	case config.PlatformNode:
+		esmConditionsDefault["node"] = true
+	}
+	for key := range esmConditionsDefault {
+		esmConditionsImport[key] = true
+		esmConditionsRequire[key] = true
+	}
+
+	fs.Cwd()
+
+	res := &Resolver{
+		fs:                        fs,
+		log:                       log,
+		options:                   *options,
+		caches:                    caches,
+		dirCache:                  make(map[string]*dirInfo),
+		cssExtensionOrder:         cssExtensionOrder,
+		nodeModulesExtensionOrder: nodeModulesExtensionOrder,
+		esmConditionsDefault:      esmConditionsDefault,
+		esmConditionsImport:       esmConditionsImport,
+		esmConditionsRequire:      esmConditionsRequire,
+	}
+
+	// Handle the "tsconfig.json" override when the resolver is created. This
+	// isn't done when we validate the build options both because the code for
+	// "tsconfig.json" handling is already in the resolver, and because we want
+	// watch mode to pick up changes to "tsconfig.json" and rebuild.
+	var debugMeta DebugMeta
+	if options.TSConfigPath != "" || options.TSConfigRaw != "" {
+		r := resolverQuery{
+			Resolver:  res,
+			debugMeta: &debugMeta,
+		}
+		var visited map[string]bool
+		var err error
+		if call == config.BuildCall {
+			visited = make(map[string]bool)
+		}
+		if options.TSConfigPath != "" {
+			if r.log.Level <= logger.LevelDebug {
+				r.debugLogs = &debugLogs{what: fmt.Sprintf("Resolving tsconfig file %q", options.TSConfigPath)}
+			}
+			res.tsConfigOverride, err = r.parseTSConfig(options.TSConfigPath, visited, fs.Dir(options.TSConfigPath))
+		} else {
+			source := logger.Source{
+				KeyPath:    logger.Path{Text: fs.Join(fs.Cwd(), "<tsconfig.json>"), Namespace: "file"},
+				PrettyPath: "<tsconfig.json>",
+				Contents:   options.TSConfigRaw,
+			}
+			res.tsConfigOverride, err = r.parseTSConfigFromSource(source, visited, fs.Cwd())
+		}
+		if err != nil {
+			if err == syscall.ENOENT {
+				r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot find tsconfig file %q",
+					PrettyPath(r.fs, logger.Path{Text: options.TSConfigPath, Namespace: "file"})))
+			} else if err != errParseErrorAlreadyLogged {
+				r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot read file %q: %s",
+					PrettyPath(r.fs, logger.Path{Text: options.TSConfigPath, Namespace: "file"}), err.Error()))
+			}
+		} else {
+			r.flushDebugLogs(flushDueToSuccess)
+		}
+	}
+
+	// Mutate the provided options by settings from "tsconfig.json" if present
+	if res.tsConfigOverride != nil {
+		options.TS.Config = res.tsConfigOverride.Settings
+		res.tsConfigOverride.JSXSettings.ApplyTo(&options.JSX)
+		options.TSAlwaysStrict = res.tsConfigOverride.TSAlwaysStrictOrStrict()
+	}
+
+	return res
+}
+
// Resolve resolves a single import path relative to the given source
// directory. It returns nil on failure. On success the result carries the
// resolved path pair plus metadata gathered along the way. The DebugMeta
// value is always returned so the caller can attach resolver notes and
// suggestions to any error message it produces.
func (res *Resolver) Resolve(sourceDir string, importPath string, kind ast.ImportKind) (*ResolveResult, DebugMeta) {
	var debugMeta DebugMeta
	r := resolverQuery{
		Resolver:  res,
		debugMeta: &debugMeta,
		kind:      kind,
	}
	if r.log.Level <= logger.LevelDebug {
		r.debugLogs = &debugLogs{what: fmt.Sprintf(
			"Resolving import %q in directory %q of type %q",
			importPath, sourceDir, kind.StringForMetafile())}
	}

	// Apply package alias substitutions first
	if r.options.PackageAliases != nil && IsPackagePath(importPath) {
		if r.debugLogs != nil {
			r.debugLogs.addNote("Checking for package alias matches")
		}
		longestKey := ""
		longestValue := ""

		// Pick the longest alias whose key is a whole-path-segment prefix of
		// the import path
		for key, value := range r.options.PackageAliases {
			if len(key) > len(longestKey) && strings.HasPrefix(importPath, key) && (len(importPath) == len(key) || importPath[len(key)] == '/') {
				longestKey = key
				longestValue = value
			}
		}

		if longestKey != "" {
			debugMeta.ModifiedImportPath = longestValue
			if tail := importPath[len(longestKey):]; tail != "/" {
				// Don't include the trailing characters if they are equal to a
				// single slash. This comes up because you can abuse this quirk of
				// node's path resolution to force node to load the package from the
				// file system instead of as a built-in module. For example, "util"
				// is node's built-in module while "util/" is one on the file system.
				// Leaving the trailing slash in place causes problems for people:
				// https://github.com/evanw/esbuild/issues/2730. It should be ok to
				// always strip the trailing slash even when using the alias feature
				// to swap one package for another (except when you swap a reference
				// to one built-in node module with another but really why would you
				// do that).
				debugMeta.ModifiedImportPath += tail
			}
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("  Matched with alias from %q to %q", longestKey, longestValue))
				r.debugLogs.addNote(fmt.Sprintf("  Modified import path from %q to %q", importPath, debugMeta.ModifiedImportPath))
			}
			importPath = debugMeta.ModifiedImportPath

			// Resolve the package using the current path instead of the original
			// path. This is trying to resolve the substitute in the top-level
			// package instead of the nested package, which lets the top-level
			// package control the version of the substitution. It's also critical
			// when using Yarn PnP because Yarn PnP doesn't allow nested packages
			// to "reach outside" of their normal dependency lists.
			sourceDir = r.fs.Cwd()
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("  Changed resolve directory to %q", sourceDir))
			}
		} else if r.debugLogs != nil {
			r.debugLogs.addNote("  Failed to find any package alias matches")
		}
	}

	// Certain types of URLs default to being external for convenience
	if isExplicitlyExternal := r.isExternal(r.options.ExternalSettings.PreResolve, importPath, kind); isExplicitlyExternal ||

		// "fill: url(#filter);"
		(kind == ast.ImportURL && strings.HasPrefix(importPath, "#")) ||

		// "background: url(http://example.com/images/image.png);"
		strings.HasPrefix(importPath, "http://") ||

		// "background: url(https://example.com/images/image.png);"
		strings.HasPrefix(importPath, "https://") ||

		// "background: url(//example.com/images/image.png);"
		strings.HasPrefix(importPath, "//") {

		if r.debugLogs != nil {
			if isExplicitlyExternal {
				r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", importPath))
			} else {
				r.debugLogs.addNote("Marking this path as implicitly external")
			}
		}

		r.flushDebugLogs(flushDueToSuccess)
		return &ResolveResult{
			PathPair: PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true},
		}, debugMeta
	}

	if pathPair, ok, sideEffects := r.checkForBuiltInNodeModules(importPath); ok {
		r.flushDebugLogs(flushDueToSuccess)
		return &ResolveResult{
			PathPair:               pathPair,
			PrimarySideEffectsData: sideEffects,
		}, debugMeta
	}

	if parsed, ok := ParseDataURL(importPath); ok {
		// "import 'data:text/javascript,console.log(123)';"
		// "@import 'data:text/css,body{background:white}';"
		if parsed.DecodeMIMEType() != MIMETypeUnsupported {
			if r.debugLogs != nil {
				r.debugLogs.addNote("Putting this path in the \"dataurl\" namespace")
			}
			r.flushDebugLogs(flushDueToSuccess)
			return &ResolveResult{
				PathPair: PathPair{Primary: logger.Path{Text: importPath, Namespace: "dataurl"}},
			}, debugMeta
		}

		// "background: url(data:image/png;base64,iVBORw0KGgo=);"
		if r.debugLogs != nil {
			r.debugLogs.addNote("Marking this data URL as external")
		}
		r.flushDebugLogs(flushDueToSuccess)
		return &ResolveResult{
			PathPair: PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true},
		}, debugMeta
	}

	// Fail now if there is no directory to resolve in. This can happen for
	// virtual modules (e.g. stdin) if a resolve directory is not specified.
	if sourceDir == "" {
		if r.debugLogs != nil {
			r.debugLogs.addNote("Cannot resolve this path without a directory")
		}
		r.flushDebugLogs(flushDueToFailure)
		return nil, debugMeta
	}

	// Glob imports only work in a multi-path context
	if strings.ContainsRune(importPath, '*') {
		if r.debugLogs != nil {
			r.debugLogs.addNote("Cannot resolve a path containing a wildcard character in a single-path context")
		}
		r.flushDebugLogs(flushDueToFailure)
		return nil, debugMeta
	}

	// Everything from here on reads and writes "dirCache", so hold the big
	// resolver lock for the rest of the operation (see the comment on "mutex")
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// Check for the Yarn PnP manifest if it hasn't already been checked for
	if !r.pnpManifestWasChecked {
		r.pnpManifestWasChecked = true

		// Use the current working directory to find the Yarn PnP manifest. We
		// can't necessarily use the entry point locations because the entry
		// point locations aren't necessarily file paths. For example, they could
		// be HTTP URLs that will be handled by a plugin.
		for dirInfo := r.dirInfoCached(r.fs.Cwd()); dirInfo != nil; dirInfo = dirInfo.parent {
			if absPath := dirInfo.pnpManifestAbsPath; absPath != "" {
				if strings.HasSuffix(absPath, ".json") {
					if json, source := r.extractYarnPnPDataFromJSON(absPath, pnpReportErrorsAboutMissingFiles); json.Data != nil {
						r.pnpManifest = compileYarnPnPData(absPath, r.fs.Dir(absPath), json, source)
					}
				} else {
					if json, source := r.tryToExtractYarnPnPDataFromJS(absPath, pnpReportErrorsAboutMissingFiles); json.Data != nil {
						r.pnpManifest = compileYarnPnPData(absPath, r.fs.Dir(absPath), json, source)
					}
				}
				if r.debugLogs != nil && r.pnpManifest != nil && r.pnpManifest.invalidIgnorePatternData != "" {
					r.debugLogs.addNote("  Invalid Go regular expression for \"ignorePatternData\": " + r.pnpManifest.invalidIgnorePatternData)
				}
				break
			}
		}
	}

	sourceDirInfo := r.dirInfoCached(sourceDir)
	if sourceDirInfo == nil {
		// Bail if the directory is missing for some reason
		return nil, debugMeta
	}

	result := r.resolveWithoutSymlinks(sourceDir, sourceDirInfo, importPath)
	if result == nil {
		// If resolution failed, try again with the URL query and/or hash removed.
		// Note "< 1" rather than "< 0": a "?" or "#" at index 0 would leave an
		// empty path, so it is not treated as a removable suffix.
		suffix := strings.IndexAny(importPath, "?#")
		if suffix < 1 {
			r.flushDebugLogs(flushDueToFailure)
			return nil, debugMeta
		}
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Retrying resolution after removing the suffix %q", importPath[suffix:]))
		}
		if result2 := r.resolveWithoutSymlinks(sourceDir, sourceDirInfo, importPath[:suffix]); result2 == nil {
			r.flushDebugLogs(flushDueToFailure)
			return nil, debugMeta
		} else {
			result = result2
			result.PathPair.Primary.IgnoredSuffix = importPath[suffix:]
			if result.PathPair.HasSecondary() {
				result.PathPair.Secondary.IgnoredSuffix = importPath[suffix:]
			}
		}
	}

	// If successful, resolve symlinks using the directory info cache
	r.finalizeResolve(result)
	r.flushDebugLogs(flushDueToSuccess)
	return result, debugMeta
}
+
// This returns nil on failure and non-nil on success. Note that this may
// return an empty array to indicate a successful search that returned zero
// results.
//
// The glob pattern arrives pre-split into "parts" (literal prefix + wildcard
// kind); "prettyPattern" is only used for log messages. Matching file paths
// are returned keyed by their pattern-relative path.
func (res *Resolver) ResolveGlob(sourceDir string, importPathPattern []helpers.GlobPart, kind ast.ImportKind, prettyPattern string) (map[string]ResolveResult, *logger.Msg) {
	var debugMeta DebugMeta
	r := resolverQuery{
		Resolver:  res,
		debugMeta: &debugMeta,
		kind:      kind,
	}

	if r.log.Level <= logger.LevelDebug {
		r.debugLogs = &debugLogs{what: fmt.Sprintf(
			"Resolving glob import %s in directory %q of type %q",
			prettyPattern, sourceDir, kind.StringForMetafile())}
	}

	if len(importPathPattern) == 0 {
		if r.debugLogs != nil {
			r.debugLogs.addNote("Ignoring empty glob pattern")
		}
		r.flushDebugLogs(flushDueToFailure)
		return nil, nil
	}
	firstPrefix := importPathPattern[0].Prefix

	// Glob patterns only work for relative URLs
	if !strings.HasPrefix(firstPrefix, "./") && !strings.HasPrefix(firstPrefix, "../") &&
		!strings.HasPrefix(firstPrefix, ".\\") && !strings.HasPrefix(firstPrefix, "..\\") {
		if kind == ast.ImportEntryPoint {
			// Be permissive about forgetting "./" for entry points since it's common
			// to omit "./" on the command line. But don't accidentally treat absolute
			// paths as relative (even on Windows).
			if !r.fs.IsAbs(firstPrefix) {
				firstPrefix = "./" + firstPrefix
			}
		} else {
			// Don't allow omitting "./" for other imports since node doesn't let you do this either
			if r.debugLogs != nil {
				r.debugLogs.addNote("Ignoring glob import that doesn't start with \"./\" or \"../\"")
			}
			r.flushDebugLogs(flushDueToFailure)
			return nil, nil
		}
	}

	// Handle leading directories in the pattern (including "../"): advance
	// "dirPrefix" past every path separator that comes before the first "*"
	dirPrefix := 0
	for {
		slash := strings.IndexAny(firstPrefix[dirPrefix:], "/\\")
		if slash == -1 {
			break
		}
		if star := strings.IndexByte(firstPrefix[dirPrefix:], '*'); star != -1 && slash > star {
			break
		}
		dirPrefix += slash + 1
	}

	// If the pattern is an absolute path, then just replace source directory.
	// Otherwise join the source directory with the prefix from the pattern.
	if suffix := firstPrefix[:dirPrefix]; r.fs.IsAbs(suffix) {
		sourceDir = suffix
	} else {
		sourceDir = r.fs.Join(sourceDir, suffix)
	}

	// Directory traversal below goes through "dirInfoCached", so hold the
	// resolver lock (see the comment on "mutex")
	r.mutex.Lock()
	defer r.mutex.Unlock()

	// Look up the directory to start from
	sourceDirInfo := r.dirInfoCached(sourceDir)
	if sourceDirInfo == nil {
		if r.debugLogs != nil {
			r.debugLogs.addNote(fmt.Sprintf("Failed to find the directory %q", sourceDir))
		}
		r.flushDebugLogs(flushDueToFailure)
		return nil, nil
	}

	// Turn the glob pattern into a regular expression. "canMatchOnSlash" is
	// set when the pattern contains a globstar, meaning a match may span path
	// separators and subdirectories must be recursed into.
	canMatchOnSlash := false
	wasGlobStar := false
	sb := strings.Builder{}
	sb.WriteByte('^')
	for i, part := range importPathPattern {
		prefix := part.Prefix
		if i == 0 {
			prefix = firstPrefix
		}
		if wasGlobStar && len(prefix) > 0 && (prefix[0] == '/' || prefix[0] == '\\') {
			prefix = prefix[1:] // Move over the "/" after a globstar
		}
		sb.WriteString(regexp.QuoteMeta(prefix))
		switch part.Wildcard {
		case helpers.GlobAllIncludingSlash:
			// It's a globstar, so match zero or more path segments
			sb.WriteString("(?:[^/]*(?:/|$))*")
			canMatchOnSlash = true
			wasGlobStar = true
		case helpers.GlobAllExceptSlash:
			// It's not a globstar, so only match one path segment
			sb.WriteString("[^/]*")
			wasGlobStar = false
		}
	}
	sb.WriteByte('$')
	re := regexp.MustCompile(sb.String())

	// Initialize "results" to a non-nil value to indicate that the glob is valid
	results := make(map[string]ResolveResult)

	// visit recursively walks the directory tree rooted at "dirInfo",
	// collecting every file whose pattern-relative path matches the regex.
	// "dir" is the pattern-relative path of this directory ("" at the root,
	// always "/"-terminated otherwise).
	var visit func(dirInfo *dirInfo, dir string)
	visit = func(dirInfo *dirInfo, dir string) {
		for _, key := range dirInfo.entries.SortedKeys() {
			entry, _ := dirInfo.entries.Get(key)
			if r.debugLogs != nil {
				r.debugLogs.addNote(fmt.Sprintf("Considering entry %q", r.fs.Join(dirInfo.absPath, key)))
				r.debugLogs.increaseIndent()
			}

			switch entry.Kind(r.fs) {
			case fs.DirEntry:
				// To avoid infinite loops, don't follow any symlinks
				if canMatchOnSlash && entry.Symlink(r.fs) == "" {
					if childDirInfo := r.dirInfoCached(r.fs.Join(dirInfo.absPath, key)); childDirInfo != nil {
						visit(childDirInfo, fmt.Sprintf("%s%s/", dir, key))
					}
				}

			case fs.FileEntry:
				if relPath := dir + key; re.MatchString(relPath) {
					var result ResolveResult

					if r.isExternal(r.options.ExternalSettings.PreResolve, relPath, kind) {
						result.PathPair = PathPair{Primary: logger.Path{Text: relPath}, IsExternal: true}

						if r.debugLogs != nil {
							r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", result.PathPair.Primary.Text))
						}
					} else {
						absPath := r.fs.Join(dirInfo.absPath, key)
						result.PathPair = PathPair{Primary: logger.Path{Text: absPath, Namespace: "file"}}
					}

					r.finalizeResolve(&result)
					results[relPath] = result
				}
			}

			if r.debugLogs != nil {
				r.debugLogs.decreaseIndent()
			}
		}
	}

	visit(sourceDirInfo, firstPrefix[:dirPrefix])

	var warning *logger.Msg
	if len(results) == 0 {
		warning = &logger.Msg{
			ID:   logger.MsgID_Bundler_EmptyGlob,
			Kind: logger.Warning,
			Data: logger.MsgData{Text: fmt.Sprintf("The glob pattern %s did not match any files", prettyPattern)},
		}
	}

	r.flushDebugLogs(flushDueToSuccess)
	return results, warning
}
+
+func (r resolverQuery) isExternal(matchers config.ExternalMatchers, path string, kind ast.ImportKind) bool {
+	if kind == ast.ImportEntryPoint {
+		// Never mark an entry point as external. This is not useful.
+		return false
+	}
+	if _, ok := matchers.Exact[path]; ok {
+		return true
+	}
+	for _, pattern := range matchers.Patterns {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Checking %q against the external pattern %q", path, pattern.Prefix+"*"+pattern.Suffix))
+		}
+		if len(path) >= len(pattern.Prefix)+len(pattern.Suffix) &&
+			strings.HasPrefix(path, pattern.Prefix) &&
+			strings.HasSuffix(path, pattern.Suffix) {
+			return true
+		}
+	}
+	return false
+}
+
+// This tries to run "Resolve" on a package path as a relative path. If
+// successful, the user just forgot a leading "./" in front of the path.
+func (res *Resolver) ProbeResolvePackageAsRelative(sourceDir string, importPath string, kind ast.ImportKind) (*ResolveResult, DebugMeta) {
+	var debugMeta DebugMeta
+	r := resolverQuery{
+		Resolver:  res,
+		debugMeta: &debugMeta,
+		kind:      kind,
+	}
+	absPath := r.fs.Join(sourceDir, importPath)
+
+	r.mutex.Lock()
+	defer r.mutex.Unlock()
+
+	if pair, ok, diffCase := r.loadAsFileOrDirectory(absPath); ok {
+		result := &ResolveResult{PathPair: pair, DifferentCase: diffCase}
+		r.finalizeResolve(result)
+		r.flushDebugLogs(flushDueToSuccess)
+		return result, debugMeta
+	}
+
+	return nil, debugMeta
+}
+
+type debugLogs struct {
+	what   string
+	indent string
+	notes  []logger.MsgData
+}
+
+func (d *debugLogs) addNote(text string) {
+	if d.indent != "" {
+		text = d.indent + text
+	}
+	d.notes = append(d.notes, logger.MsgData{Text: text, DisableMaximumWidth: true})
+}
+
+func (d *debugLogs) increaseIndent() {
+	d.indent += "  "
+}
+
+func (d *debugLogs) decreaseIndent() {
+	d.indent = d.indent[2:]
+}
+
// flushMode tells flushDebugLogs whether the query being flushed failed or
// succeeded, which determines the log level used for the notes.
type flushMode uint8

const (
	flushDueToFailure flushMode = iota
	flushDueToSuccess
)
+
+func (r resolverQuery) flushDebugLogs(mode flushMode) {
+	if r.debugLogs != nil {
+		if mode == flushDueToFailure {
+			r.log.AddIDWithNotes(logger.MsgID_None, logger.Debug, nil, logger.Range{}, r.debugLogs.what, r.debugLogs.notes)
+		} else if r.log.Level <= logger.LevelVerbose {
+			r.log.AddIDWithNotes(logger.MsgID_None, logger.Verbose, nil, logger.Range{}, r.debugLogs.what, r.debugLogs.notes)
+		}
+	}
+}
+
+// finalizeResolve applies post-resolution adjustments to "result": marking
+// user-configured post-resolve externals, rewriting symlinked paths to their
+// real paths (unless "PreserveSymlinks" is set), and copying metadata from the
+// nearest enclosing "package.json" ("sideEffects", "type") and "tsconfig.json"
+// files. Metadata is only taken from the primary path of the pair.
+func (r resolverQuery) finalizeResolve(result *ResolveResult) {
+	if !result.PathPair.IsExternal && r.isExternal(r.options.ExternalSettings.PostResolve, result.PathPair.Primary.Text, r.kind) {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", result.PathPair.Primary.Text))
+		}
+		result.PathPair.IsExternal = true
+	} else {
+		for i, path := range result.PathPair.iter() {
+			// Only file-system paths can have symlinks or package metadata
+			if path.Namespace != "file" {
+				continue
+			}
+			dirInfo := r.dirInfoCached(r.fs.Dir(path.Text))
+			if dirInfo == nil {
+				continue
+			}
+			base := r.fs.Base(path.Text)
+
+			// If the path contains symlinks, rewrite the path to the real path
+			if !r.options.PreserveSymlinks {
+				if entry, _ := dirInfo.entries.Get(base); entry != nil {
+					symlink := entry.Symlink(r.fs)
+					if symlink != "" {
+						// This means the entry itself is a symlink
+					} else if dirInfo.absRealPath != "" {
+						// There is at least one parent directory with a symlink
+						symlink = r.fs.Join(dirInfo.absRealPath, base)
+					}
+					if symlink != "" {
+						if r.debugLogs != nil {
+							r.debugLogs.addNote(fmt.Sprintf("Resolved symlink %q to %q", path.Text, symlink))
+						}
+						path.Text = symlink
+
+						// Look up the directory over again if it was changed
+						dirInfo = r.dirInfoCached(r.fs.Dir(path.Text))
+						if dirInfo == nil {
+							continue
+						}
+						base = r.fs.Base(path.Text)
+					}
+				}
+			}
+
+			// Path attributes are only taken from the primary path
+			if i > 0 {
+				continue
+			}
+
+			// Path attributes are not taken from disabled files
+			if path.IsDisabled() {
+				continue
+			}
+
+			// Look up this file in the "sideEffects" map in the nearest enclosing
+			// directory with a "package.json" file.
+			//
+			// Only do this for the primary path. Some packages have the primary
+			// path marked as having side effects and the secondary path marked
+			// as not having side effects. This is likely a bug in the package
+			// definition but we don't want to consider the primary path as not
+			// having side effects just because the secondary path is marked as
+			// not having side effects.
+			if pkgJSON := dirInfo.enclosingPackageJSON; pkgJSON != nil {
+				if pkgJSON.sideEffectsMap != nil {
+					hasSideEffects := false
+					pathLookup := strings.ReplaceAll(path.Text, "\\", "/") // Avoid problems with Windows-style slashes
+					if pkgJSON.sideEffectsMap[pathLookup] {
+						// Fast path: map lookup
+						hasSideEffects = true
+					} else {
+						// Slow path: glob tests
+						for _, re := range pkgJSON.sideEffectsRegexps {
+							if re.MatchString(pathLookup) {
+								hasSideEffects = true
+								break
+							}
+						}
+					}
+					if !hasSideEffects {
+						if r.debugLogs != nil {
+							r.debugLogs.addNote(fmt.Sprintf("Marking this file as having no side effects due to %q",
+								pkgJSON.source.KeyPath.Text))
+						}
+						result.PrimarySideEffectsData = pkgJSON.sideEffectsData
+					}
+				}
+
+				// Also copy over the "type" field
+				result.ModuleTypeData = pkgJSON.moduleTypeData
+			}
+
+			// Copy various fields from the nearest enclosing "tsconfig.json" file if present
+			if tsConfigJSON := r.tsConfigForDir(dirInfo); tsConfigJSON != nil {
+				result.TSConfig = &tsConfigJSON.Settings
+				result.TSConfigJSX = tsConfigJSON.JSXSettings
+				result.TSAlwaysStrict = tsConfigJSON.TSAlwaysStrictOrStrict()
+
+				if r.debugLogs != nil {
+					r.debugLogs.addNote(fmt.Sprintf("This import is under the effect of %q",
+						tsConfigJSON.AbsPath))
+					if result.TSConfigJSX.JSXFactory != nil {
+						r.debugLogs.addNote(fmt.Sprintf("\"jsxFactory\" is %q due to %q",
+							strings.Join(result.TSConfigJSX.JSXFactory, "."),
+							tsConfigJSON.AbsPath))
+					}
+					if result.TSConfigJSX.JSXFragmentFactory != nil {
+						r.debugLogs.addNote(fmt.Sprintf("\"jsxFragment\" is %q due to %q",
+							strings.Join(result.TSConfigJSX.JSXFragmentFactory, "."),
+							tsConfigJSON.AbsPath))
+					}
+				}
+			}
+		}
+	}
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Primary path is %q in namespace %q", result.PathPair.Primary.Text, result.PathPair.Primary.Namespace))
+		if result.PathPair.HasSecondary() {
+			r.debugLogs.addNote(fmt.Sprintf("Secondary path is %q in namespace %q", result.PathPair.Secondary.Text, result.PathPair.Secondary.Namespace))
+		}
+	}
+}
+
+// resolveWithoutSymlinks resolves "importPath" as seen from "sourceDir" using
+// node's resolution rules, without the symlink rewriting that finalizeResolve
+// performs afterward. Returns nil when the import could not be resolved.
+func (r resolverQuery) resolveWithoutSymlinks(sourceDir string, sourceDirInfo *dirInfo, importPath string) *ResolveResult {
+	// This implements the module resolution algorithm from node.js, which is
+	// described here: https://nodejs.org/api/modules.html#modules_all_together
+	var result ResolveResult
+
+	// Return early if this is already an absolute path. In addition to asking
+	// the file system whether this is an absolute path, we also explicitly check
+	// whether it starts with a "/" and consider that an absolute path too. This
+	// is because relative paths can technically start with a "/" on Windows
+	// because it's not an absolute path on Windows. Then people might write code
+	// with imports that start with a "/" that works fine on Windows only to
+	// experience unexpected build failures later on other operating systems.
+	// Treating these paths as absolute paths on all platforms means Windows
+	// users will not be able to accidentally make use of these paths.
+	if strings.HasPrefix(importPath, "/") || r.fs.IsAbs(importPath) {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The import %q is being treated as an absolute path", importPath))
+		}
+
+		// First, check path overrides from the nearest enclosing TypeScript "tsconfig.json" file
+		if tsConfigJSON := r.tsConfigForDir(sourceDirInfo); tsConfigJSON != nil && tsConfigJSON.Paths != nil {
+			if absolute, ok, diffCase := r.matchTSConfigPaths(tsConfigJSON, importPath); ok {
+				return &ResolveResult{PathPair: absolute, DifferentCase: diffCase}
+			}
+		}
+
+		// Run node's resolution rules (e.g. adding ".js")
+		if absolute, ok, diffCase := r.loadAsFileOrDirectory(importPath); ok {
+			return &ResolveResult{PathPair: absolute, DifferentCase: diffCase}
+		} else {
+			return nil
+		}
+	}
+
+	// Check both relative and package paths for CSS URL tokens, with relative
+	// paths taking precedence over package paths to match Webpack behavior.
+	isPackagePath := IsPackagePath(importPath)
+	checkRelative := !isPackagePath || r.kind.IsFromCSS()
+	checkPackage := isPackagePath
+
+	if checkRelative {
+		absPath := r.fs.Join(sourceDir, importPath)
+
+		// Check for external packages first
+		if r.isExternal(r.options.ExternalSettings.PostResolve, absPath, r.kind) {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("The path %q was marked as external by the user", absPath))
+			}
+			return &ResolveResult{PathPair: PathPair{Primary: logger.Path{Text: absPath, Namespace: "file"}, IsExternal: true}}
+		}
+
+		// Check the "browser" map
+		if importDirInfo := r.dirInfoCached(r.fs.Dir(absPath)); importDirInfo != nil {
+			if remapped, ok := r.checkBrowserMap(importDirInfo, absPath, absolutePathKind); ok {
+				if remapped == nil {
+					// A "false" entry in the "browser" map disables this file
+					return &ResolveResult{PathPair: PathPair{Primary: logger.Path{Text: absPath, Namespace: "file", Flags: logger.PathDisabled}}}
+				}
+				if remappedResult, ok, diffCase, sideEffects := r.resolveWithoutRemapping(importDirInfo.enclosingBrowserScope, *remapped); ok {
+					result = ResolveResult{PathPair: remappedResult, DifferentCase: diffCase, PrimarySideEffectsData: sideEffects}
+					checkRelative = false
+					checkPackage = false
+				}
+			}
+		}
+
+		if checkRelative {
+			if absolute, ok, diffCase := r.loadAsFileOrDirectory(absPath); ok {
+				checkPackage = false
+				result = ResolveResult{PathPair: absolute, DifferentCase: diffCase}
+			} else if !checkPackage {
+				return nil
+			}
+		}
+	}
+
+	if checkPackage {
+		// Support remapping one package path to another via the "browser" field
+		if remapped, ok := r.checkBrowserMap(sourceDirInfo, importPath, packagePathKind); ok {
+			if remapped == nil {
+				// "browser": {"module": false}
+				if absolute, ok, diffCase, sideEffects := r.loadNodeModules(importPath, sourceDirInfo, false /* forbidImports */); ok {
+					absolute.Primary = logger.Path{Text: absolute.Primary.Text, Namespace: "file", Flags: logger.PathDisabled}
+					if absolute.HasSecondary() {
+						absolute.Secondary = logger.Path{Text: absolute.Secondary.Text, Namespace: "file", Flags: logger.PathDisabled}
+					}
+					return &ResolveResult{PathPair: absolute, DifferentCase: diffCase, PrimarySideEffectsData: sideEffects}
+				} else {
+					return &ResolveResult{PathPair: PathPair{Primary: logger.Path{Text: importPath, Flags: logger.PathDisabled}}, DifferentCase: diffCase}
+				}
+			}
+
+			// "browser": {"module": "./some-file"}
+			// "browser": {"module": "another-module"}
+			importPath = *remapped
+			sourceDirInfo = sourceDirInfo.enclosingBrowserScope
+		}
+
+		if absolute, ok, diffCase, sideEffects := r.resolveWithoutRemapping(sourceDirInfo, importPath); ok {
+			result = ResolveResult{PathPair: absolute, DifferentCase: diffCase, PrimarySideEffectsData: sideEffects}
+		} else {
+			// Note: node's "self references" are not currently supported
+			return nil
+		}
+	}
+
+	return &result
+}
+
+// resolveWithoutRemapping resolves "importPath" against "sourceDirInfo"
+// without applying any "browser" field remapping. Package-style paths go
+// through the "node_modules" lookup; all other paths are resolved directly
+// as a file or directory. Side-effects data is only produced for the
+// package-path case.
+func (r resolverQuery) resolveWithoutRemapping(sourceDirInfo *dirInfo, importPath string) (PathPair, bool, *fs.DifferentCase, *SideEffectsData) {
+	// Package paths (e.g. "pkg/subpath") use node's module lookup
+	if IsPackagePath(importPath) {
+		return r.loadNodeModules(importPath, sourceDirInfo, false /* forbidImports */)
+	}
+
+	// Everything else resolves relative to the source directory
+	joined := r.fs.Join(sourceDirInfo.absPath, importPath)
+	pair, found, caseMismatch := r.loadAsFileOrDirectory(joined)
+	return pair, found, caseMismatch, nil
+}
+
+// PrettyPath returns a human-readable form of "path" for use in error
+// messages, comments in output files, source names in source maps, and paths
+// in the metadata JSON file. File-namespace paths are made relative to the
+// current working directory when possible and always use forward slashes so
+// the output is independent of the operating system; paths in any other
+// non-empty namespace are shown with a "namespace:" prefix.
+func PrettyPath(fs fs.FS, path logger.Path) string {
+	text := path.Text
+
+	switch {
+	case path.Namespace == "file":
+		// Prefer a path relative to the current working directory
+		if rel, ok := fs.Rel(fs.Cwd(), text); ok {
+			text = rel
+		}
+
+		// Normalize Windows backward slashes to standard forward slashes so
+		// our output doesn't depend on which operating system it was run on
+		text = strings.ReplaceAll(text, "\\", "/")
+
+	case path.Namespace != "":
+		text = fmt.Sprintf("%s:%s", path.Namespace, text)
+	}
+
+	if path.IsDisabled() {
+		text = "(disabled):" + text
+	}
+
+	return text + path.IgnoredSuffix
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// dirInfo caches everything the resolver needs to know about one directory.
+// Instances are built by dirInfoUncached and memoized by dirInfoCached.
+type dirInfo struct {
+	// These objects are immutable, so we can just point to the parent directory
+	// and avoid having to lock the cache again
+	parent *dirInfo
+
+	// A pointer to the enclosing dirInfo with a valid "browser" field in
+	// package.json. We need this to remap paths after they have been resolved.
+	enclosingBrowserScope *dirInfo
+
+	// All relevant information about this directory
+	absPath               string
+	pnpManifestAbsPath    string
+	entries               fs.DirEntries
+	packageJSON           *packageJSON  // Is there a "package.json" file in this directory?
+	enclosingPackageJSON  *packageJSON  // Is there a "package.json" file in this directory or a parent directory?
+	enclosingTSConfigJSON *TSConfigJSON // Is there a "tsconfig.json" file in this directory or a parent directory?
+	absRealPath           string        // If non-empty, this is the real absolute path resolving any symlinks
+	isNodeModules         bool          // Is the base name "node_modules"?
+	hasNodeModules        bool          // Is there a "node_modules" subdirectory?
+	isInsideNodeModules   bool          // Is this within a "node_modules" subtree?
+}
+
+// tsConfigForDir returns the "tsconfig.json" settings that apply to files in
+// the given directory, or nil if there are none. Directories inside a
+// "node_modules" subtree deliberately never pick up a "tsconfig.json" file,
+// and an explicit override takes precedence over anything found on disk.
+// A nil "dirInfo" is allowed and yields the override (if any) or nil.
+func (r resolverQuery) tsConfigForDir(dirInfo *dirInfo) *TSConfigJSON {
+	// Guard against a nil "dirInfo" before dereferencing it. Checking
+	// "dirInfo.isInsideNodeModules" unconditionally would panic with a nil
+	// pointer dereference, and the "dirInfo != nil" check further down shows
+	// that callers may legitimately pass nil here.
+	if dirInfo != nil && dirInfo.isInsideNodeModules {
+		return nil
+	}
+	if r.tsConfigOverride != nil {
+		return r.tsConfigOverride
+	}
+	if dirInfo != nil {
+		return dirInfo.enclosingTSConfigJSON
+	}
+	return nil
+}
+
+// dirInfoCached returns the directory metadata for "path", reading it from the
+// file system on the first request and serving it from "r.dirCache" afterward.
+// A nil return means the directory could not be read; failures are cached too
+// so the same inaccessible directory isn't re-read later.
+func (r resolverQuery) dirInfoCached(path string) *dirInfo {
+	// First, check the cache
+	cached, ok := r.dirCache[path]
+
+	// Cache miss: compute the info (a cache hit skips this whole block)
+	if !ok {
+		// Update the cache to indicate failure. Even if the read failed, we don't
+		// want to retry again later. The directory is inaccessible so trying again
+		// is wasted. Doing this before calling "dirInfoUncached" prevents stack
+		// overflow in case this directory is recursively encountered again.
+		r.dirCache[path] = nil
+
+		// Cache miss: read the info
+		cached = r.dirInfoUncached(path)
+
+		// Only update the cache again on success
+		if cached != nil {
+			r.dirCache[path] = cached
+		}
+	}
+
+	if r.debugLogs != nil {
+		if cached == nil {
+			r.debugLogs.addNote(fmt.Sprintf("Failed to read directory %q", path))
+		} else {
+			count := cached.entries.PeekEntryCount()
+			entries := "entries"
+			if count == 1 {
+				entries = "entry"
+			}
+			r.debugLogs.addNote(fmt.Sprintf("Read %d %s for directory %q", count, entries, path))
+		}
+	}
+
+	return cached
+}
+
+// Sentinel errors returned by parseTSConfig so callers can distinguish an
+// "extends" cycle and an already-reported parse failure from ordinary errors.
+var errParseErrorImportCycle = errors.New("(import cycle)")
+var errParseErrorAlreadyLogged = errors.New("(error already logged)")
+
+// This may return "parseErrorAlreadyLogged" in which case there was a syntax
+// error, but it's already been reported. No further errors should be logged.
+//
+// Nested calls may also return "parseErrorImportCycle". In that case the
+// caller is responsible for logging an appropriate error message.
+//
+// The "visited" set tracks the chain of "extends" links to detect cycles; it
+// is nil for "transform" API calls, which disables "extends" processing.
+// "configDir" is passed through to parseTSConfigFromSource unchanged.
+func (r resolverQuery) parseTSConfig(file string, visited map[string]bool, configDir string) (*TSConfigJSON, error) {
+	// Resolve any symlinks first before parsing the file
+	if !r.options.PreserveSymlinks {
+		if real, ok := r.fs.EvalSymlinks(file); ok {
+			file = real
+		}
+	}
+
+	// Don't infinite loop if a series of "extends" links forms a cycle
+	if visited[file] {
+		return nil, errParseErrorImportCycle
+	}
+
+	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, file)
+	if r.debugLogs != nil && originalError != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", file, originalError.Error()))
+	}
+	if err != nil {
+		return nil, err
+	}
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", file))
+	}
+
+	keyPath := logger.Path{Text: file, Namespace: "file"}
+	source := logger.Source{
+		KeyPath:    keyPath,
+		PrettyPath: PrettyPath(r.fs, keyPath),
+		Contents:   contents,
+	}
+	if visited != nil {
+		// This is only non-nil for "build" API calls. This is nil for "transform"
+		// API calls, which tells us to not process "extends" fields.
+		visited[file] = true
+	}
+	result, err := r.parseTSConfigFromSource(source, visited, configDir)
+	if visited != nil {
+		// Reset this back to false in case something uses TypeScript 5.0's multiple
+		// inheritance feature for "tsconfig.json" files. It should be valid to visit
+		// the same base "tsconfig.json" file multiple times from different multiple
+		// inheritance subtrees.
+		visited[file] = false
+	}
+	return result, err
+}
+
+// parseTSConfigFromSource parses a "tsconfig.json" file from already-loaded
+// source text, recursively resolving any "extends" clauses via the callback
+// passed to ParseTSConfigJSON. An "extends" target may be a package path
+// (resolved through Yarn PnP or "node_modules", including "package.json"
+// "tsconfig" and "exports" fields) or a file path relative to the config
+// file. Returns errParseErrorAlreadyLogged when parsing failed and the error
+// has already been reported.
+func (r resolverQuery) parseTSConfigFromSource(source logger.Source, visited map[string]bool, configDir string) (*TSConfigJSON, error) {
+	tracker := logger.MakeLineColumnTracker(&source)
+	fileDir := r.fs.Dir(source.KeyPath.Text)
+	isExtends := len(visited) > 1
+
+	result := ParseTSConfigJSON(r.log, source, &r.caches.JSONCache, r.fs, fileDir, configDir, func(extends string, extendsRange logger.Range) *TSConfigJSON {
+		if visited == nil {
+			// If this is nil, then we're in a "transform" API call. In that case we
+			// deliberately skip processing "extends" fields. This is because the
+			// "transform" API is supposed to be without a file system.
+			return nil
+		}
+
+		// Note: This doesn't use the normal node module resolution algorithm
+		// both because it's different (e.g. we don't want to match a directory)
+		// and because it would deadlock since we're currently in the middle of
+		// populating the directory info cache.
+
+		// Helper: decide whether a recursive parseTSConfig result ends the
+		// search ("true") or whether we should keep trying other candidates
+		// ("false", only for ENOENT). Logs cycle/read errors as appropriate.
+		maybeFinishOurSearch := func(base *TSConfigJSON, err error, extendsFile string) (*TSConfigJSON, bool) {
+			if err == nil {
+				return base, true
+			}
+
+			if err == syscall.ENOENT {
+				// Return false to indicate that we should continue searching
+				return nil, false
+			}
+
+			if err == errParseErrorImportCycle {
+				r.log.AddID(logger.MsgID_TSConfigJSON_Cycle, logger.Warning, &tracker, extendsRange,
+					fmt.Sprintf("Base config file %q forms cycle", extends))
+			} else if err != errParseErrorAlreadyLogged {
+				r.log.AddError(&tracker, extendsRange,
+					fmt.Sprintf("Cannot read file %q: %s",
+						PrettyPath(r.fs, logger.Path{Text: extendsFile, Namespace: "file"}), err.Error()))
+			}
+			return nil, true
+		}
+
+		// Check for a Yarn PnP manifest and use that to rewrite the path
+		if IsPackagePath(extends) {
+			pnpData := r.pnpManifest
+
+			// If we haven't loaded the Yarn PnP manifest yet, try to find one
+			if pnpData == nil {
+				current := fileDir
+				for {
+					// Skip Yarn virtual directories when looking for a manifest
+					if _, _, ok := fs.ParseYarnPnPVirtualPath(current); !ok {
+						absPath := r.fs.Join(current, ".pnp.data.json")
+						if json, source := r.extractYarnPnPDataFromJSON(absPath, pnpIgnoreErrorsAboutMissingFiles); json.Data != nil {
+							pnpData = compileYarnPnPData(absPath, current, json, source)
+							break
+						}
+
+						absPath = r.fs.Join(current, ".pnp.cjs")
+						if json, source := r.tryToExtractYarnPnPDataFromJS(absPath, pnpIgnoreErrorsAboutMissingFiles); json.Data != nil {
+							pnpData = compileYarnPnPData(absPath, current, json, source)
+							break
+						}
+
+						absPath = r.fs.Join(current, ".pnp.js")
+						if json, source := r.tryToExtractYarnPnPDataFromJS(absPath, pnpIgnoreErrorsAboutMissingFiles); json.Data != nil {
+							pnpData = compileYarnPnPData(absPath, current, json, source)
+							break
+						}
+					}
+
+					// Go to the parent directory, stopping at the file system root
+					next := r.fs.Dir(current)
+					if current == next {
+						break
+					}
+					current = next
+				}
+			}
+
+			if pnpData != nil {
+				if result := r.resolveToUnqualified(extends, fileDir, pnpData); result.status == pnpErrorGeneric {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote("The Yarn PnP path resolution algorithm returned an error")
+					}
+					goto pnpError
+				} else if result.status == pnpSuccess {
+					// If Yarn PnP path resolution succeeded, run a custom abbreviated
+					// version of node's module resolution algorithm. The Yarn PnP
+					// specification says to use node's module resolution algorithm verbatim
+					// but that isn't what Yarn actually does. See this for more info:
+					// https://github.com/evanw/esbuild/issues/2473#issuecomment-1216774461
+					if entries, _, dirErr := r.fs.ReadDirectory(result.pkgDirPath); dirErr == nil {
+						if entry, _ := entries.Get("package.json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+							// Check the "exports" map
+							if packageJSON := r.parsePackageJSON(result.pkgDirPath); packageJSON != nil && packageJSON.exportsMap != nil {
+								if absolute, ok, _ := r.esmResolveAlgorithm(finalizeImportsExportsYarnPnPTSConfigExtends,
+									result.pkgIdent, "."+result.pkgSubpath, packageJSON, result.pkgDirPath, source.KeyPath.Text); ok {
+									base, err := r.parseTSConfig(absolute.Primary.Text, visited, configDir)
+									if result, shouldReturn := maybeFinishOurSearch(base, err, absolute.Primary.Text); shouldReturn {
+										return result
+									}
+								}
+								goto pnpError
+							}
+						}
+					}
+
+					// Continue with the module resolution algorithm from node.js
+					extends = r.fs.Join(result.pkgDirPath, result.pkgSubpath)
+				}
+			}
+		}
+
+		if IsPackagePath(extends) && !r.fs.IsAbs(extends) {
+			esmPackageName, esmPackageSubpath, esmOK := esmParsePackageName(extends)
+			if r.debugLogs != nil && esmOK {
+				r.debugLogs.addNote(fmt.Sprintf("Parsed tsconfig package name %q and package subpath %q", esmPackageName, esmPackageSubpath))
+			}
+
+			// If this is still a package path, try to resolve it to a "node_modules" directory
+			current := fileDir
+			for {
+				// Skip "node_modules" folders
+				if r.fs.Base(current) != "node_modules" {
+					join := r.fs.Join(current, "node_modules", extends)
+
+					// Check to see if "package.json" exists
+					pkgDir := r.fs.Join(current, "node_modules", esmPackageName)
+					pjFile := r.fs.Join(pkgDir, "package.json")
+					if _, err, originalError := r.fs.ReadFile(pjFile); err == nil {
+						if packageJSON := r.parsePackageJSON(pkgDir); packageJSON != nil {
+							// Try checking the "tsconfig" field of "package.json". The ability to use "extends" like this was added in TypeScript 3.2:
+							// https://www.typescriptlang.org/docs/handbook/release-notes/typescript-3-2.html#tsconfigjson-inheritance-via-nodejs-packages
+							if packageJSON.tsconfig != "" {
+								join = packageJSON.tsconfig
+								if !r.fs.IsAbs(join) {
+									join = r.fs.Join(pkgDir, join)
+								}
+							}
+
+							// Try checking the "exports" map. The ability to use "extends" like this was added in TypeScript 5.0:
+							// https://devblogs.microsoft.com/typescript/announcing-typescript-5-0/
+							if packageJSON.exportsMap != nil {
+								if r.debugLogs != nil {
+									r.debugLogs.addNote(fmt.Sprintf("Looking for %q in \"exports\" map in %q", esmPackageSubpath, packageJSON.source.KeyPath.Text))
+									r.debugLogs.increaseIndent()
+									defer r.debugLogs.decreaseIndent()
+								}
+
+								// Note: TypeScript appears to always treat this as a "require" import
+								conditions := r.esmConditionsRequire
+								resolvedPath, status, debug := r.esmPackageExportsResolve("/", esmPackageSubpath, packageJSON.exportsMap.root, conditions)
+								resolvedPath, status, debug = r.esmHandlePostConditions(resolvedPath, status, debug)
+
+								// This is a very abbreviated version of our ESM resolution
+								if status == pjStatusExact || status == pjStatusExactEndsWithStar {
+									fileToCheck := r.fs.Join(pkgDir, resolvedPath)
+									base, err := r.parseTSConfig(fileToCheck, visited, configDir)
+
+									if result, shouldReturn := maybeFinishOurSearch(base, err, fileToCheck); shouldReturn {
+										return result
+									}
+								}
+							}
+						}
+					} else if r.debugLogs != nil && originalError != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pjFile, originalError.Error()))
+					}
+
+					filesToCheck := []string{r.fs.Join(join, "tsconfig.json"), join, join + ".json"}
+					for _, fileToCheck := range filesToCheck {
+						base, err := r.parseTSConfig(fileToCheck, visited, configDir)
+
+						// Explicitly ignore matches if they are directories instead of files
+						if err != nil && err != syscall.ENOENT {
+							if entries, _, dirErr := r.fs.ReadDirectory(r.fs.Dir(fileToCheck)); dirErr == nil {
+								if entry, _ := entries.Get(r.fs.Base(fileToCheck)); entry != nil && entry.Kind(r.fs) == fs.DirEntry {
+									continue
+								}
+							}
+						}
+
+						if result, shouldReturn := maybeFinishOurSearch(base, err, fileToCheck); shouldReturn {
+							return result
+						}
+					}
+				}
+
+				// Go to the parent directory, stopping at the file system root
+				next := r.fs.Dir(current)
+				if current == next {
+					break
+				}
+				current = next
+			}
+		} else {
+			extendsFile := extends
+
+			// The TypeScript compiler has a strange behavior that seems like a bug
+			// where "." and ".." behave differently than other forms such as "./."
+			// or "../." and are interpreted as having an implicit "tsconfig.json"
+			// suffix.
+			//
+			// I believe their bug is caused by some parts of their code checking for
+			// relative paths using the literal "./" and "../" prefixes (requiring
+			// the slash) and other parts checking using the regular expression
+			// /^\.\.?($|[\\/])/ (with the slash optional).
+			//
+			// In any case, people are now relying on this behavior. One example is
+			// this: https://github.com/esbuild-kit/tsx/pull/158. So we replicate this
+			// bug in esbuild as well.
+			if extendsFile == "." || extendsFile == ".." {
+				extendsFile += "/tsconfig.json"
+			}
+
+			// If this is a regular path, search relative to the enclosing directory
+			if !r.fs.IsAbs(extendsFile) {
+				extendsFile = r.fs.Join(fileDir, extendsFile)
+			}
+			base, err := r.parseTSConfig(extendsFile, visited, configDir)
+
+			// TypeScript's handling of "extends" has some specific edge cases. We
+			// must only try adding ".json" if it's not already present, which is
+			// unlike how node path resolution works. We also need to explicitly
+			// ignore matches if they are directories instead of files. Some users
+			// name directories the same name as their config files.
+			if err != nil && !strings.HasSuffix(extendsFile, ".json") {
+				if entries, _, dirErr := r.fs.ReadDirectory(r.fs.Dir(extendsFile)); dirErr == nil {
+					extendsBase := r.fs.Base(extendsFile)
+					if entry, _ := entries.Get(extendsBase); entry == nil || entry.Kind(r.fs) != fs.FileEntry {
+						if entry, _ := entries.Get(extendsBase + ".json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+							base, err = r.parseTSConfig(extendsFile+".json", visited, configDir)
+						}
+					}
+				}
+			}
+
+			if result, shouldReturn := maybeFinishOurSearch(base, err, extendsFile); shouldReturn {
+				return result
+			}
+		}
+
+		// Suppress warnings about missing base config files inside "node_modules"
+	pnpError:
+		if !helpers.IsInsideNodeModules(source.KeyPath.Text) {
+			var notes []logger.MsgData
+			if r.debugLogs != nil {
+				notes = r.debugLogs.notes
+			}
+			r.log.AddIDWithNotes(logger.MsgID_TSConfigJSON_Missing, logger.Warning, &tracker, extendsRange,
+				fmt.Sprintf("Cannot find base config file %q", extends), notes)
+		}
+
+		return nil
+	})
+
+	if result == nil {
+		return nil, errParseErrorAlreadyLogged
+	}
+
+	// Now that we have parsed the entire "tsconfig.json" file, filter out any
+	// paths that are invalid due to being a package-style path without a base
+	// URL specified. This must be done here instead of when we're parsing the
+	// original file because TypeScript allows one "tsconfig.json" file to
+	// specify "baseUrl" and inherit a "paths" from another file via "extends".
+	if !isExtends && result.Paths != nil && result.BaseURL == nil {
+		var tracker *logger.LineColumnTracker
+		for key, paths := range result.Paths.Map {
+			end := 0
+			for _, path := range paths {
+				if isValidTSConfigPathNoBaseURLPattern(path.Text, r.log, &result.Paths.Source, &tracker, path.Loc) {
+					paths[end] = path
+					end++
+				}
+			}
+			if end < len(paths) {
+				result.Paths.Map[key] = paths[:end]
+			}
+		}
+	}
+
+	return result, nil
+}
+
+// dirInfoUncached builds the dirInfo for "path" by reading the directory from
+// the file system, recursively ensuring all parent directories are cached
+// first. Returns nil if the directory (or any parent) cannot be read. Callers
+// should use dirInfoCached instead of calling this directly.
+func (r resolverQuery) dirInfoUncached(path string) *dirInfo {
+	// Get the info for the parent directory
+	var parentInfo *dirInfo
+	parentDir := r.fs.Dir(path)
+	if parentDir != path {
+		parentInfo = r.dirInfoCached(parentDir)
+
+		// Stop now if the parent directory doesn't exist
+		if parentInfo == nil {
+			return nil
+		}
+	}
+
+	// List the directories
+	entries, err, originalError := r.fs.ReadDirectory(path)
+	if err == syscall.EACCES || err == syscall.EPERM {
+		// Just pretend this directory is empty if we can't access it. This is the
+		// case on Unix for directories that only have the execute permission bit
+		// set. It means we will just pass through the empty directory and
+		// continue to check the directories above it, which is how node behaves.
+		entries = fs.MakeEmptyDirEntries(path)
+		err = nil
+	}
+	if r.debugLogs != nil && originalError != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Failed to read directory %q: %s", path, originalError.Error()))
+	}
+	if err != nil {
+		// Ignore "ENOTDIR" here so that calling "ReadDirectory" on a file behaves
+		// as if there is nothing there at all instead of causing an error due to
+		// the directory actually being a file. This is a workaround for situations
+		// where people try to import from a path containing a file as a parent
+		// directory. The "pnpm" package manager generates a faulty "NODE_PATH"
+		// list which contains such paths and treating them as missing means we just
+		// ignore them during path resolution.
+		if err != syscall.ENOENT && err != syscall.ENOTDIR {
+			r.log.AddError(nil, logger.Range{},
+				fmt.Sprintf("Cannot read directory %q: %s",
+					PrettyPath(r.fs, logger.Path{Text: path, Namespace: "file"}), err.Error()))
+		}
+		return nil
+	}
+	info := &dirInfo{
+		absPath: path,
+		parent:  parentInfo,
+		entries: entries,
+	}
+
+	// A "node_modules" directory isn't allowed to directly contain another "node_modules" directory
+	base := r.fs.Base(path)
+	if base == "node_modules" {
+		info.isNodeModules = true
+		info.isInsideNodeModules = true
+	} else if entry, _ := entries.Get("node_modules"); entry != nil {
+		info.hasNodeModules = entry.Kind(r.fs) == fs.DirEntry
+	}
+
+	// Propagate the browser scope into child directories
+	if parentInfo != nil {
+		info.enclosingPackageJSON = parentInfo.enclosingPackageJSON
+		info.enclosingBrowserScope = parentInfo.enclosingBrowserScope
+		info.enclosingTSConfigJSON = parentInfo.enclosingTSConfigJSON
+		if parentInfo.isInsideNodeModules {
+			info.isInsideNodeModules = true
+		}
+
+		// Make sure "absRealPath" is the real path of the directory (resolving any symlinks)
+		if !r.options.PreserveSymlinks {
+			if entry, _ := parentInfo.entries.Get(base); entry != nil {
+				if symlink := entry.Symlink(r.fs); symlink != "" {
+					// This directory entry is itself a symlink
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Resolved symlink %q to %q", path, symlink))
+					}
+					info.absRealPath = symlink
+				} else if parentInfo.absRealPath != "" {
+					// A parent directory is a symlink, so join onto its real path
+					symlink := r.fs.Join(parentInfo.absRealPath, base)
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Resolved symlink %q to %q", path, symlink))
+					}
+					info.absRealPath = symlink
+				}
+			}
+		}
+	}
+
+	// Record if this directory has a package.json file
+	if entry, _ := entries.Get("package.json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+		info.packageJSON = r.parsePackageJSON(path)
+
+		// Propagate this "package.json" file into child directories
+		if info.packageJSON != nil {
+			info.enclosingPackageJSON = info.packageJSON
+			if info.packageJSON.browserMap != nil {
+				info.enclosingBrowserScope = info
+			}
+		}
+	}
+
+	// Record if this directory has a tsconfig.json or jsconfig.json file
+	if r.tsConfigOverride == nil {
+		var tsConfigPath string
+		if entry, _ := entries.Get("tsconfig.json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+			tsConfigPath = r.fs.Join(path, "tsconfig.json")
+		} else if entry, _ := entries.Get("jsconfig.json"); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+			tsConfigPath = r.fs.Join(path, "jsconfig.json")
+		}
+
+		// Except don't do this if we're inside a "node_modules" directory. Package
+		// authors often publish their "tsconfig.json" files to npm because of
+		// npm's default-include publishing model and because these authors
+		// probably don't know about ".npmignore" files.
+		//
+		// People trying to use these packages with esbuild have historically
+		// complained that esbuild is respecting "tsconfig.json" in these cases.
+		// The assumption is that the package author published these files by
+		// accident.
+		//
+		// Ignoring "tsconfig.json" files inside "node_modules" directories breaks
+		// the use case of publishing TypeScript code and having it be transpiled
+		// for you, but that's the uncommon case and likely doesn't work with
+		// many other tools anyway. So now these files are ignored.
+		if tsConfigPath != "" && !info.isInsideNodeModules {
+			var err error
+			info.enclosingTSConfigJSON, err = r.parseTSConfig(tsConfigPath, make(map[string]bool), r.fs.Dir(tsConfigPath))
+			if err != nil {
+				if err == syscall.ENOENT {
+					r.log.AddError(nil, logger.Range{}, fmt.Sprintf("Cannot find tsconfig file %q",
+						PrettyPath(r.fs, logger.Path{Text: tsConfigPath, Namespace: "file"})))
+				} else if err != errParseErrorAlreadyLogged {
+					r.log.AddID(logger.MsgID_TSConfigJSON_Missing, logger.Debug, nil, logger.Range{},
+						fmt.Sprintf("Cannot read file %q: %s",
+							PrettyPath(r.fs, logger.Path{Text: tsConfigPath, Namespace: "file"}), err.Error()))
+				}
+			}
+		}
+	}
+
+	// Record if this directory has a Yarn PnP manifest. This must not be done
+	// for Yarn virtual paths because that will result in duplicate copies of
+	// the same manifest which will result in multiple copies of the same virtual
+	// directory in the same path, which we don't handle (and which also doesn't
+	// match Yarn's behavior).
+	//
+	// For example, imagine a project with a manifest here:
+	//
+	//   /project/.pnp.cjs
+	//
+	// and a source file with an import of "bar" here:
+	//
+	//   /project/.yarn/__virtual__/pkg/1/foo.js
+	//
+	// If we didn't ignore Yarn PnP manifests in virtual folders, then we would
+	// pick up on the one here:
+	//
+	//   /project/.yarn/__virtual__/pkg/1/.pnp.cjs
+	//
+	// which means we would potentially resolve the import to something like this:
+	//
+	//   /project/.yarn/__virtual__/pkg/1/.yarn/__virtual__/pkg/1/bar
+	//
+	if r.pnpManifest == nil {
+		if _, _, ok := fs.ParseYarnPnPVirtualPath(path); !ok {
+			if pnp, _ := entries.Get(".pnp.data.json"); pnp != nil && pnp.Kind(r.fs) == fs.FileEntry {
+				info.pnpManifestAbsPath = r.fs.Join(path, ".pnp.data.json")
+			} else if pnp, _ := entries.Get(".pnp.cjs"); pnp != nil && pnp.Kind(r.fs) == fs.FileEntry {
+				info.pnpManifestAbsPath = r.fs.Join(path, ".pnp.cjs")
+			} else if pnp, _ := entries.Get(".pnp.js"); pnp != nil && pnp.Kind(r.fs) == fs.FileEntry {
+				info.pnpManifestAbsPath = r.fs.Join(path, ".pnp.js")
+			}
+		}
+	}
+
+	return info
+}
+
+// TypeScript-specific behavior: if the extension is ".js" or ".jsx", try
+// replacing it with ".ts" or ".tsx". At the time of writing this specific
+// behavior comes from the function "loadModuleFromFile()" in the file
+// "moduleNameResolver.ts" in the TypeScript compiler source code. It
+// contains this comment:
+//
+//	If that didn't work, try stripping a ".js" or ".jsx" extension and
+//	replacing it with a TypeScript one; e.g. "./foo.js" can be matched
+//	by "./foo.ts" or "./foo.d.ts"
+//
+// We don't care about ".d.ts" files because we can't do anything with
+// those, so we ignore that part of the behavior.
+//
+// See the discussion here for more historical context:
+// https://github.com/microsoft/TypeScript/issues/4595
+//
+// Each key is an input extension and each value lists the TypeScript
+// extensions to try in its place, in priority order.
+var rewrittenFileExtensions = map[string][]string{
+	// Note that the official compiler code always tries ".ts" before
+	// ".tsx" even if the original extension was ".jsx".
+	".js":  {".ts", ".tsx"},
+	".jsx": {".ts", ".tsx"},
+	".mjs": {".mts"},
+	".cjs": {".cts"},
+}
+
+// loadAsFile attempts to resolve "path" as a single file (never a
+// directory). It tries the path exactly as given, then with each extension
+// from "extensionOrder" appended, and finally with TypeScript-style
+// extension rewriting (see "rewrittenFileExtensions"). On success it
+// returns the absolute path of the match, and any case mismatch detected
+// by the file system layer.
+func (r resolverQuery) loadAsFile(path string, extensionOrder []string) (string, bool, *fs.DifferentCase) {
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Attempting to load %q as a file", path))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	// Read the directory entries once to minimize locking
+	dirPath := r.fs.Dir(path)
+	entries, err, originalError := r.fs.ReadDirectory(dirPath)
+	if r.debugLogs != nil && originalError != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Failed to read directory %q: %s", dirPath, originalError.Error()))
+	}
+	if err != nil {
+		// A missing parent directory just means "not found"; any other
+		// failure is surfaced to the user as an error
+		if err != syscall.ENOENT {
+			r.log.AddError(nil, logger.Range{},
+				fmt.Sprintf("Cannot read directory %q: %s",
+					PrettyPath(r.fs, logger.Path{Text: dirPath, Namespace: "file"}), err.Error()))
+		}
+		return "", false, nil
+	}
+
+	// tryFile checks the pre-read directory listing for an entry named
+	// "base" that is a file (directory entries don't count)
+	tryFile := func(base string) (string, bool, *fs.DifferentCase) {
+		baseWithSuffix := base
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Checking for file %q", baseWithSuffix))
+		}
+		if entry, diffCase := entries.Get(baseWithSuffix); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("Found file %q", baseWithSuffix))
+			}
+			return r.fs.Join(dirPath, baseWithSuffix), true, diffCase
+		}
+		return "", false, nil
+	}
+
+	base := r.fs.Base(path)
+
+	// Given "./x.js", node's algorithm tries things in the following order:
+	//
+	//   ./x.js
+	//   ./x.js.js
+	//   ./x.js.json
+	//   ./x.js.node
+	//   ./x.js/index.js
+	//   ./x.js/index.json
+	//   ./x.js/index.node
+	//
+	// Given "./x.js", TypeScript's algorithm tries things in the following order:
+	//
+	//   ./x.js.ts
+	//   ./x.js.tsx
+	//   ./x.js.d.ts
+	//   ./x.ts
+	//   ./x.tsx
+	//   ./x.d.ts
+	//   ./x.js/index.ts
+	//   ./x.js/index.tsx
+	//   ./x.js/index.d.ts
+	//   ./x.js.js
+	//   ./x.js.jsx
+	//   ./x.js
+	//   ./x.jsx
+	//   ./x.js/index.js
+	//   ./x.js/index.jsx
+	//
+	// Our order below is a blend of both. We try to follow node's algorithm but
+	// with the features of TypeScript's algorithm (omitting ".d.ts" files, which
+	// don't contain code). This means we should end up checking the same files
+	// as TypeScript, but in a different order.
+	//
+	// One reason we use a different order is because we support a customizable
+	// extension resolution order, which doesn't fit well into TypeScript's
+	// algorithm. For example, you can configure esbuild to check for extensions
+	// in the order ".js,.ts,.jsx,.tsx" but TypeScript always checks TypeScript
+	// extensions before JavaScript extensions, so we can't obey the user's
+	// intent if we follow TypeScript's algorithm exactly.
+	//
+	// Another reason we deviate from TypeScript's order is because our code is
+	// structured to handle node's algorithm and TypeScript's algorithm has a
+	// different structure. It intermixes multiple calls to LOAD_AS_FILE and
+	// LOAD_INDEX together while node always does one LOAD_AS_FILE before one
+	// LOAD_INDEX.
+
+	// Try the plain path without any extensions
+	if absolute, ok, diffCase := tryFile(base); ok {
+		return absolute, ok, diffCase
+	}
+
+	// Try the path with extensions
+	for _, ext := range extensionOrder {
+		if absolute, ok, diffCase := tryFile(base + ext); ok {
+			return absolute, ok, diffCase
+		}
+	}
+
+	// TypeScript-specific behavior: try rewriting ".js" to ".ts"
+	for old, exts := range rewrittenFileExtensions {
+		if !strings.HasSuffix(base, old) {
+			continue
+		}
+		// "lastDot" cannot be -1 here: "base" ends in "old", and every key
+		// of "rewrittenFileExtensions" contains a "."
+		lastDot := strings.LastIndexByte(base, '.')
+		for _, ext := range exts {
+			if absolute, ok, diffCase := tryFile(base[:lastDot] + ext); ok {
+				return absolute, ok, diffCase
+			}
+		}
+		break
+	}
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Failed to find file %q", base))
+	}
+	return "", false, nil
+}
+
+// loadAsIndex searches the given directory for an "index" file, trying each
+// extension from "extensionOrder" in turn. On success it returns the path
+// pair for the matching file along with any case mismatch information.
+func (r resolverQuery) loadAsIndex(dirInfo *dirInfo, extensionOrder []string) (PathPair, bool, *fs.DifferentCase) {
+	// Check "index" plus each candidate extension against the cached entries
+	for _, extension := range extensionOrder {
+		indexBase := "index" + extension
+		entry, diffCase := dirInfo.entries.Get(indexBase)
+		if entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("Found file %q", r.fs.Join(dirInfo.absPath, indexBase)))
+			}
+			return PathPair{Primary: logger.Path{Text: r.fs.Join(dirInfo.absPath, indexBase), Namespace: "file"}}, true, diffCase
+		}
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Failed to find file %q", r.fs.Join(dirInfo.absPath, indexBase)))
+		}
+	}
+
+	// No "index" file with any of the given extensions exists here
+	return PathPair{}, false, nil
+}
+
+// loadAsIndexWithBrowserRemapping is like "loadAsIndex" except that it first
+// gives the enclosing package's "browser" map a chance to remap (or disable)
+// the implicit "index" path. If no remapping applies, it falls back to a
+// plain index lookup in "dirInfo".
+func (r resolverQuery) loadAsIndexWithBrowserRemapping(dirInfo *dirInfo, path string, extensionOrder []string) (PathPair, bool, *fs.DifferentCase) {
+	// Potentially remap using the "browser" field
+	absPath := r.fs.Join(path, "index")
+	if remapped, ok := r.checkBrowserMap(dirInfo, absPath, absolutePathKind); ok {
+		// A nil remapping means the path was explicitly disabled
+		// (e.g. mapped to "false" in the "browser" field)
+		if remapped == nil {
+			return PathPair{Primary: logger.Path{Text: absPath, Namespace: "file", Flags: logger.PathDisabled}}, true, nil
+		}
+		remappedAbs := r.fs.Join(path, *remapped)
+
+		// Is this a file?
+		absolute, ok, diffCase := r.loadAsFile(remappedAbs, extensionOrder)
+		if ok {
+			return PathPair{Primary: logger.Path{Text: absolute, Namespace: "file"}}, true, diffCase
+		}
+
+		// Is it a directory with an index?
+		if fieldDirInfo := r.dirInfoCached(remappedAbs); fieldDirInfo != nil {
+			if absolute, ok, _ := r.loadAsIndex(fieldDirInfo, extensionOrder); ok {
+				return absolute, true, nil
+			}
+		}
+
+		return PathPair{}, false, nil
+	}
+
+	return r.loadAsIndex(dirInfo, extensionOrder)
+}
+
+// getProperty looks up the property named "name" on a JSON object literal.
+// It returns the property's value, the source location of its key, and
+// whether the property was present. Non-object expressions and non-string
+// keys never match.
+func getProperty(json js_ast.Expr, name string) (js_ast.Expr, logger.Loc, bool) {
+	obj, isObject := json.Data.(*js_ast.EObject)
+	if !isObject {
+		return js_ast.Expr{}, logger.Loc{}, false
+	}
+	for _, prop := range obj.Properties {
+		key, isString := prop.Key.Data.(*js_ast.EString)
+		if isString && key.Value != nil && helpers.UTF16EqualsString(key.Value, name) {
+			return prop.ValueOrNil, prop.Key.Loc, true
+		}
+	}
+	return js_ast.Expr{}, logger.Loc{}, false
+}
+
+// getString unwraps a JSON string literal into a Go string. The second
+// return value reports whether the expression was actually a string.
+func getString(json js_ast.Expr) (string, bool) {
+	str, isString := json.Data.(*js_ast.EString)
+	if !isString {
+		return "", false
+	}
+	return helpers.UTF16ToString(str.Value), true
+}
+
+// getBool unwraps a JSON boolean literal. The second return value reports
+// whether the expression was actually a boolean.
+func getBool(json js_ast.Expr) (bool, bool) {
+	boolean, isBool := json.Data.(*js_ast.EBoolean)
+	if !isBool {
+		return false, false
+	}
+	return boolean.Value, true
+}
+
+// loadAsFileOrDirectory implements node-style resolution of "path": first
+// try it as a file (with extension variations), then as a directory by
+// consulting the "package.json" main fields and finally an "index" file.
+// The extension search order is swapped out for CSS "@import" paths and
+// for paths inside "node_modules".
+func (r resolverQuery) loadAsFileOrDirectory(path string) (PathPair, bool, *fs.DifferentCase) {
+	extensionOrder := r.options.ExtensionOrder
+	if r.kind.MustResolveToCSS() {
+		// Use a special import order for CSS "@import" imports
+		extensionOrder = r.cssExtensionOrder
+	} else if helpers.IsInsideNodeModules(path) {
+		// Use a special import order for imports inside "node_modules"
+		extensionOrder = r.nodeModulesExtensionOrder
+	}
+
+	// Is this a file?
+	absolute, ok, diffCase := r.loadAsFile(path, extensionOrder)
+	if ok {
+		return PathPair{Primary: logger.Path{Text: absolute, Namespace: "file"}}, true, diffCase
+	}
+
+	// Is this a directory?
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Attempting to load %q as a directory", path))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+	dirInfo := r.dirInfoCached(path)
+	if dirInfo == nil {
+		return PathPair{}, false, nil
+	}
+
+	// Try using the main field(s) from "package.json"
+	if absolute, ok, diffCase := r.loadAsMainField(dirInfo, path, extensionOrder); ok {
+		return absolute, true, diffCase
+	}
+
+	// Look for an "index" file with known extensions
+	if absolute, ok, diffCase := r.loadAsIndexWithBrowserRemapping(dirInfo, path, extensionOrder); ok {
+		return absolute, true, diffCase
+	}
+
+	return PathPair{}, false, nil
+}
+
+// loadAsMainField resolves the directory "path" using the main field(s)
+// from its "package.json" (e.g. "main", "module", "browser"). If the user
+// has not configured a main field order, a platform-specific default is
+// used, and "module" vs. "main" is then chosen per-import based on whether
+// the import was an "import" or a "require". If nothing resolved, debug
+// notes are attached explaining why a present "main" field was skipped.
+func (r resolverQuery) loadAsMainField(dirInfo *dirInfo, path string, extensionOrder []string) (PathPair, bool, *fs.DifferentCase) {
+	if dirInfo.packageJSON == nil {
+		return PathPair{}, false, nil
+	}
+
+	mainFieldValues := dirInfo.packageJSON.mainFields
+	mainFieldKeys := r.options.MainFields
+	autoMain := false
+
+	// If the user has not explicitly specified a "main" field order,
+	// use a default one determined by the current platform target
+	if mainFieldKeys == nil {
+		mainFieldKeys = defaultMainFields[r.options.Platform]
+		autoMain = true
+	}
+
+	// loadMainField resolves one main field value ("fieldRelPath") relative
+	// to the package directory, applying "browser" remapping and falling
+	// back to an "index" lookup if the value names a directory
+	loadMainField := func(fieldRelPath string, field string) (PathPair, bool, *fs.DifferentCase) {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Found main field %q with path %q", field, fieldRelPath))
+			r.debugLogs.increaseIndent()
+			defer r.debugLogs.decreaseIndent()
+		}
+
+		// Potentially remap using the "browser" field
+		fieldAbsPath := r.fs.Join(path, fieldRelPath)
+		if remapped, ok := r.checkBrowserMap(dirInfo, fieldAbsPath, absolutePathKind); ok {
+			if remapped == nil {
+				return PathPair{Primary: logger.Path{Text: fieldAbsPath, Namespace: "file", Flags: logger.PathDisabled}}, true, nil
+			}
+			fieldAbsPath = r.fs.Join(path, *remapped)
+		}
+
+		// Is this a file?
+		absolute, ok, diffCase := r.loadAsFile(fieldAbsPath, extensionOrder)
+		if ok {
+			return PathPair{Primary: logger.Path{Text: absolute, Namespace: "file"}}, true, diffCase
+		}
+
+		// Is it a directory with an index?
+		if fieldDirInfo := r.dirInfoCached(fieldAbsPath); fieldDirInfo != nil {
+			if absolute, ok, _ := r.loadAsIndexWithBrowserRemapping(fieldDirInfo, fieldAbsPath, extensionOrder); ok {
+				return absolute, true, nil
+			}
+		}
+
+		return PathPair{}, false, nil
+	}
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Searching for main fields in %q", dirInfo.packageJSON.source.KeyPath.Text))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	foundSomething := false
+
+	for _, key := range mainFieldKeys {
+		value, ok := mainFieldValues[key]
+		if !ok {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("Did not find main field %q", key))
+			}
+			continue
+		}
+		foundSomething = true
+
+		absolute, ok, diffCase := loadMainField(value.relPath, key)
+		if !ok {
+			continue
+		}
+
+		// If the user did not manually configure a "main" field order, then
+		// use a special per-module automatic algorithm to decide whether to
+		// use "module" or "main" based on whether the package is imported
+		// using "import" or "require".
+		if autoMain && key == "module" {
+			var absoluteMain PathPair
+			var okMain bool
+			var diffCaseMain *fs.DifferentCase
+
+			if main, ok := mainFieldValues["main"]; ok {
+				if absolute, ok, diffCase := loadMainField(main.relPath, "main"); ok {
+					absoluteMain = absolute
+					okMain = true
+					diffCaseMain = diffCase
+				}
+			} else {
+				// Some packages have a "module" field without a "main" field but
+				// still have an implicit "index.js" file. In that case, treat that
+				// as the value for "main".
+				if absolute, ok, diffCase := r.loadAsIndexWithBrowserRemapping(dirInfo, path, extensionOrder); ok {
+					absoluteMain = absolute
+					okMain = true
+					diffCaseMain = diffCase
+				}
+			}
+
+			if okMain {
+				// If both the "main" and "module" fields exist, use "main" if the
+				// path is for "require" and "module" if the path is for "import".
+				// If we're using "module", return enough information to be able to
+				// fall back to "main" later if something ended up using "require()"
+				// with this same path. The goal of this code is to avoid having
+				// both the "module" file and the "main" file in the bundle at the
+				// same time.
+				if r.kind != ast.ImportRequire {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Resolved to %q using the \"module\" field in %q",
+							absolute.Primary.Text, dirInfo.packageJSON.source.KeyPath.Text))
+						r.debugLogs.addNote(fmt.Sprintf("The fallback path in case of \"require\" is %q",
+							absoluteMain.Primary.Text))
+					}
+					return PathPair{
+						// This is the whole point of the path pair
+						Primary:   absolute.Primary,
+						Secondary: absoluteMain.Primary,
+					}, true, diffCase
+				} else {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Resolved to %q because of \"require\"", absoluteMain.Primary.Text))
+					}
+					return absoluteMain, true, diffCaseMain
+				}
+			}
+		}
+
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Resolved to %q using the %q field in %q",
+				absolute.Primary.Text, key, dirInfo.packageJSON.source.KeyPath.Text))
+		}
+		return absolute, true, diffCase
+	}
+
+	// Let the user know if "main" exists but was skipped due to mis-configuration
+	if !foundSomething {
+		for _, field := range mainFieldsForFailure {
+			if main, ok := mainFieldValues[field]; ok {
+				tracker := logger.MakeLineColumnTracker(&dirInfo.packageJSON.source)
+				keyRange := dirInfo.packageJSON.source.RangeOfString(main.keyLoc)
+				if len(mainFieldKeys) == 0 && r.options.Platform == config.PlatformNeutral {
+					r.debugMeta.notes = append(r.debugMeta.notes, tracker.MsgData(keyRange,
+						fmt.Sprintf("The %q field here was ignored. Main fields must be configured explicitly when using the \"neutral\" platform.",
+							field)))
+				} else {
+					r.debugMeta.notes = append(r.debugMeta.notes, tracker.MsgData(keyRange,
+						fmt.Sprintf("The %q field here was ignored because the list of main fields to use is currently set to [%s].",
+							field, helpers.StringArrayToQuotedCommaSeparatedString(mainFieldKeys))))
+				}
+				break
+			}
+		}
+	}
+
+	return PathPair{}, false, nil
+}
+
+// hasCaseInsensitiveSuffix reports whether "s" ends with "suffix" using a
+// case-insensitive (Unicode case-folding) comparison.
+func hasCaseInsensitiveSuffix(s string, suffix string) bool {
+	if len(s) < len(suffix) {
+		return false
+	}
+	return strings.EqualFold(s[len(s)-len(suffix):], suffix)
+}
+
+// matchTSConfigPaths attempts to resolve "path" via the "paths" substitution
+// map in the given tsconfig.json. Exact keys are tried first; otherwise the
+// wildcard ("*") pattern with the longest matching prefix wins. Candidate
+// substitutions ending in ".d.ts" are skipped, and the rest are resolved
+// relative to the effective "baseUrl". This closely follows the behavior of
+// "tryLoadModuleUsingPaths()" in the official TypeScript compiler.
+func (r resolverQuery) matchTSConfigPaths(tsConfigJSON *TSConfigJSON, path string) (PathPair, bool, *fs.DifferentCase) {
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Matching %q against \"paths\" in %q", path, tsConfigJSON.AbsPath))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	absBaseURL := tsConfigJSON.BaseURLForPaths
+
+	// The explicit base URL should take precedence over the implicit base URL
+	// if present. This matters when a tsconfig.json file overrides "baseUrl"
+	// from another extended tsconfig.json file but doesn't override "paths".
+	if tsConfigJSON.BaseURL != nil {
+		absBaseURL = *tsConfigJSON.BaseURL
+	}
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Using %q as \"baseUrl\"", absBaseURL))
+	}
+
+	// Check for exact matches first
+	for key, originalPaths := range tsConfigJSON.Paths.Map {
+		if key == path {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("Found an exact match for %q in \"paths\"", key))
+			}
+			for _, originalPath := range originalPaths {
+				// Ignore ".d.ts" files because this rule is obviously only here for type checking
+				if hasCaseInsensitiveSuffix(originalPath.Text, ".d.ts") {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Ignoring substitution %q because it ends in \".d.ts\"", originalPath.Text))
+					}
+					continue
+				}
+
+				// Load the original path relative to the "baseUrl" from tsconfig.json
+				absoluteOriginalPath := originalPath.Text
+				if !r.fs.IsAbs(absoluteOriginalPath) {
+					absoluteOriginalPath = r.fs.Join(absBaseURL, absoluteOriginalPath)
+				}
+				if absolute, ok, diffCase := r.loadAsFileOrDirectory(absoluteOriginalPath); ok {
+					return absolute, true, diffCase
+				}
+			}
+			return PathPair{}, false, nil
+		}
+	}
+
+	// A candidate wildcard pattern: "prefix*suffix" plus its substitutions
+	type match struct {
+		prefix        string
+		suffix        string
+		originalPaths []TSConfigPath
+	}
+
+	// Check for pattern matches next
+	longestMatchPrefixLength := -1
+	longestMatchSuffixLength := -1
+	var longestMatch match
+	for key, originalPaths := range tsConfigJSON.Paths.Map {
+		if starIndex := strings.IndexByte(key, '*'); starIndex != -1 {
+			prefix, suffix := key[:starIndex], key[starIndex+1:]
+
+			// Find the match with the longest prefix. If two matches have the same
+			// prefix length, pick the one with the longest suffix. This second edge
+			// case isn't handled by the TypeScript compiler, but we handle it
+			// because we want the output to always be deterministic and Go map
+			// iteration order is deliberately non-deterministic.
+			if strings.HasPrefix(path, prefix) && strings.HasSuffix(path, suffix) && (len(prefix) > longestMatchPrefixLength ||
+				(len(prefix) == longestMatchPrefixLength && len(suffix) > longestMatchSuffixLength)) {
+				longestMatchPrefixLength = len(prefix)
+				longestMatchSuffixLength = len(suffix)
+				longestMatch = match{
+					prefix:        prefix,
+					suffix:        suffix,
+					originalPaths: originalPaths,
+				}
+			}
+		}
+	}
+
+	// If there is at least one match, only consider the one with the longest
+	// prefix. This matches the behavior of the TypeScript compiler.
+	if longestMatchPrefixLength != -1 {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Found a fuzzy match for %q in \"paths\"", longestMatch.prefix+"*"+longestMatch.suffix))
+		}
+
+		for _, originalPath := range longestMatch.originalPaths {
+			// Swap out the "*" in the original path for whatever the "*" matched
+			matchedText := path[len(longestMatch.prefix) : len(path)-len(longestMatch.suffix)]
+			originalPath := strings.Replace(originalPath.Text, "*", matchedText, 1)
+
+			// Ignore ".d.ts" files because this rule is obviously only here for type checking
+			if hasCaseInsensitiveSuffix(originalPath, ".d.ts") {
+				if r.debugLogs != nil {
+					r.debugLogs.addNote(fmt.Sprintf("Ignoring substitution %q because it ends in \".d.ts\"", originalPath))
+				}
+				continue
+			}
+
+			// Load the original path relative to the "baseUrl" from tsconfig.json
+			absoluteOriginalPath := originalPath
+			if !r.fs.IsAbs(originalPath) {
+				absoluteOriginalPath = r.fs.Join(absBaseURL, originalPath)
+			}
+			if absolute, ok, diffCase := r.loadAsFileOrDirectory(absoluteOriginalPath); ok {
+				return absolute, true, diffCase
+			}
+		}
+	}
+
+	return PathPair{}, false, nil
+}
+
+// loadPackageImports resolves a "#"-prefixed subpath import using the
+// "imports" map of the nearest enclosing "package.json". See node's subpath
+// imports documentation: https://nodejs.org/api/packages.html#subpath-imports
+// On failure, explanatory notes are attached to the resolver's debug
+// metadata for a better error message.
+func (r resolverQuery) loadPackageImports(importPath string, dirInfoPackageJSON *dirInfo) (PathPair, bool, *fs.DifferentCase, *SideEffectsData) {
+	packageJSON := dirInfoPackageJSON.packageJSON
+
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Looking for %q in \"imports\" map in %q", importPath, packageJSON.source.KeyPath.Text))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	// Filter out invalid module specifiers now where we have more information for
+	// a better error message instead of later when we're inside the algorithm
+	if importPath == "#" || strings.HasPrefix(importPath, "#/") {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("The path %q must not equal \"#\" and must not start with \"#/\".", importPath))
+		}
+		tracker := logger.MakeLineColumnTracker(&packageJSON.source)
+		r.debugMeta.notes = append(r.debugMeta.notes, tracker.MsgData(packageJSON.importsMap.root.firstToken,
+			fmt.Sprintf("This \"imports\" map was ignored because the module specifier %q is invalid:", importPath)))
+		return PathPair{}, false, nil, nil
+	}
+
+	// The condition set is determined by the kind of import
+	conditions := r.esmConditionsDefault
+	switch r.kind {
+	case ast.ImportStmt, ast.ImportDynamic:
+		conditions = r.esmConditionsImport
+	case ast.ImportRequire, ast.ImportRequireResolve:
+		conditions = r.esmConditionsRequire
+	}
+
+	resolvedPath, status, debug := r.esmPackageImportsResolve(importPath, packageJSON.importsMap.root, conditions)
+	resolvedPath, status, debug = r.esmHandlePostConditions(resolvedPath, status, debug)
+
+	if status == pjStatusPackageResolve {
+		if pathPair, ok, sideEffects := r.checkForBuiltInNodeModules(resolvedPath); ok {
+			return pathPair, true, nil, sideEffects
+		}
+
+		// The import path was remapped via "imports" to another import path
+		// that now needs to be resolved too. Set "forbidImports" to true
+		// so we don't try to resolve "imports" again and end up in a loop.
+		absolute, ok, diffCase, sideEffects := r.loadNodeModules(resolvedPath, dirInfoPackageJSON, true /* forbidImports */)
+		if !ok {
+			// Prepend a note pointing at the "imports" map entry that
+			// produced the unresolvable remapped path
+			tracker := logger.MakeLineColumnTracker(&packageJSON.source)
+			r.debugMeta.notes = append(
+				[]logger.MsgData{tracker.MsgData(debug.token,
+					fmt.Sprintf("The remapped path %q could not be resolved:", resolvedPath))},
+				r.debugMeta.notes...)
+		}
+		return absolute, ok, diffCase, sideEffects
+	}
+
+	absolute, ok, diffCase := r.finalizeImportsExportsResult(
+		finalizeImportsExportsNormal,
+		dirInfoPackageJSON.absPath, conditions, *packageJSON.importsMap, packageJSON,
+		resolvedPath, status, debug,
+		"", "", "",
+	)
+	return absolute, ok, diffCase, nil
+}
+
+// esmResolveAlgorithm resolves "esmPackageSubpath" against the "exports" map
+// of the package's "package.json" using node's ESM package resolution rules.
+// The active condition set ("import" vs. "require" etc.) is derived from the
+// kind of the originating import; entry points are treated as "import".
+func (r resolverQuery) esmResolveAlgorithm(
+	kind finalizeImportsExportsKind,
+	esmPackageName string,
+	esmPackageSubpath string,
+	packageJSON *packageJSON,
+	absPkgPath string,
+	absPath string,
+) (PathPair, bool, *fs.DifferentCase) {
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Looking for %q in \"exports\" map in %q", esmPackageSubpath, packageJSON.source.KeyPath.Text))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	// The condition set is determined by the kind of import
+	conditions := r.esmConditionsDefault
+	switch r.kind {
+	case ast.ImportStmt, ast.ImportDynamic:
+		conditions = r.esmConditionsImport
+	case ast.ImportRequire, ast.ImportRequireResolve:
+		conditions = r.esmConditionsRequire
+	case ast.ImportEntryPoint:
+		// Treat entry points as imports instead of requires for consistency with
+		// Webpack and Rollup. More information:
+		//
+		// * https://github.com/evanw/esbuild/issues/1956
+		// * https://github.com/nodejs/node/issues/41686
+		// * https://github.com/evanw/entry-point-resolve-test
+		//
+		conditions = r.esmConditionsImport
+	}
+
+	// Resolve against the path "/", then join it with the absolute
+	// directory path. This is done because ESM package resolution uses
+	// URLs while our path resolution uses file system paths. We don't
+	// want problems due to Windows paths, which are very unlike URL
+	// paths. We also want to avoid any "%" characters in the absolute
+	// directory path accidentally being interpreted as URL escapes.
+	resolvedPath, status, debug := r.esmPackageExportsResolve("/", esmPackageSubpath, packageJSON.exportsMap.root, conditions)
+	resolvedPath, status, debug = r.esmHandlePostConditions(resolvedPath, status, debug)
+
+	return r.finalizeImportsExportsResult(
+		kind,
+		absPkgPath, conditions, *packageJSON.exportsMap, packageJSON,
+		resolvedPath, status, debug,
+		esmPackageName, esmPackageSubpath, absPath,
+	)
+}
+
+// loadNodeModules resolves a bare import path by walking up the directory
+// tree looking inside "node_modules" folders. Before that, it consults (in
+// order): any enclosing "tsconfig.json" path mappings and "baseUrl", the
+// "imports" map of the nearest enclosing "package.json", the external-
+// packages setting, and the Yarn PnP manifest if one is active. The final
+// fallback is the global "NODE_PATH" list. "forbidImports" prevents
+// re-entering the "imports" map when resolving a path that was itself
+// produced by that map.
+func (r resolverQuery) loadNodeModules(importPath string, dirInfo *dirInfo, forbidImports bool) (PathPair, bool, *fs.DifferentCase, *SideEffectsData) {
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Searching for %q in \"node_modules\" directories starting from %q", importPath, dirInfo.absPath))
+		r.debugLogs.increaseIndent()
+		defer r.debugLogs.decreaseIndent()
+	}
+
+	// First, check path overrides from the nearest enclosing TypeScript "tsconfig.json" file
+	if tsConfigJSON := r.tsConfigForDir(dirInfo); tsConfigJSON != nil {
+		// Try path substitutions first
+		if tsConfigJSON.Paths != nil {
+			if absolute, ok, diffCase := r.matchTSConfigPaths(tsConfigJSON, importPath); ok {
+				return absolute, true, diffCase, nil
+			}
+		}
+
+		// Try looking up the path relative to the base URL
+		if tsConfigJSON.BaseURL != nil {
+			basePath := r.fs.Join(*tsConfigJSON.BaseURL, importPath)
+			if absolute, ok, diffCase := r.loadAsFileOrDirectory(basePath); ok {
+				return absolute, true, diffCase, nil
+			}
+		}
+	}
+
+	// Find the parent directory with the "package.json" file
+	dirInfoPackageJSON := dirInfo
+	for dirInfoPackageJSON != nil && dirInfoPackageJSON.packageJSON == nil {
+		dirInfoPackageJSON = dirInfoPackageJSON.parent
+	}
+
+	// Check for subpath imports: https://nodejs.org/api/packages.html#subpath-imports
+	if dirInfoPackageJSON != nil && strings.HasPrefix(importPath, "#") && !forbidImports && dirInfoPackageJSON.packageJSON.importsMap != nil {
+		return r.loadPackageImports(importPath, dirInfoPackageJSON)
+	}
+
+	// "import 'pkg'" when all packages are external (vs. "import './pkg'")
+	if r.options.ExternalPackages && IsPackagePath(importPath) {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote("Marking this path as external because it's a package path")
+		}
+		return PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true}, true, nil, nil
+	}
+
+	// If Yarn PnP is active, use it to find the package
+	if r.pnpManifest != nil {
+		if result := r.resolveToUnqualified(importPath, dirInfo.absPath, r.pnpManifest); result.status.isError() {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote("The Yarn PnP path resolution algorithm returned an error")
+			}
+
+			// Try to provide more information about this error if it's available
+			switch result.status {
+			case pnpErrorDependencyNotFound:
+				r.debugMeta.notes = []logger.MsgData{r.pnpManifest.tracker.MsgData(result.errorRange,
+					fmt.Sprintf("The Yarn Plug'n'Play manifest forbids importing %q here because it's not listed as a dependency of this package:", result.errorIdent))}
+
+			case pnpErrorUnfulfilledPeerDependency:
+				r.debugMeta.notes = []logger.MsgData{r.pnpManifest.tracker.MsgData(result.errorRange,
+					fmt.Sprintf("The Yarn Plug'n'Play manifest says this package has a peer dependency on %q, but the package %q has not been installed:", result.errorIdent, result.errorIdent))}
+			}
+
+			return PathPair{}, false, nil, nil
+		} else if result.status == pnpSuccess {
+			absPath := r.fs.Join(result.pkgDirPath, result.pkgSubpath)
+
+			// If Yarn PnP path resolution succeeded, run a custom abbreviated
+			// version of node's module resolution algorithm. The Yarn PnP
+			// specification says to use node's module resolution algorithm verbatim
+			// but that isn't what Yarn actually does. See this for more info:
+			// https://github.com/evanw/esbuild/issues/2473#issuecomment-1216774461
+			if pkgDirInfo := r.dirInfoCached(result.pkgDirPath); pkgDirInfo != nil {
+				// Check the "exports" map
+				if packageJSON := pkgDirInfo.packageJSON; packageJSON != nil && packageJSON.exportsMap != nil {
+					absolute, ok, diffCase := r.esmResolveAlgorithm(finalizeImportsExportsNormal, result.pkgIdent, "."+result.pkgSubpath, packageJSON, pkgDirInfo.absPath, absPath)
+					return absolute, ok, diffCase, nil
+				}
+
+				// Check the "browser" map
+				if remapped, ok := r.checkBrowserMap(pkgDirInfo, absPath, absolutePathKind); ok {
+					if remapped == nil {
+						return PathPair{Primary: logger.Path{Text: absPath, Namespace: "file", Flags: logger.PathDisabled}}, true, nil, nil
+					}
+					if remappedResult, ok, diffCase, sideEffects := r.resolveWithoutRemapping(pkgDirInfo.enclosingBrowserScope, *remapped); ok {
+						return remappedResult, true, diffCase, sideEffects
+					}
+				}
+
+				if absolute, ok, diffCase := r.loadAsFileOrDirectory(absPath); ok {
+					return absolute, true, diffCase, nil
+				}
+			}
+
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("Failed to resolve %q to a file", absPath))
+			}
+			return PathPair{}, false, nil, nil
+		}
+	}
+
+	// Try to parse the package name using node's ESM-specific rules
+	esmPackageName, esmPackageSubpath, esmOK := esmParsePackageName(importPath)
+	if r.debugLogs != nil && esmOK {
+		r.debugLogs.addNote(fmt.Sprintf("Parsed package name %q and package subpath %q", esmPackageName, esmPackageSubpath))
+	}
+
+	// Check for self-references
+	if dirInfoPackageJSON != nil {
+		if packageJSON := dirInfoPackageJSON.packageJSON; packageJSON.name == esmPackageName && packageJSON.exportsMap != nil {
+			absolute, ok, diffCase := r.esmResolveAlgorithm(finalizeImportsExportsNormal, esmPackageName, esmPackageSubpath, packageJSON,
+				dirInfoPackageJSON.absPath, r.fs.Join(dirInfoPackageJSON.absPath, esmPackageSubpath))
+			return absolute, ok, diffCase, nil
+		}
+	}
+
+	// Common package resolution logic shared between "node_modules" and "NODE_PATHS".
+	// The extra boolean return value reports whether the search should stop
+	// (a definitive answer was reached, successful or not).
+	tryToResolvePackage := func(absDir string) (PathPair, bool, *fs.DifferentCase, *SideEffectsData, bool) {
+		absPath := r.fs.Join(absDir, importPath)
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("Checking for a package in the directory %q", absPath))
+		}
+
+		// Try node's new package resolution rules
+		if esmOK {
+			absPkgPath := r.fs.Join(absDir, esmPackageName)
+			if pkgDirInfo := r.dirInfoCached(absPkgPath); pkgDirInfo != nil {
+				// Check the "exports" map
+				if packageJSON := pkgDirInfo.packageJSON; packageJSON != nil && packageJSON.exportsMap != nil {
+					absolute, ok, diffCase := r.esmResolveAlgorithm(finalizeImportsExportsNormal, esmPackageName, esmPackageSubpath, packageJSON, absPkgPath, absPath)
+					return absolute, ok, diffCase, nil, true
+				}
+
+				// Check the "browser" map
+				if remapped, ok := r.checkBrowserMap(pkgDirInfo, absPath, absolutePathKind); ok {
+					if remapped == nil {
+						return PathPair{Primary: logger.Path{Text: absPath, Namespace: "file", Flags: logger.PathDisabled}}, true, nil, nil, true
+					}
+					if remappedResult, ok, diffCase, sideEffects := r.resolveWithoutRemapping(pkgDirInfo.enclosingBrowserScope, *remapped); ok {
+						return remappedResult, true, diffCase, sideEffects, true
+					}
+				}
+			}
+		}
+
+		// Try node's old package resolution rules
+		if absolute, ok, diffCase := r.loadAsFileOrDirectory(absPath); ok {
+			return absolute, true, diffCase, nil, true
+		}
+
+		return PathPair{}, false, nil, nil, false
+	}
+
+	// Then check for the package in any enclosing "node_modules" directories
+	for {
+		// Skip directories that are themselves called "node_modules", since we
+		// don't ever want to search for "node_modules/node_modules"
+		if dirInfo.hasNodeModules {
+			if absolute, ok, diffCase, sideEffects, shouldStop := tryToResolvePackage(r.fs.Join(dirInfo.absPath, "node_modules")); shouldStop {
+				return absolute, ok, diffCase, sideEffects
+			}
+		}
+
+		// Go to the parent directory, stopping at the file system root
+		dirInfo = dirInfo.parent
+		if dirInfo == nil {
+			break
+		}
+	}
+
+	// Then check the global "NODE_PATH" environment variable. It has been
+	// clarified that this step comes last after searching for "node_modules"
+	// directories: https://github.com/nodejs/node/issues/38128.
+	for _, absDir := range r.options.AbsNodePaths {
+		if absolute, ok, diffCase, sideEffects, shouldStop := tryToResolvePackage(absDir); shouldStop {
+			return absolute, ok, diffCase, sideEffects
+		}
+	}
+
+	return PathPair{}, false, nil, nil
+}
+
+// checkForBuiltInNodeModules handles imports of node built-in modules (e.g.
+// "fs" or "node:fs") when targeting the node platform. Matching imports are
+// marked as implicitly external; the returned *SideEffectsData is non-nil for
+// known built-ins, which marks them as side-effect free ("sideEffects: false").
+// The boolean result reports whether the import path was handled here.
+func (r resolverQuery) checkForBuiltInNodeModules(importPath string) (PathPair, bool, *SideEffectsData) {
+	// "import fs from 'fs'"
+	if r.options.Platform == config.PlatformNode && BuiltInNodeModules[importPath] {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote("Marking this path as implicitly external due to it being a node built-in")
+		}
+
+		r.flushDebugLogs(flushDueToSuccess)
+		return PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true},
+			true,
+			&SideEffectsData{} // Mark this with "sideEffects: false"
+	}
+
+	// "import fs from 'node:fs'"
+	// "require('node:fs')"
+	if r.options.Platform == config.PlatformNode && strings.HasPrefix(importPath, "node:") {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote("Marking this path as implicitly external due to the \"node:\" prefix")
+		}
+
+		// If this is a known node built-in module, mark it with "sideEffects: false"
+		var sideEffects *SideEffectsData
+		if BuiltInNodeModules[strings.TrimPrefix(importPath, "node:")] {
+			sideEffects = &SideEffectsData{}
+		}
+
+		// Check whether the path will end up as "import" or "require"
+		convertImportToRequire := !r.options.OutputFormat.KeepESMImportExportSyntax()
+		isImport := !convertImportToRequire && (r.kind == ast.ImportStmt || r.kind == ast.ImportDynamic)
+		isRequire := r.kind == ast.ImportRequire || r.kind == ast.ImportRequireResolve ||
+			(convertImportToRequire && (r.kind == ast.ImportStmt || r.kind == ast.ImportDynamic))
+
+		// Check for support with "import"
+		if isImport && r.options.UnsupportedJSFeatures.Has(compat.NodeColonPrefixImport) {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote("Removing the \"node:\" prefix because the target environment doesn't support it with \"import\" statements")
+			}
+
+			// Automatically strip the prefix if it's not supported
+			importPath = importPath[5:]
+		}
+
+		// Check for support with "require"
+		if isRequire && r.options.UnsupportedJSFeatures.Has(compat.NodeColonPrefixRequire) {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote("Removing the \"node:\" prefix because the target environment doesn't support it with \"require\" calls")
+			}
+
+			// Automatically strip the prefix if it's not supported
+			importPath = importPath[5:]
+		}
+
+		r.flushDebugLogs(flushDueToSuccess)
+		return PathPair{Primary: logger.Path{Text: importPath}, IsExternal: true}, true, sideEffects
+	}
+
+	return PathPair{}, false, nil
+}
+
+// finalizeImportsExportsKind distinguishes the normal "exports"/"imports" map
+// resolution path from the special case used when resolving a "tsconfig.json"
+// "extends" clause under Yarn PnP, which must avoid calling "dirInfoCached"
+// recursively (see finalizeImportsExportsResult).
+type finalizeImportsExportsKind uint8
+
+const (
+	finalizeImportsExportsNormal finalizeImportsExportsKind = iota
+	finalizeImportsExportsYarnPnPTSConfigExtends
+)
+
+// finalizeImportsExportsResult converts the raw outcome of the "exports" or
+// "imports" map resolution algorithm into a final resolved path on success.
+// On failure it attaches detailed debug notes to "r.debugMeta" explaining why
+// resolution failed and, where possible, an inline suggestion with the import
+// path that would have worked (e.g. a missing extension or "/index.js").
+func (r resolverQuery) finalizeImportsExportsResult(
+	kind finalizeImportsExportsKind,
+	absDirPath string,
+	conditions map[string]bool,
+	importExportMap pjMap,
+	packageJSON *packageJSON,
+
+	// Resolution results
+	resolvedPath string,
+	status pjStatus,
+	debug pjDebug,
+
+	// Only for exports
+	esmPackageName string,
+	esmPackageSubpath string,
+	absImportPath string,
+) (PathPair, bool, *fs.DifferentCase) {
+	// Holds the suffix (an extension or "/index.js"-style suffix) that would
+	// have made the import resolve; used below to build suggestion messages
+	missingSuffix := ""
+
+	if (status == pjStatusExact || status == pjStatusExactEndsWithStar || status == pjStatusInexact) && strings.HasPrefix(resolvedPath, "/") {
+		absResolvedPath := r.fs.Join(absDirPath, resolvedPath)
+
+		switch status {
+		case pjStatusExact, pjStatusExactEndsWithStar:
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("The resolved path %q is exact", absResolvedPath))
+			}
+
+			// Avoid calling "dirInfoCached" recursively for "tsconfig.json" extends with Yarn PnP
+			if kind == finalizeImportsExportsYarnPnPTSConfigExtends {
+				if r.debugLogs != nil {
+					r.debugLogs.addNote(fmt.Sprintf("Resolved to %q", absResolvedPath))
+				}
+				return PathPair{Primary: logger.Path{Text: absResolvedPath, Namespace: "file"}}, true, nil
+			}
+
+			resolvedDirInfo := r.dirInfoCached(r.fs.Dir(absResolvedPath))
+			base := r.fs.Base(absResolvedPath)
+			extensionOrder := r.options.ExtensionOrder
+			if r.kind.MustResolveToCSS() {
+				extensionOrder = r.cssExtensionOrder
+			}
+
+			if resolvedDirInfo == nil {
+				status = pjStatusModuleNotFound
+			} else {
+				entry, diffCase := resolvedDirInfo.entries.Get(base)
+
+				// TypeScript-specific behavior: try rewriting ".js" to ".ts"
+				if entry == nil {
+					for old, exts := range rewrittenFileExtensions {
+						if !strings.HasSuffix(base, old) {
+							continue
+						}
+						lastDot := strings.LastIndexByte(base, '.')
+						for _, ext := range exts {
+							baseWithExt := base[:lastDot] + ext
+							entry, diffCase = resolvedDirInfo.entries.Get(baseWithExt)
+							if entry != nil {
+								absResolvedPath = r.fs.Join(resolvedDirInfo.absPath, baseWithExt)
+								break
+							}
+						}
+						break
+					}
+				}
+
+				if entry == nil {
+					endsWithStar := status == pjStatusExactEndsWithStar
+					status = pjStatusModuleNotFound
+
+					// Try to have a friendly error message if people forget the extension
+					if endsWithStar {
+						for _, ext := range extensionOrder {
+							if entry, _ := resolvedDirInfo.entries.Get(base + ext); entry != nil {
+								if r.debugLogs != nil {
+									r.debugLogs.addNote(fmt.Sprintf("The import %q is missing the extension %q", path.Join(esmPackageName, esmPackageSubpath), ext))
+								}
+								status = pjStatusModuleNotFoundMissingExtension
+								missingSuffix = ext
+								break
+							}
+						}
+					}
+				} else if kind := entry.Kind(r.fs); kind == fs.DirEntry {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("The path %q is a directory, which is not allowed", absResolvedPath))
+					}
+					endsWithStar := status == pjStatusExactEndsWithStar
+					status = pjStatusUnsupportedDirectoryImport
+
+					// Try to have a friendly error message if people forget the "/index.js" suffix
+					if endsWithStar {
+						if resolvedDirInfo := r.dirInfoCached(absResolvedPath); resolvedDirInfo != nil {
+							for _, ext := range extensionOrder {
+								base := "index" + ext
+								if entry, _ := resolvedDirInfo.entries.Get(base); entry != nil && entry.Kind(r.fs) == fs.FileEntry {
+									status = pjStatusUnsupportedDirectoryImportMissingIndex
+									missingSuffix = "/" + base
+									if r.debugLogs != nil {
+										r.debugLogs.addNote(fmt.Sprintf("The import %q is missing the suffix %q", path.Join(esmPackageName, esmPackageSubpath), missingSuffix))
+									}
+									break
+								}
+							}
+						}
+					}
+				} else if kind != fs.FileEntry {
+					status = pjStatusModuleNotFound
+				} else {
+					if r.debugLogs != nil {
+						r.debugLogs.addNote(fmt.Sprintf("Resolved to %q", absResolvedPath))
+					}
+					return PathPair{Primary: logger.Path{Text: absResolvedPath, Namespace: "file"}}, true, diffCase
+				}
+			}
+
+		case pjStatusInexact:
+			// If this was resolved against an expansion key ending in a "/"
+			// instead of a "*", we need to try CommonJS-style implicit
+			// extension and/or directory detection.
+			if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("The resolved path %q is inexact", absResolvedPath))
+			}
+			if absolute, ok, diffCase := r.loadAsFileOrDirectory(absResolvedPath); ok {
+				return absolute, true, diffCase
+			}
+			status = pjStatusModuleNotFound
+		}
+	}
+
+	if strings.HasPrefix(resolvedPath, "/") {
+		resolvedPath = "." + resolvedPath
+	}
+
+	// Provide additional details about the failure to help with debugging
+	tracker := logger.MakeLineColumnTracker(&packageJSON.source)
+	switch status {
+	case pjStatusInvalidModuleSpecifier:
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+			fmt.Sprintf("The module specifier %q is invalid%s:", resolvedPath, debug.invalidBecause))}
+
+	case pjStatusInvalidPackageConfiguration:
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+			"The package configuration has an invalid value here:")}
+
+	case pjStatusInvalidPackageTarget:
+		why := fmt.Sprintf("The package target %q is invalid%s:", resolvedPath, debug.invalidBecause)
+		if resolvedPath == "" {
+			// "PACKAGE_TARGET_RESOLVE" is specified to throw an "Invalid
+			// Package Target" error for what is actually an invalid package
+			// configuration error
+			why = "The package configuration has an invalid value here:"
+		}
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token, why)}
+
+	case pjStatusPackagePathNotExported:
+		if debug.isBecauseOfNullLiteral {
+			r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+				fmt.Sprintf("The path %q cannot be imported from package %q because it was explicitly disabled by the package author here:", esmPackageSubpath, esmPackageName))}
+			break
+		}
+
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+			fmt.Sprintf("The path %q is not exported by package %q:", esmPackageSubpath, esmPackageName))}
+
+		// If this fails, try to resolve it using the old algorithm
+		if absolute, ok, _ := r.loadAsFileOrDirectory(absImportPath); ok && absolute.Primary.Namespace == "file" {
+			if relPath, ok := r.fs.Rel(absDirPath, absolute.Primary.Text); ok {
+				query := "." + path.Join("/", strings.ReplaceAll(relPath, "\\", "/"))
+
+				// If that succeeds, try to do a reverse lookup using the
+				// "exports" map for the currently-active set of conditions
+				if ok, subpath, token := r.esmPackageExportsReverseResolve(
+					query, importExportMap.root, conditions); ok {
+					r.debugMeta.notes = append(r.debugMeta.notes, tracker.MsgData(token,
+						fmt.Sprintf("The file %q is exported at path %q:", query, subpath)))
+
+					// Provide an inline suggestion message with the correct import path
+					actualImportPath := path.Join(esmPackageName, subpath)
+					r.debugMeta.suggestionText = string(helpers.QuoteForJSON(actualImportPath, false))
+					r.debugMeta.suggestionMessage = fmt.Sprintf("Import from %q to get the file %q:",
+						actualImportPath, PrettyPath(r.fs, absolute.Primary))
+				}
+			}
+		}
+
+	case pjStatusPackageImportNotDefined:
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+			fmt.Sprintf("The package import %q is not defined in this \"imports\" map:", resolvedPath))}
+
+	case pjStatusModuleNotFound, pjStatusModuleNotFoundMissingExtension:
+		r.debugMeta.notes = []logger.MsgData{tracker.MsgData(debug.token,
+			fmt.Sprintf("The module %q was not found on the file system:", resolvedPath))}
+
+		// Provide an inline suggestion message with the correct import path
+		if status == pjStatusModuleNotFoundMissingExtension {
+			actualImportPath := path.Join(esmPackageName, esmPackageSubpath+missingSuffix)
+			r.debugMeta.suggestionRange = suggestionRangeEnd
+			r.debugMeta.suggestionText = missingSuffix
+			r.debugMeta.suggestionMessage = fmt.Sprintf("Import from %q to get the file %q:",
+				actualImportPath, PrettyPath(r.fs, logger.Path{Text: r.fs.Join(absDirPath, resolvedPath+missingSuffix), Namespace: "file"}))
+		}
+
+	case pjStatusUnsupportedDirectoryImport, pjStatusUnsupportedDirectoryImportMissingIndex:
+		r.debugMeta.notes = []logger.MsgData{
+			tracker.MsgData(debug.token, fmt.Sprintf("Importing the directory %q is forbidden by this package:", resolvedPath)),
+			tracker.MsgData(packageJSON.source.RangeOfString(importExportMap.propertyKeyLoc),
+				fmt.Sprintf("The presence of %q here makes importing a directory forbidden:", importExportMap.propertyKey)),
+		}
+
+		// Provide an inline suggestion message with the correct import path
+		if status == pjStatusUnsupportedDirectoryImportMissingIndex {
+			actualImportPath := path.Join(esmPackageName, esmPackageSubpath+missingSuffix)
+			r.debugMeta.suggestionRange = suggestionRangeEnd
+			r.debugMeta.suggestionText = missingSuffix
+			r.debugMeta.suggestionMessage = fmt.Sprintf("Import from %q to get the file %q:",
+				actualImportPath, PrettyPath(r.fs, logger.Path{Text: r.fs.Join(absDirPath, resolvedPath+missingSuffix), Namespace: "file"}))
+		}
+
+	case pjStatusUndefinedNoConditionsMatch:
+		keys := make([]string, 0, len(conditions))
+		for key := range conditions {
+			keys = append(keys, key)
+		}
+		sort.Strings(keys)
+
+		unmatchedConditions := make([]string, len(debug.unmatchedConditions))
+		for i, key := range debug.unmatchedConditions {
+			unmatchedConditions[i] = key.Text
+		}
+
+		r.debugMeta.notes = []logger.MsgData{
+			tracker.MsgData(importExportMap.root.firstToken,
+				fmt.Sprintf("The path %q is not currently exported by package %q:",
+					esmPackageSubpath, esmPackageName)),
+
+			tracker.MsgData(debug.token,
+				fmt.Sprintf("None of the conditions in the package definition (%s) match any of the currently active conditions (%s):",
+					helpers.StringArrayToQuotedCommaSeparatedString(unmatchedConditions),
+					helpers.StringArrayToQuotedCommaSeparatedString(keys),
+				)),
+		}
+
+		didSuggestEnablingCondition := false
+		for _, key := range debug.unmatchedConditions {
+			switch key.Text {
+			case "import":
+				if r.kind == ast.ImportRequire || r.kind == ast.ImportRequireResolve {
+					r.debugMeta.suggestionMessage = "Consider using an \"import\" statement to import this file, " +
+						"which will work because the \"import\" condition is supported by this package:"
+				}
+
+			case "require":
+				if r.kind == ast.ImportStmt || r.kind == ast.ImportDynamic {
+					r.debugMeta.suggestionMessage = "Consider using a \"require()\" call to import this file, " +
+						"which will work because the \"require\" condition is supported by this package:"
+				}
+
+			default:
+				// Note: Don't suggest adding the "types" condition because
+				// TypeScript uses that for type definitions, which are not
+				// intended to be included in a bundle as executable code
+				if !didSuggestEnablingCondition && key.Text != "types" {
+					var how string
+					switch logger.API {
+					case logger.CLIAPI:
+						how = fmt.Sprintf("\"--conditions=%s\"", key.Text)
+					case logger.JSAPI:
+						how = fmt.Sprintf("\"conditions: ['%s']\"", key.Text)
+					case logger.GoAPI:
+						how = fmt.Sprintf("'Conditions: []string{%q}'", key.Text)
+					}
+					r.debugMeta.notes = append(r.debugMeta.notes, tracker.MsgData(key.Range,
+						fmt.Sprintf("Consider enabling the %q condition if this package expects it to be enabled. "+
+							"You can use %s to do that:", key.Text, how)))
+					didSuggestEnablingCondition = true
+				}
+			}
+		}
+	}
+
+	return PathPair{}, false, nil
+}
+
+// IsPackagePath reports whether "path" is a package path. Package paths are
+// loaded from a "node_modules" directory. Non-package paths are relative or
+// absolute paths: anything starting with "/", "./", or "../", or exactly "."
+// or "..", is not a package path.
+func IsPackagePath(path string) bool {
+	return !strings.HasPrefix(path, "/") && !strings.HasPrefix(path, "./") &&
+		!strings.HasPrefix(path, "../") && path != "." && path != ".."
+}
+
+// BuiltInNodeModules is the set of node built-in module names. Note that the
+// "node:" prefix is not included in these keys; prefixed import paths are
+// handled separately in "checkForBuiltInNodeModules" by stripping the prefix
+// before consulting this map.
+//
+// This list can be obtained with the following command:
+//
+//	node --experimental-wasi-unstable-preview1 -p "[...require('module').builtinModules].join('\n')"
+//
+// Be sure to use the *LATEST* version of node when updating this list!
+var BuiltInNodeModules = map[string]bool{
+	"_http_agent":         true,
+	"_http_client":        true,
+	"_http_common":        true,
+	"_http_incoming":      true,
+	"_http_outgoing":      true,
+	"_http_server":        true,
+	"_stream_duplex":      true,
+	"_stream_passthrough": true,
+	"_stream_readable":    true,
+	"_stream_transform":   true,
+	"_stream_wrap":        true,
+	"_stream_writable":    true,
+	"_tls_common":         true,
+	"_tls_wrap":           true,
+	"assert":              true,
+	"assert/strict":       true,
+	"async_hooks":         true,
+	"buffer":              true,
+	"child_process":       true,
+	"cluster":             true,
+	"console":             true,
+	"constants":           true,
+	"crypto":              true,
+	"dgram":               true,
+	"diagnostics_channel": true,
+	"dns":                 true,
+	"dns/promises":        true,
+	"domain":              true,
+	"events":              true,
+	"fs":                  true,
+	"fs/promises":         true,
+	"http":                true,
+	"http2":               true,
+	"https":               true,
+	"inspector":           true,
+	"module":              true,
+	"net":                 true,
+	"os":                  true,
+	"path":                true,
+	"path/posix":          true,
+	"path/win32":          true,
+	"perf_hooks":          true,
+	"process":             true,
+	"punycode":            true,
+	"querystring":         true,
+	"readline":            true,
+	"repl":                true,
+	"stream":              true,
+	"stream/consumers":    true,
+	"stream/promises":     true,
+	"stream/web":          true,
+	"string_decoder":      true,
+	"sys":                 true,
+	"timers":              true,
+	"timers/promises":     true,
+	"tls":                 true,
+	"trace_events":        true,
+	"tty":                 true,
+	"url":                 true,
+	"util":                true,
+	"util/types":          true,
+	"v8":                  true,
+	"vm":                  true,
+	"wasi":                true,
+	"worker_threads":      true,
+	"zlib":                true,
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/testExpectations.json b/source/vendor/github.com/evanw/esbuild/internal/resolver/testExpectations.json
new file mode 100644
index 0000000..56cd4a1
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/testExpectations.json
@@ -0,0 +1,311 @@
+[{
+  "manifest": {
+    "__info": [],
+    "dependencyTreeRoots": [{
+      "name": "root",
+      "reference": "workspace:."
+    }],
+    "ignorePatternData": null,
+    "enableTopLevelFallback": false,
+    "fallbackPool": [],
+    "fallbackExclusionList": [],
+    "packageRegistryData": [
+      [null, [
+        [null, {
+          "packageLocation": "./",
+          "packageDependencies": [["test", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["root", [
+        ["workspace:.", {
+          "packageLocation": "./",
+          "packageDependencies": [["test", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-alias-dependency", [
+        ["workspace:workspace-alias-dependency", {
+          "packageLocation": "./workspace-alias-dependency/",
+          "packageDependencies": [["alias", ["test", "npm:1.0.0"]]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-self-dependency", [
+        ["workspace:workspace-self-dependency", {
+          "packageLocation": "./workspace-self-dependency/",
+          "packageDependencies": [["workspace-self-dependency", "workspace:workspace-self-dependency"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-unfulfilled-peer-dependency", [
+        ["workspace:workspace-unfulfilled-peer-dependency", {
+          "packageLocation": "./workspace-unfulfilled-peer-dependency/",
+          "packageDependencies": [["test", null]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["longer", [
+        ["workspace:longer", {
+          "packageLocation": "./longer/",
+          "packageDependencies": [["test", "npm:2.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["long", [
+        ["workspace:long", {
+          "packageLocation": "./long/",
+          "packageDependencies": [["test", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["longerer", [
+        ["workspace:longerer", {
+          "packageLocation": "./longerer/",
+          "packageDependencies": [["test", "npm:3.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["test", [
+        ["npm:1.0.0", {
+          "packageLocation": "./test-1.0.0/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }],
+        ["npm:2.0.0", {
+          "packageLocation": "./test-2.0.0/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }],
+        ["npm:3.0.0", {
+          "packageLocation": "./test-3.0.0/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }]
+      ]]
+    ]
+  },
+  "tests": [{
+    "it": "should allow a package to import one of its dependencies",
+    "imported": "test",
+    "importer": "/path/to/project/",
+    "expected": "/path/to/project/test-1.0.0/"
+  }, {
+    "it": "should allow a package to import itself, if specified in its own dependencies",
+    "imported": "workspace-self-dependency",
+    "importer": "/path/to/project/workspace-self-dependency/",
+    "expected": "/path/to/project/workspace-self-dependency/"
+  }, {
+    "it": "should allow a package to import an aliased dependency",
+    "imported": "alias",
+    "importer": "/path/to/project/workspace-alias-dependency/",
+    "expected": "/path/to/project/test-1.0.0/"
+  }, {
+    "it": "shouldn't allow a package to import something that isn't one of its dependencies",
+    "imported": "missing-dependency",
+    "importer": "/path/to/project/",
+    "expected": "error!"
+  }, {
+    "it": "shouldn't accidentally discard the trailing slash from the package locations",
+    "imported": "test",
+    "importer": "/path/to/project/long/",
+    "expected": "/path/to/project/test-1.0.0/"
+  }, {
+    "it": "should throw an exception when trying to access an unfulfilled peer dependency",
+    "imported": "test",
+    "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/",
+    "expected": "error!"
+  }]
+}, {
+  "manifest": {
+    "__info": [],
+    "dependencyTreeRoots": [{
+      "name": "root",
+      "reference": "workspace:."
+    }],
+    "ignorePatternData": null,
+    "enableTopLevelFallback": true,
+    "fallbackPool": [
+      ["test-2", "npm:1.0.0"],
+      ["alias", ["test-1", "npm:1.0.0"]]
+    ],
+    "fallbackExclusionList": [[
+      "workspace-no-fallbacks",
+      ["workspace:workspace-no-fallbacks"]
+    ]],
+    "packageRegistryData": [
+      [null, [
+        [null, {
+          "packageLocation": "./",
+          "packageDependencies": [["test-1", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["root", [
+        ["workspace:.", {
+          "packageLocation": "./",
+          "packageDependencies": [["test-1", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-no-fallbacks", [
+        ["workspace:workspace-no-fallbacks", {
+          "packageLocation": "./workspace-no-fallbacks/",
+          "packageDependencies": [],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-with-fallbacks", [
+        ["workspace:workspace-with-fallbacks", {
+          "packageLocation": "./workspace-with-fallbacks/",
+          "packageDependencies": [],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["workspace-unfulfilled-peer-dependency", [
+        ["workspace:workspace-unfulfilled-peer-dependency", {
+          "packageLocation": "./workspace-unfulfilled-peer-dependency/",
+          "packageDependencies": [
+            ["test-1", null],
+            ["test-2", null]
+          ],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["test-1", [
+        ["npm:1.0.0", {
+          "packageLocation": "./test-1/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }]
+      ]],
+      ["test-2", [
+        ["npm:1.0.0", {
+          "packageLocation": "./test-2/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }]
+      ]]
+    ]
+  },
+  "tests": [{
+    "it": "should allow resolution coming from the fallback pool if enableTopLevelFallback is set to true",
+    "imported": "test-1",
+    "importer": "/path/to/project/",
+    "expected": "/path/to/project/test-1/"
+  }, {
+    "it": "should allow the fallback pool to contain aliases",
+    "imported": "alias",
+    "importer": "/path/to/project/",
+    "expected": "/path/to/project/test-1/"
+  }, {
+    "it": "shouldn't use the fallback pool when the importer package is listed in fallbackExclusionList",
+    "imported": "test-1",
+    "importer": "/path/to/project/workspace-no-fallbacks/",
+    "expected": "error!"
+  }, {
+    "it": "should implicitly use the top-level package dependencies as part of the fallback pool",
+    "imported": "test-2",
+    "importer": "/path/to/project/workspace-with-fallbacks/",
+    "expected": "/path/to/project/test-2/"
+  }, {
+    "it": "should throw an error if a resolution isn't in the package dependencies, nor inside the fallback pool",
+    "imported": "test-3",
+    "importer": "/path/to/project/workspace-with-fallbacks/",
+    "expected": "error!"
+  }, {
+    "it": "should use the top-level fallback if a dependency is missing because of an unfulfilled peer dependency",
+    "imported": "test-1",
+    "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/",
+    "expected": "/path/to/project/test-1/"
+  }, {
+    "it": "should use the fallback pool if a dependency is missing because of an unfulfilled peer dependency",
+    "imported": "test-2",
+    "importer": "/path/to/project/workspace-unfulfilled-peer-dependency/",
+    "expected": "/path/to/project/test-2/"
+  }]
+}, {
+  "manifest": {
+    "__info": [],
+    "dependencyTreeRoots": [{
+      "name": "root",
+      "reference": "workspace:."
+    }],
+    "ignorePatternData": null,
+    "enableTopLevelFallback": false,
+    "fallbackPool": [
+      ["test", "npm:1.0.0"]
+    ],
+    "fallbackExclusionList": [],
+    "packageRegistryData": [
+      [null, [
+        [null, {
+          "packageLocation": "./",
+          "packageDependencies": [],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["root", [
+        ["workspace:.", {
+          "packageLocation": "./",
+          "packageDependencies": [],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["test", [
+        ["npm:1.0.0", {
+          "packageLocation": "./test-1/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }]
+      ]]
+    ]
+  },
+  "tests": [{
+    "it": "should ignore the fallback pool if enableTopLevelFallback is set to false",
+    "imported": "test",
+    "importer": "/path/to/project/",
+    "expected": "error!"
+  }]
+}, {
+  "manifest": {
+    "__info": [],
+    "dependencyTreeRoots": [{
+      "name": "root",
+      "reference": "workspace:."
+    }],
+    "ignorePatternData": "^not-a-workspace(/|$)",
+    "enableTopLevelFallback": false,
+    "fallbackPool": [],
+    "fallbackExclusionList": [],
+    "packageRegistryData": [
+      [null, [
+        [null, {
+          "packageLocation": "./",
+          "packageDependencies": [],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["root", [
+        ["workspace:.", {
+          "packageLocation": "./",
+          "packageDependencies": [["test", "npm:1.0.0"]],
+          "linkType": "SOFT"
+        }]
+      ]],
+      ["test", [
+        ["npm:1.0.0", {
+          "packageLocation": "./test/",
+          "packageDependencies": [],
+          "linkType": "HARD"
+        }]
+      ]]
+    ]
+  },
+  "tests": [{
+    "it": "shouldn't go through PnP when trying to resolve dependencies from packages covered by ignorePatternData",
+    "imported": "test",
+    "importer": "/path/to/project/not-a-workspace/",
+    "expected": "error!"
+  }]
+}]
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/tsconfig_json.go b/source/vendor/github.com/evanw/esbuild/internal/resolver/tsconfig_json.go
new file mode 100644
index 0000000..edfc775
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/tsconfig_json.go
@@ -0,0 +1,481 @@
+package resolver
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/evanw/esbuild/internal/cache"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_lexer"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type TSConfigJSON struct {
+	AbsPath string
+
+	// The absolute path of "compilerOptions.baseUrl"
+	BaseURL *string
+
+	// This is used if "Paths" is non-nil. It's equal to "BaseURL" except if
+	// "BaseURL" is missing, in which case it is as if "BaseURL" was ".". This
+	// is to implement the "paths without baseUrl" feature from TypeScript 4.1.
+	// More info: https://github.com/microsoft/TypeScript/issues/31869
+	BaseURLForPaths string
+
+	// The verbatim values of "compilerOptions.paths". The keys are patterns to
+	// match and the values are arrays of fallback paths to search. Each key and
+	// each fallback path can optionally have a single "*" wildcard character.
+	// If both the key and the value have a wildcard, the substring matched by
+	// the wildcard is substituted into the fallback path. The keys represent
+	// module-style path names and the fallback paths are relative to the
+	// "baseUrl" value in the "tsconfig.json" file.
+	Paths *TSConfigPaths
+
+	tsTargetKey    tsTargetKey
+	TSStrict       *config.TSAlwaysStrict
+	TSAlwaysStrict *config.TSAlwaysStrict
+	JSXSettings    config.TSConfigJSX
+	Settings       config.TSConfig
+}
+
+// applyExtendedConfig copies settings from a base configuration (one named
+// in an "extends" clause) into derived. Only values actually present in the
+// base are copied. Note that "paths" carries its "BaseURLForPaths" along so
+// the patterns stay relative to the file that declared them.
+func (derived *TSConfigJSON) applyExtendedConfig(base TSConfigJSON) {
+	if base.tsTargetKey.Range.Len > 0 {
+		derived.tsTargetKey = base.tsTargetKey
+	}
+	if base.TSStrict != nil {
+		derived.TSStrict = base.TSStrict
+	}
+	if base.TSAlwaysStrict != nil {
+		derived.TSAlwaysStrict = base.TSAlwaysStrict
+	}
+	if base.BaseURL != nil {
+		derived.BaseURL = base.BaseURL
+	}
+	if base.Paths != nil {
+		derived.Paths = base.Paths
+		derived.BaseURLForPaths = base.BaseURLForPaths
+	}
+	derived.JSXSettings.ApplyExtendedConfig(base.JSXSettings)
+	derived.Settings.ApplyExtendedConfig(base.Settings)
+}
+
+// TSAlwaysStrictOrStrict returns the effective "alwaysStrict" setting:
+// "alwaysStrict" itself when present, otherwise "strict".
+//
+// NOTE: the receiver is named "config", which shadows the imported config
+// package inside the body. The "*config.TSAlwaysStrict" in the signature
+// still refers to the package because receiver names are only in scope
+// within the method body.
+func (config *TSConfigJSON) TSAlwaysStrictOrStrict() *config.TSAlwaysStrict {
+	if config.TSAlwaysStrict != nil {
+		return config.TSAlwaysStrict
+	}
+
+	// If "alwaysStrict" is absent, it defaults to "strict" instead
+	return config.TSStrict
+}
+
+// tsTargetKey records the lower-cased value of the "target" option and
+// where it was declared. This information is only used for error messages.
+type tsTargetKey struct {
+	LowerValue string
+	Source     logger.Source
+	Range      logger.Range
+}
+
+// TSConfigPath is a single fallback path from a "paths" array, together
+// with its location in the source file for diagnostics.
+type TSConfigPath struct {
+	Text string
+	Loc  logger.Loc
+}
+
+// TSConfigPaths is the validated "compilerOptions.paths" mapping: each key
+// pattern maps to its ordered list of fallback paths.
+type TSConfigPaths struct {
+	Map map[string][]TSConfigPath
+
+	// This may be different from the original "tsconfig.json" source if the
+	// "paths" value is from another file via an "extends" clause.
+	Source logger.Source
+}
+
+// ParseTSConfigJSON parses one "tsconfig.json" file into a TSConfigJSON,
+// returning nil if the source cannot be parsed. The "extends" clause is
+// processed first (through the caller-supplied extends callback, which may
+// be nil to disable inheritance), and the file's own "compilerOptions" are
+// applied afterward so they override anything inherited. fileDir is the
+// directory containing this file (relative "baseUrl" and "paths" values
+// are resolved against it) and configDir is substituted for a leading
+// "${configDir}" template. Invalid option values are reported as warnings
+// on log rather than failing the parse; unrecognized options are ignored.
+func ParseTSConfigJSON(
+	log logger.Log,
+	source logger.Source,
+	jsonCache *cache.JSONCache,
+	fs fs.FS,
+	fileDir string,
+	configDir string,
+	extends func(string, logger.Range) *TSConfigJSON,
+) *TSConfigJSON {
+	// Unfortunately "tsconfig.json" isn't actually JSON. It's some other
+	// format that appears to be defined by the implementation details of the
+	// TypeScript compiler.
+	//
+	// Attempt to parse it anyway by modifying the JSON parser, but just for
+	// these particular files. This is likely not a completely accurate
+	// emulation of what the TypeScript compiler does (e.g. string escape
+	// behavior may also be different).
+	json, ok := jsonCache.Parse(log, source, js_parser.JSONOptions{Flavor: js_lexer.TSConfigJSON})
+	if !ok {
+		return nil
+	}
+
+	var result TSConfigJSON
+	result.AbsPath = source.KeyPath.Text
+	tracker := logger.MakeLineColumnTracker(&source)
+
+	// Parse "extends". The value may be either a single string or (since
+	// TypeScript 5.0) an array of strings; bases are applied in order, so
+	// later entries override earlier ones.
+	if extends != nil {
+		if valueJSON, _, ok := getProperty(json, "extends"); ok {
+			if value, ok := getString(valueJSON); ok {
+				if base := extends(value, source.RangeOfString(valueJSON.Loc)); base != nil {
+					result.applyExtendedConfig(*base)
+				}
+			} else if array, ok := valueJSON.Data.(*js_ast.EArray); ok {
+				for _, item := range array.Items {
+					if str, ok := getString(item); ok {
+						if base := extends(str, source.RangeOfString(item.Loc)); base != nil {
+							result.applyExtendedConfig(*base)
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Parse "compilerOptions"
+	if compilerOptionsJSON, _, ok := getProperty(json, "compilerOptions"); ok {
+		// Parse "baseUrl"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "baseUrl"); ok {
+			if value, ok := getString(valueJSON); ok {
+				value = getSubstitutedPathWithConfigDirTemplate(fs, value, configDir)
+				if !fs.IsAbs(value) {
+					value = fs.Join(fileDir, value)
+				}
+				result.BaseURL = &value
+			}
+		}
+
+		// Parse "jsx" (unrecognized modes are silently ignored)
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsx"); ok {
+			if value, ok := getString(valueJSON); ok {
+				switch strings.ToLower(value) {
+				case "preserve":
+					result.JSXSettings.JSX = config.TSJSXPreserve
+				case "react-native":
+					result.JSXSettings.JSX = config.TSJSXReactNative
+				case "react":
+					result.JSXSettings.JSX = config.TSJSXReact
+				case "react-jsx":
+					result.JSXSettings.JSX = config.TSJSXReactJSX
+				case "react-jsxdev":
+					result.JSXSettings.JSX = config.TSJSXReactJSXDev
+				}
+			}
+		}
+
+		// Parse "jsxFactory"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxFactory"); ok {
+			if value, ok := getString(valueJSON); ok {
+				result.JSXSettings.JSXFactory = parseMemberExpressionForJSX(log, &source, &tracker, valueJSON.Loc, value)
+			}
+		}
+
+		// Parse "jsxFragmentFactory"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxFragmentFactory"); ok {
+			if value, ok := getString(valueJSON); ok {
+				result.JSXSettings.JSXFragmentFactory = parseMemberExpressionForJSX(log, &source, &tracker, valueJSON.Loc, value)
+			}
+		}
+
+		// Parse "jsxImportSource"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "jsxImportSource"); ok {
+			if value, ok := getString(valueJSON); ok {
+				result.JSXSettings.JSXImportSource = &value
+			}
+		}
+
+		// Parse "experimentalDecorators"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "experimentalDecorators"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				if value {
+					result.Settings.ExperimentalDecorators = config.True
+				} else {
+					result.Settings.ExperimentalDecorators = config.False
+				}
+			}
+		}
+
+		// Parse "useDefineForClassFields"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "useDefineForClassFields"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				if value {
+					result.Settings.UseDefineForClassFields = config.True
+				} else {
+					result.Settings.UseDefineForClassFields = config.False
+				}
+			}
+		}
+
+		// Parse "target". Only whether the target is below ES2022 or not is
+		// retained (it changes class field behavior); the exact value is kept
+		// in tsTargetKey purely for diagnostics.
+		if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "target"); ok {
+			if value, ok := getString(valueJSON); ok {
+				lowerValue := strings.ToLower(value)
+				ok := true
+
+				// See https://www.typescriptlang.org/tsconfig#target
+				switch lowerValue {
+				case "es3", "es5", "es6", "es2015", "es2016", "es2017", "es2018", "es2019", "es2020", "es2021":
+					result.Settings.Target = config.TSTargetBelowES2022
+				case "es2022", "es2023", "esnext":
+					result.Settings.Target = config.TSTargetAtOrAboveES2022
+				default:
+					ok = false
+					// Suppress the warning inside "node_modules" since users
+					// cannot fix third-party configuration files
+					if !helpers.IsInsideNodeModules(source.KeyPath.Text) {
+						log.AddID(logger.MsgID_TSConfigJSON_InvalidTarget, logger.Warning, &tracker, source.RangeOfString(valueJSON.Loc),
+							fmt.Sprintf("Unrecognized target environment %q", value))
+					}
+				}
+
+				if ok {
+					result.tsTargetKey = tsTargetKey{
+						Source:     source,
+						Range:      source.RangeOfString(keyLoc),
+						LowerValue: lowerValue,
+					}
+				}
+			}
+		}
+
+		// Parse "strict". The recorded range spans from the key to the end of
+		// the value so diagnostics can point at the whole setting.
+		if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "strict"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				valueRange := js_lexer.RangeOfIdentifier(source, valueJSON.Loc)
+				result.TSStrict = &config.TSAlwaysStrict{
+					Name:   "strict",
+					Value:  value,
+					Source: source,
+					Range:  logger.Range{Loc: keyLoc, Len: valueRange.End() - keyLoc.Start},
+				}
+			}
+		}
+
+		// Parse "alwaysStrict"
+		if valueJSON, keyLoc, ok := getProperty(compilerOptionsJSON, "alwaysStrict"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				valueRange := js_lexer.RangeOfIdentifier(source, valueJSON.Loc)
+				result.TSAlwaysStrict = &config.TSAlwaysStrict{
+					Name:   "alwaysStrict",
+					Value:  value,
+					Source: source,
+					Range:  logger.Range{Loc: keyLoc, Len: valueRange.End() - keyLoc.Start},
+				}
+			}
+		}
+
+		// Parse "importsNotUsedAsValues"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "importsNotUsedAsValues"); ok {
+			if value, ok := getString(valueJSON); ok {
+				switch value {
+				case "remove":
+					result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Remove
+				case "preserve":
+					result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Preserve
+				case "error":
+					result.Settings.ImportsNotUsedAsValues = config.TSImportsNotUsedAsValues_Error
+				default:
+					log.AddID(logger.MsgID_TSConfigJSON_InvalidImportsNotUsedAsValues, logger.Warning, &tracker, source.RangeOfString(valueJSON.Loc),
+						fmt.Sprintf("Invalid value %q for \"importsNotUsedAsValues\"", value))
+				}
+			}
+		}
+
+		// Parse "preserveValueImports"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "preserveValueImports"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				if value {
+					result.Settings.PreserveValueImports = config.True
+				} else {
+					result.Settings.PreserveValueImports = config.False
+				}
+			}
+		}
+
+		// Parse "verbatimModuleSyntax"
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "verbatimModuleSyntax"); ok {
+			if value, ok := getBool(valueJSON); ok {
+				if value {
+					result.Settings.VerbatimModuleSyntax = config.True
+				} else {
+					result.Settings.VerbatimModuleSyntax = config.False
+				}
+			}
+		}
+
+		// Parse "paths". Invalid keys/values are warned about and skipped
+		// rather than failing the whole parse.
+		if valueJSON, _, ok := getProperty(compilerOptionsJSON, "paths"); ok {
+			if paths, ok := valueJSON.Data.(*js_ast.EObject); ok {
+				result.BaseURLForPaths = fileDir
+				result.Paths = &TSConfigPaths{Source: source, Map: make(map[string][]TSConfigPath)}
+				for _, prop := range paths.Properties {
+					if key, ok := getString(prop.Key); ok {
+						if !isValidTSConfigPathPattern(key, log, &source, &tracker, prop.Key.Loc) {
+							continue
+						}
+
+						// The "paths" field is an object which maps a pattern to an
+						// array of remapping patterns to try, in priority order. See
+						// the documentation for examples of how this is used:
+						// https://www.typescriptlang.org/docs/handbook/module-resolution.html#path-mapping.
+						//
+						// One particular example:
+						//
+						//   {
+						//     "compilerOptions": {
+						//       "baseUrl": "projectRoot",
+						//       "paths": {
+						//         "*": [
+						//           "*",
+						//           "generated/*"
+						//         ]
+						//       }
+						//     }
+						//   }
+						//
+						// Matching "folder1/file2" should first check "projectRoot/folder1/file2"
+						// and then, if that didn't work, also check "projectRoot/generated/folder1/file2".
+						if array, ok := prop.ValueOrNil.Data.(*js_ast.EArray); ok {
+							for _, item := range array.Items {
+								if str, ok := getString(item); ok {
+									if isValidTSConfigPathPattern(str, log, &source, &tracker, item.Loc) {
+										str = getSubstitutedPathWithConfigDirTemplate(fs, str, configDir)
+										result.Paths.Map[key] = append(result.Paths.Map[key], TSConfigPath{Text: str, Loc: item.Loc})
+									}
+								}
+							}
+						} else {
+							log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, &tracker, source.RangeOfString(prop.ValueOrNil.Loc), fmt.Sprintf(
+								"Substitutions for pattern %q should be an array", key))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	// Warn about compiler options not wrapped in "compilerOptions".
+	// For example: https://github.com/evanw/esbuild/issues/3301
+	if obj, ok := json.Data.(*js_ast.EObject); ok {
+	loop:
+		for _, prop := range obj.Properties {
+			if key, ok := prop.Key.Data.(*js_ast.EString); ok && key.Value != nil {
+				key := helpers.UTF16ToString(key.Value)
+				switch key {
+				case "alwaysStrict",
+					"baseUrl",
+					"experimentalDecorators",
+					"importsNotUsedAsValues",
+					"jsx",
+					"jsxFactory",
+					"jsxFragmentFactory",
+					"jsxImportSource",
+					"paths",
+					"preserveValueImports",
+					"strict",
+					"target",
+					"useDefineForClassFields",
+					"verbatimModuleSyntax":
+					// Only warn once per file, for the first such key
+					log.AddIDWithNotes(logger.MsgID_TSConfigJSON_InvalidTopLevelOption, logger.Warning, &tracker, source.RangeOfString(prop.Key.Loc),
+						fmt.Sprintf("Expected the %q option to be nested inside a \"compilerOptions\" object", key),
+						[]logger.MsgData{})
+					break loop
+				}
+			}
+		}
+	}
+
+	return &result
+}
+
+// getSubstitutedPathWithConfigDirTemplate replaces a leading "${configDir}"
+// template prefix in value with basePath, leaving other values unchanged.
+// See: https://github.com/microsoft/TypeScript/pull/58042
+func getSubstitutedPathWithConfigDirTemplate(fs fs.FS, value string, basePath string) string {
+	if strings.HasPrefix(value, "${configDir}") {
+		// 12 == len("${configDir}"); the remainder is joined as relative
+		return fs.Join(basePath, "./"+value[12:])
+	}
+	return value
+}
+
+// parseMemberExpressionForJSX splits a dotted member expression such as
+// "React.createElement" (the value of "jsxFactory"/"jsxFragmentFactory")
+// into its identifier parts. It returns nil with a warning when any part is
+// not a valid identifier, and nil without a warning for the empty string.
+func parseMemberExpressionForJSX(log logger.Log, source *logger.Source, tracker *logger.LineColumnTracker, loc logger.Loc, text string) []string {
+	if text == "" {
+		return nil
+	}
+	parts := strings.Split(text, ".")
+	for _, part := range parts {
+		if !js_ast.IsIdentifier(part) {
+			warnRange := source.RangeOfString(loc)
+			log.AddID(logger.MsgID_TSConfigJSON_InvalidJSX, logger.Warning, tracker, warnRange, fmt.Sprintf("Invalid JSX member expression: %q", text))
+			return nil
+		}
+	}
+	return parts
+}
+
+// isValidTSConfigPathPattern reports whether text is a valid "paths"
+// pattern, i.e. contains at most one "*" wildcard. Invalid patterns
+// produce a warning and are skipped by the caller.
+func isValidTSConfigPathPattern(text string, log logger.Log, source *logger.Source, tracker *logger.LineColumnTracker, loc logger.Loc) bool {
+	foundAsterisk := false
+	for i := 0; i < len(text); i++ {
+		if text[i] == '*' {
+			if foundAsterisk {
+				r := source.RangeOfString(loc)
+				log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, tracker, r, fmt.Sprintf(
+					"Invalid pattern %q, must have at most one \"*\" character", text))
+				return false
+			}
+			foundAsterisk = true
+		}
+	}
+	return true
+}
+
+// isSlash reports whether c is a POSIX ('/') or Windows ('\') path separator.
+func isSlash(c byte) bool {
+	return c == '/' || c == '\\'
+}
+
+// isValidTSConfigPathNoBaseURLPattern reports whether text is a relative
+// ("." / ".."-prefixed) or absolute (POSIX "/", UNC "\\", or DOS drive)
+// path, which is required for "paths" entries when "baseUrl" is not set.
+// On failure it logs a warning. tracker is a double pointer so that a
+// LineColumnTracker is created lazily, only when a warning is actually
+// emitted, and is then cached for subsequent calls.
+func isValidTSConfigPathNoBaseURLPattern(text string, log logger.Log, source *logger.Source, tracker **logger.LineColumnTracker, loc logger.Loc) bool {
+	// Peek at up to the first three bytes; missing bytes stay zero
+	var c0 byte
+	var c1 byte
+	var c2 byte
+	n := len(text)
+
+	if n > 0 {
+		c0 = text[0]
+		if n > 1 {
+			c1 = text[1]
+			if n > 2 {
+				c2 = text[2]
+			}
+		}
+	}
+
+	// Relative "." or ".."
+	if c0 == '.' && (n == 1 || (n == 2 && c1 == '.')) {
+		return true
+	}
+
+	// Relative "./" or "../" or ".\\" or "..\\"
+	if c0 == '.' && (isSlash(c1) || (c1 == '.' && isSlash(c2))) {
+		return true
+	}
+
+	// Absolute POSIX "/" or UNC "\\"
+	if isSlash(c0) {
+		return true
+	}
+
+	// Absolute DOS "c:/" or "c:\\"
+	if ((c0 >= 'a' && c0 <= 'z') || (c0 >= 'A' && c0 <= 'Z')) && c1 == ':' && isSlash(c2) {
+		return true
+	}
+
+	r := source.RangeOfString(loc)
+	if *tracker == nil {
+		t := logger.MakeLineColumnTracker(source)
+		*tracker = &t
+	}
+	log.AddID(logger.MsgID_TSConfigJSON_InvalidPaths, logger.Warning, *tracker, r, fmt.Sprintf(
+		"Non-relative path %q is not allowed when \"baseUrl\" is not set (did you forget a leading \"./\"?)", text))
+	return false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/resolver/yarnpnp.go b/source/vendor/github.com/evanw/esbuild/internal/resolver/yarnpnp.go
new file mode 100644
index 0000000..5a6a68d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/resolver/yarnpnp.go
@@ -0,0 +1,665 @@
+package resolver
+
+// This file implements the Yarn PnP specification: https://yarnpkg.com/advanced/pnp-spec/
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"syscall"
+
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// pnpData is the compiled, in-memory form of a Yarn PnP manifest. Field
+// semantics follow the Yarn PnP specification:
+// https://yarnpkg.com/advanced/pnp-spec/
+type pnpData struct {
+	// Keys are the package idents, values are sets of references. Combining the
+	// ident with each individual reference yields the set of affected locators.
+	fallbackExclusionList map[string]map[string]bool
+
+	// A map of locators that all packages are allowed to access, regardless
+	// whether they list them in their dependencies or not.
+	fallbackPool map[string]pnpIdentAndReference
+
+	// A nullable regexp. If set, all project-relative importer paths should be
+	// matched against it. If the match succeeds, the resolution should follow
+	// the classic Node.js resolution algorithm rather than the Plug'n'Play one.
+	// Note that unlike other paths in the manifest, the one checked against this
+	// regexp won't begin by `./`. If the pattern cannot be compiled as a Go
+	// regexp, the original pattern text is stored in invalidIgnorePatternData
+	// instead and ignorePatternData is left nil.
+	ignorePatternData        *regexp.Regexp
+	invalidIgnorePatternData string
+
+	// This is the main part of the PnP data file. This table contains the list
+	// of all packages, first keyed by package ident then by package reference.
+	// One entry will have `null` in both fields and represents the absolute
+	// top-level package.
+	packageRegistryData map[string]map[string]pnpPackage
+
+	// Lookup table from a package location (relative to absDirPath) to the
+	// locator of the package at that location; queried by findLocator.
+	packageLocatorsByLocations map[string]pnpPackageLocatorByLocation
+
+	// If true, should a dependency resolution fail for an importer that isn't
+	// explicitly listed in `fallbackExclusionList`, the runtime must first check
+	// whether the resolution would succeed for any of the packages in
+	// `fallbackPool`; if it would, transparently return this resolution. Note
+	// that all dependencies from the top-level package are implicitly part of
+	// the fallback pool, even if not listed here.
+	enableTopLevelFallback bool
+
+	// Manifest location: absPath is the manifest file itself, absDirPath its
+	// directory (package locations are joined onto absDirPath), and tracker
+	// maps manifest byte offsets to line/column for error messages.
+	tracker    logger.LineColumnTracker
+	absPath    string
+	absDirPath string
+}
+
+// This is called both a "locator" and a "dependency target" in the specification.
+// When it's used as a dependency target, it can only be in one of three states:
+//
+//  1. A reference, to link with the dependency name
+//     In this case ident is "".
+//
+//  2. An aliased package
+//     In this case neither ident nor reference are "".
+//
+//  3. A missing peer dependency
+//     In this case ident and reference are "".
+type pnpIdentAndReference struct {
+	ident     string // Empty if null
+	reference string // Empty if null
+	// Source range of this entry in the manifest, used for diagnostics
+	// (e.g. reporting an unfulfilled peer dependency).
+	span      logger.Range
+}
+
+// pnpPackage is one entry of "packageRegistryData": where the package lives
+// on disk (relative to the manifest directory), the dependencies it may
+// resolve, and the source range of its "packageDependencies" value for use
+// in error messages.
+type pnpPackage struct {
+	packageDependencies      map[string]pnpIdentAndReference
+	packageLocation          string
+	packageDependenciesRange logger.Range
+	discardFromLookup        bool
+}
+
+// pnpPackageLocatorByLocation is a value of packageLocatorsByLocations: the
+// locator of the package found at a given location. Entries marked with
+// discardFromLookup are skipped during findLocator's upward walk.
+type pnpPackageLocatorByLocation struct {
+	locator           pnpIdentAndReference
+	discardFromLookup bool
+}
+
+// parseBareIdentifier implements PARSE_BARE_IDENTIFIER from the Yarn PnP
+// specification: it splits a specifier such as "@scope/pkg/sub/path" into
+// the package ident ("@scope/pkg") and the remaining module path
+// ("/sub/path"). ok is false only for a scoped specifier ("@...") that has
+// no "/" separator.
+func parseBareIdentifier(specifier string) (ident string, modulePath string, ok bool) {
+	slash := strings.IndexByte(specifier, '/')
+
+	// If specifier starts with "@", then
+	if strings.HasPrefix(specifier, "@") {
+		// If specifier doesn't contain a "/" separator, then
+		if slash == -1 {
+			// Throw an error
+			return
+		}
+
+		// Otherwise,
+		// Set ident to the substring of specifier until the second "/" separator or the end of string, whatever happens first
+		if slash2 := strings.IndexByte(specifier[slash+1:], '/'); slash2 != -1 {
+			ident = specifier[:slash+1+slash2]
+		} else {
+			ident = specifier
+		}
+	} else {
+		// Otherwise,
+		// Set ident to the substring of specifier until the first "/" separator or the end of string, whatever happens first
+		if slash != -1 {
+			ident = specifier[:slash]
+		} else {
+			ident = specifier
+		}
+	}
+
+	// Set modulePath to the substring of specifier starting from ident.length
+	modulePath = specifier[len(ident):]
+
+	// Return {ident, modulePath}
+	ok = true
+	return
+}
+
+// pnpStatus classifies the outcome of a Yarn PnP resolution attempt.
+type pnpStatus uint8
+
+const (
+	// NOTE: the error statuses must stay declared before pnpSuccess in this
+	// iota sequence because isError relies on their ordinal values being
+	// smaller than pnpSuccess.
+	pnpErrorGeneric pnpStatus = iota
+	pnpErrorDependencyNotFound
+	pnpErrorUnfulfilledPeerDependency
+	pnpSuccess
+	pnpSkipped
+)
+
+// isError reports whether the status represents a failed resolution.
+func (status pnpStatus) isError() bool {
+	return status < pnpSuccess
+}
+
+// pnpResult is the outcome of resolveToUnqualified. On pnpSuccess the pkg*
+// fields describe the resolved package directory, its ident, and the
+// remaining module subpath; on an error status, errorIdent/errorRange feed
+// the diagnostic message.
+type pnpResult struct {
+	status     pnpStatus
+	pkgDirPath string
+	pkgIdent   string
+	pkgSubpath string
+
+	// This is for error messages
+	errorIdent string
+	errorRange logger.Range
+}
+
+// resolveToUnqualified implements RESOLVE_TO_UNQUALIFIED from the Yarn PnP
+// specification: it maps a bare import specifier to the directory of the
+// package that should satisfy it, plus the remaining module subpath. It
+// returns pnpSkipped when the importer has no locator (PnP does not apply)
+// and one of the pnpError* statuses when resolution is required but fails.
+//
+// Note: If this returns successfully then the node module resolution algorithm
+// (i.e. NM_RESOLVE in the Yarn PnP specification) is always run afterward
+func (r resolverQuery) resolveToUnqualified(specifier string, parentURL string, manifest *pnpData) pnpResult {
+	// Let resolved be undefined
+
+	// Let manifest be FIND_PNP_MANIFEST(parentURL)
+	// (this is already done by the time we get here)
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("Using Yarn PnP manifest from %q", manifest.absPath))
+		r.debugLogs.addNote(fmt.Sprintf("  Resolving %q in %q", specifier, parentURL))
+	}
+
+	// Let ident and modulePath be the result of PARSE_BARE_IDENTIFIER(specifier)
+	ident, modulePath, ok := parseBareIdentifier(specifier)
+	if !ok {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("  Failed to parse specifier %q into a bare identifier", specifier))
+		}
+		return pnpResult{status: pnpErrorGeneric}
+	}
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("  Parsed bare identifier %q and module path %q", ident, modulePath))
+	}
+
+	// Let parentLocator be FIND_LOCATOR(manifest, parentURL)
+	parentLocator, ok := r.findLocator(manifest, parentURL)
+
+	// If parentLocator is null, then
+	// Set resolved to NM_RESOLVE(specifier, parentURL) and return it
+	if !ok {
+		return pnpResult{status: pnpSkipped}
+	}
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("  Found parent locator: [%s, %s]", quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
+	}
+
+	// Let parentPkg be GET_PACKAGE(manifest, parentLocator)
+	parentPkg, ok := r.getPackage(manifest, parentLocator.ident, parentLocator.reference)
+	if !ok {
+		// We aren't supposed to get here according to the Yarn PnP specification
+		return pnpResult{status: pnpErrorGeneric}
+	}
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("  Found parent package at %q", parentPkg.packageLocation))
+	}
+
+	// Let referenceOrAlias be the entry from parentPkg.packageDependencies referenced by ident
+	referenceOrAlias, ok := parentPkg.packageDependencies[ident]
+
+	// If referenceOrAlias is null or undefined, then
+	if !ok || referenceOrAlias.reference == "" {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("  Failed to find %q in \"packageDependencies\" of parent package", ident))
+		}
+
+		// If manifest.enableTopLevelFallback is true, then
+		if manifest.enableTopLevelFallback {
+			if r.debugLogs != nil {
+				r.debugLogs.addNote("  Searching for a fallback because \"enableTopLevelFallback\" is true")
+			}
+
+			// If parentLocator isn't in manifest.fallbackExclusionList, then
+			if set := manifest.fallbackExclusionList[parentLocator.ident]; !set[parentLocator.reference] {
+				// Let fallback be RESOLVE_VIA_FALLBACK(manifest, ident)
+				fallback, _ := r.resolveViaFallback(manifest, ident)
+
+				// If fallback is neither null nor undefined
+				if fallback.reference != "" {
+					// Set referenceOrAlias to fallback
+					referenceOrAlias = fallback
+					ok = true
+				}
+			} else if r.debugLogs != nil {
+				r.debugLogs.addNote(fmt.Sprintf("    Stopping because [%s, %s] is in \"fallbackExclusionList\"",
+					quoteOrNullIfEmpty(parentLocator.ident), quoteOrNullIfEmpty(parentLocator.reference)))
+			}
+		}
+	}
+
+	// If referenceOrAlias is still undefined, then
+	if !ok {
+		// Throw a resolution error
+		return pnpResult{
+			status:     pnpErrorDependencyNotFound,
+			errorIdent: ident,
+			errorRange: parentPkg.packageDependenciesRange,
+		}
+	}
+
+	// If referenceOrAlias is still null, then
+	if referenceOrAlias.reference == "" {
+		// Note: It means that parentPkg has an unfulfilled peer dependency on ident
+		// Throw a resolution error
+		return pnpResult{
+			status:     pnpErrorUnfulfilledPeerDependency,
+			errorIdent: ident,
+			errorRange: referenceOrAlias.span,
+		}
+	}
+
+	if r.debugLogs != nil {
+		var referenceOrAliasStr string
+		if referenceOrAlias.ident != "" {
+			referenceOrAliasStr = fmt.Sprintf("[%q, %q]", referenceOrAlias.ident, referenceOrAlias.reference)
+		} else {
+			referenceOrAliasStr = quoteOrNullIfEmpty(referenceOrAlias.reference)
+		}
+		r.debugLogs.addNote(fmt.Sprintf("  Found dependency locator: [%s, %s]", quoteOrNullIfEmpty(ident), referenceOrAliasStr))
+	}
+
+	// Otherwise, if referenceOrAlias is an array, then
+	var dependencyPkg pnpPackage
+	if referenceOrAlias.ident != "" {
+		// Let alias be referenceOrAlias
+		alias := referenceOrAlias
+
+		// Let dependencyPkg be GET_PACKAGE(manifest, alias)
+		dependencyPkg, ok = r.getPackage(manifest, alias.ident, alias.reference)
+		if !ok {
+			// We aren't supposed to get here according to the Yarn PnP specification
+			return pnpResult{status: pnpErrorGeneric}
+		}
+	} else {
+		// Otherwise,
+		// Let dependencyPkg be GET_PACKAGE(manifest, {ident, reference})
+		dependencyPkg, ok = r.getPackage(manifest, ident, referenceOrAlias.reference)
+		if !ok {
+			// We aren't supposed to get here according to the Yarn PnP specification
+			return pnpResult{status: pnpErrorGeneric}
+		}
+	}
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("  Found package %q at %q", ident, dependencyPkg.packageLocation))
+	}
+
+	// Return path.resolve(manifest.dirPath, dependencyPkg.packageLocation, modulePath)
+	pkgDirPath := r.fs.Join(manifest.absDirPath, dependencyPkg.packageLocation)
+	if r.debugLogs != nil {
+		r.debugLogs.addNote(fmt.Sprintf("  Resolved %q via Yarn PnP to %q with subpath %q", specifier, pkgDirPath, modulePath))
+	}
+	return pnpResult{
+		status:     pnpSuccess,
+		pkgDirPath: pkgDirPath,
+		pkgIdent:   ident,
+		pkgSubpath: modulePath,
+	}
+}
+
+// findLocator implements FIND_LOCATOR from the Yarn PnP specification: it
+// determines which package contains moduleUrl by normalizing the path
+// relative to the manifest directory and then walking it upward, one
+// component at a time, through packageLocatorsByLocations. It returns false
+// when moduleUrl cannot be made relative to the manifest directory, when it
+// matches "ignorePatternData", or when no enclosing package is registered.
+func (r resolverQuery) findLocator(manifest *pnpData, moduleUrl string) (pnpIdentAndReference, bool) {
+	// Let relativeUrl be the relative path between manifest and moduleUrl
+	relativeUrl, ok := r.fs.Rel(manifest.absDirPath, moduleUrl)
+	if !ok {
+		return pnpIdentAndReference{}, false
+	} else {
+		// Relative URLs on Windows will use \ instead of /, which will break
+		// everything we do below. Use normal slashes to keep things working.
+		relativeUrl = strings.ReplaceAll(relativeUrl, "\\", "/")
+	}
+
+	// The relative path must not start with ./; trim it if needed
+	relativeUrl = strings.TrimPrefix(relativeUrl, "./")
+
+	// If relativeUrl matches manifest.ignorePatternData, then
+	if manifest.ignorePatternData != nil && manifest.ignorePatternData.MatchString(relativeUrl) {
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("  Ignoring %q because it matches \"ignorePatternData\"", relativeUrl))
+		}
+
+		// Return null
+		return pnpIdentAndReference{}, false
+	}
+
+	// Note: Make sure relativeUrl always starts with a ./ or ../
+	if !strings.HasSuffix(relativeUrl, "/") {
+		relativeUrl += "/"
+	}
+	if !strings.HasPrefix(relativeUrl, "./") && !strings.HasPrefix(relativeUrl, "../") {
+		relativeUrl = "./" + relativeUrl
+	}
+
+	// This is the inner loop from Yarn's PnP resolver implementation. This is
+	// different from the specification, which contains a hypothetical slow
+	// algorithm instead. The algorithm from the specification can sometimes
+	// produce different results from the one used by the implementation, so
+	// we follow the implementation.
+	for {
+		entry, ok := manifest.packageLocatorsByLocations[relativeUrl]
+		if !ok || entry.discardFromLookup {
+			// Remove the last path component and try again
+			relativeUrl = relativeUrl[:strings.LastIndexByte(relativeUrl[:len(relativeUrl)-1], '/')+1]
+			if relativeUrl == "" {
+				break
+			}
+			continue
+		}
+		return entry.locator, true
+	}
+
+	return pnpIdentAndReference{}, false
+}
+
+// resolveViaFallback implements RESOLVE_VIA_FALLBACK from the Yarn PnP
+// specification: it tries to satisfy ident first from the top-level
+// package's dependencies and then from the manifest's "fallbackPool".
+func (r resolverQuery) resolveViaFallback(manifest *pnpData, ident string) (pnpIdentAndReference, bool) {
+	// Let topLevelPkg be GET_PACKAGE(manifest, {null, null})
+	topLevelPkg, ok := r.getPackage(manifest, "", "")
+	if !ok {
+		// We aren't supposed to get here according to the Yarn PnP specification
+		return pnpIdentAndReference{}, false
+	}
+
+	// Let referenceOrAlias be the entry from topLevelPkg.packageDependencies referenced by ident
+	referenceOrAlias, ok := topLevelPkg.packageDependencies[ident]
+
+	// If referenceOrAlias is defined, then
+	if ok {
+		// Return it immediately
+		if r.debugLogs != nil {
+			r.debugLogs.addNote(fmt.Sprintf("    Found fallback for %q in \"packageDependencies\" of top-level package: [%s, %s]", ident,
+				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
+		}
+		return referenceOrAlias, true
+	}
+
+	// Otherwise,
+	// Let referenceOrAlias be the entry from manifest.fallbackPool referenced by ident
+	referenceOrAlias, ok = manifest.fallbackPool[ident]
+
+	// Return it immediately, whether it's defined or not
+	if r.debugLogs != nil {
+		if ok {
+			r.debugLogs.addNote(fmt.Sprintf("    Found fallback for %q in \"fallbackPool\": [%s, %s]", ident,
+				quoteOrNullIfEmpty(referenceOrAlias.ident), quoteOrNullIfEmpty(referenceOrAlias.reference)))
+		} else {
+			r.debugLogs.addNote(fmt.Sprintf("    Failed to find fallback for %q in \"fallbackPool\"", ident))
+		}
+	}
+	return referenceOrAlias, ok
+}
+
+// getPackage implements GET_PACKAGE from the Yarn PnP specification: a
+// two-level lookup of [ident, reference] in packageRegistryData. A miss
+// indicates a malformed manifest, since the specification requires every
+// locator referenced by the data tables to have a registry entry.
+func (r resolverQuery) getPackage(manifest *pnpData, ident string, reference string) (pnpPackage, bool) {
+	if inner, ok := manifest.packageRegistryData[ident]; ok {
+		if pkg, ok := inner[reference]; ok {
+			return pkg, true
+		}
+	}
+
+	if r.debugLogs != nil {
+		// We aren't supposed to get here according to the Yarn PnP specification:
+		// "Note: pkg cannot be undefined here; all packages referenced in any of the
+		// Plug'n'Play data tables MUST have a corresponding entry inside packageRegistryData."
+		r.debugLogs.addNote(fmt.Sprintf("  Yarn PnP invariant violation: GET_PACKAGE failed to find a package: [%s, %s]",
+			quoteOrNullIfEmpty(ident), quoteOrNullIfEmpty(reference)))
+	}
+	return pnpPackage{}, false
+}
+
+// quoteOrNullIfEmpty renders str as a quoted string for debug logs, using
+// the literal text "null" for the empty string (an empty ident/reference
+// encodes a null value in PnP data).
+func quoteOrNullIfEmpty(str string) string {
+	if str != "" {
+		return fmt.Sprintf("%q", str)
+	}
+	return "null"
+}
+
// compileYarnPnPData converts the parsed JSON AST of a Yarn PnP manifest into
// the internal pnpData lookup tables. Unrecognized or malformed entries are
// silently skipped rather than reported — each field is only populated when
// the JSON has the exact expected shape.
//
//   - absPath/absDirPath locate the manifest file itself
//   - json is the manifest's root JSON expression
//   - source is the manifest source, used to build a line/column tracker for
//     later error reporting
func compileYarnPnPData(absPath string, absDirPath string, json js_ast.Expr, source logger.Source) *pnpData {
	data := pnpData{
		absPath:    absPath,
		absDirPath: absDirPath,
		tracker:    logger.MakeLineColumnTracker(&source),
	}

	// Optional boolean flag controlling top-level fallback resolution.
	if value, _, ok := getProperty(json, "enableTopLevelFallback"); ok {
		if enableTopLevelFallback, ok := getBool(value); ok {
			data.enableTopLevelFallback = enableTopLevelFallback
		}
	}

	// "fallbackExclusionList" is an array of [ident, [reference, ...]] tuples;
	// it becomes a two-level set: ident -> set of references.
	if value, _, ok := getProperty(json, "fallbackExclusionList"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackExclusionList = make(map[string]map[string]bool, len(array.Items))

			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if ident, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]bool, len(array2.Items))

							for _, item2 := range array2.Items {
								if reference, ok := getString(item2); ok {
									references[reference] = true
								}
							}

							data.fallbackExclusionList[ident] = references
						}
					}
				}
			}
		}
	}

	// "fallbackPool" is an array of [ident, dependencyTarget] tuples.
	if value, _, ok := getProperty(json, "fallbackPool"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.fallbackPool = make(map[string]pnpIdentAndReference, len(array.Items))

			for _, item := range array.Items {
				if array2, ok := item.Data.(*js_ast.EArray); ok && len(array2.Items) == 2 {
					if ident, ok := getString(array2.Items[0]); ok {
						if dependencyTarget, ok := getDependencyTarget(array2.Items[1]); ok {
							data.fallbackPool[ident] = dependencyTarget
						}
					}
				}
			}
		}
	}

	// "ignorePatternData" is a JavaScript regular expression source string.
	if value, _, ok := getProperty(json, "ignorePatternData"); ok {
		if ignorePatternData, ok := getString(value); ok {
			// The Go regular expression engine doesn't support some of the features
			// that JavaScript regular expressions support, including "(?!" negative
			// lookaheads which Yarn uses. This is deliberate on Go's part. See this:
			// https://github.com/golang/go/issues/18868.
			//
			// Yarn uses this feature to exclude the "." and ".." path segments in
			// the middle of a relative path. However, we shouldn't ever generate
			// such path segments in the first place. So as a hack, we just remove
			// the specific character sequences used by Yarn for this so that the
			// regular expression is more likely to be able to be compiled.
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!\.)`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!(?:^|\/)\.)`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!\.{1,2}(?:\/|$))`, "")
			ignorePatternData = strings.ReplaceAll(ignorePatternData, `(?!(?:^|\/)\.{1,2}(?:\/|$))`, "")

			// Keep the raw pattern around if it still doesn't compile so the
			// failure can be surfaced later instead of being silently dropped.
			if reg, err := regexp.Compile(ignorePatternData); err == nil {
				data.ignorePatternData = reg
			} else {
				data.invalidIgnorePatternData = ignorePatternData
			}
		}
	}

	// "packageRegistryData" is the main table: an array of
	// [packageIdent, [[packageReference, pkgObject], ...]] tuples.
	if value, _, ok := getProperty(json, "packageRegistryData"); ok {
		if array, ok := value.Data.(*js_ast.EArray); ok {
			data.packageRegistryData = make(map[string]map[string]pnpPackage, len(array.Items))
			data.packageLocatorsByLocations = make(map[string]pnpPackageLocatorByLocation)

			for _, item := range array.Items {
				if tuple, ok := item.Data.(*js_ast.EArray); ok && len(tuple.Items) == 2 {
					if packageIdent, ok := getStringOrNull(tuple.Items[0]); ok {
						if array2, ok := tuple.Items[1].Data.(*js_ast.EArray); ok {
							references := make(map[string]pnpPackage, len(array2.Items))
							data.packageRegistryData[packageIdent] = references

							for _, item2 := range array2.Items {
								if tuple2, ok := item2.Data.(*js_ast.EArray); ok && len(tuple2.Items) == 2 {
									if packageReference, ok := getStringOrNull(tuple2.Items[0]); ok {
										pkg := tuple2.Items[1]

										if packageLocation, _, ok := getProperty(pkg, "packageLocation"); ok {
											if packageDependencies, _, ok := getProperty(pkg, "packageDependencies"); ok {
												if packageLocation, ok := getString(packageLocation); ok {
													if array3, ok := packageDependencies.Data.(*js_ast.EArray); ok {
														deps := make(map[string]pnpIdentAndReference, len(array3.Items))
														discardFromLookup := false

														// Each dependency is an [ident, dependencyTarget] tuple.
														for _, dep := range array3.Items {
															if array4, ok := dep.Data.(*js_ast.EArray); ok && len(array4.Items) == 2 {
																if ident, ok := getString(array4.Items[0]); ok {
																	if dependencyTarget, ok := getDependencyTarget(array4.Items[1]); ok {
																		deps[ident] = dependencyTarget
																	}
																}
															}
														}

														if value, _, ok := getProperty(pkg, "discardFromLookup"); ok {
															if value, ok := getBool(value); ok {
																discardFromLookup = value
															}
														}

														// The dependencies range spans the whole JSON array so
														// errors can point at the manifest text later.
														references[packageReference] = pnpPackage{
															packageLocation:     packageLocation,
															packageDependencies: deps,
															packageDependenciesRange: logger.Range{
																Loc: packageDependencies.Loc,
																Len: array3.CloseBracketLoc.Start + 1 - packageDependencies.Loc.Start,
															},
															discardFromLookup: discardFromLookup,
														}

														// This is what Yarn's PnP implementation does (specifically in
														// "hydrateRuntimeState"), so we replicate that behavior here:
														if entry, ok := data.packageLocatorsByLocations[packageLocation]; !ok {
															data.packageLocatorsByLocations[packageLocation] = pnpPackageLocatorByLocation{
																locator:           pnpIdentAndReference{ident: packageIdent, reference: packageReference},
																discardFromLookup: discardFromLookup,
															}
														} else {
															// A location already claimed by another package only stays
															// discarded if every claimant discards it, and the last
															// non-discarded claimant wins the locator.
															entry.discardFromLookup = entry.discardFromLookup && discardFromLookup
															if !discardFromLookup {
																entry.locator = pnpIdentAndReference{ident: packageIdent, reference: packageReference}
															}
															data.packageLocatorsByLocations[packageLocation] = entry
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}

	return &data
}
+
+func getStringOrNull(json js_ast.Expr) (string, bool) {
+	switch value := json.Data.(type) {
+	case *js_ast.EString:
+		return helpers.UTF16ToString(value.Value), true
+
+	case *js_ast.ENull:
+		return "", true
+	}
+
+	return "", false
+}
+
// getDependencyTarget decodes one dependency target value from the PnP
// manifest. The manifest encodes a target as either null (missing peer
// dependency), a bare reference string, or an [ident, reference] tuple.
// The returned span covers the target's source text for error reporting.
func getDependencyTarget(json js_ast.Expr) (pnpIdentAndReference, bool) {
	switch d := json.Data.(type) {
	case *js_ast.ENull:
		// Len is 4 to cover the literal source text "null".
		return pnpIdentAndReference{span: logger.Range{Loc: json.Loc, Len: 4}}, true

	case *js_ast.EString:
		return pnpIdentAndReference{reference: helpers.UTF16ToString(d.Value), span: logger.Range{Loc: json.Loc}}, true

	case *js_ast.EArray:
		if len(d.Items) == 2 {
			if name, ok := getString(d.Items[0]); ok {
				if reference, ok := getString(d.Items[1]); ok {
					return pnpIdentAndReference{
						ident:     name,
						reference: reference,
						// Span from the opening "[" through the closing "]".
						span:      logger.Range{Loc: json.Loc, Len: d.CloseBracketLoc.Start + 1 - json.Loc.Start},
					}, true
				}
			}
		}
	}

	return pnpIdentAndReference{}, false
}
+
// pnpDataMode controls whether a missing PnP data file (ENOENT) is reported
// as an error or silently ignored while probing for Yarn PnP data. All other
// read errors are always reported regardless of mode.
type pnpDataMode uint8

const (
	pnpIgnoreErrorsAboutMissingFiles pnpDataMode = iota
	pnpReportErrorsAboutMissingFiles
)
+
// extractYarnPnPDataFromJSON reads the file at pnpDataPath through the FS
// cache and parses it as JSON, returning the root expression and the logger
// source it was parsed from. On a read failure it returns zero values; a
// missing file (ENOENT) is only reported as an error when mode is
// pnpReportErrorsAboutMissingFiles, while all other read errors are always
// reported.
func (r resolverQuery) extractYarnPnPDataFromJSON(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr, source logger.Source) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			r.log.AddError(nil, logger.Range{},
				fmt.Sprintf("Cannot read file %q: %s",
					PrettyPath(r.fs, logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}
	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source = logger.Source{
		KeyPath:    keyPath,
		PrettyPath: PrettyPath(r.fs, keyPath),
		Contents:   contents,
	}
	// Parse errors, if any, are recorded in r.log by the JSON cache.
	result, _ = r.caches.JSONCache.Parse(r.log, source, js_parser.JSONOptions{})
	return
}
+
// tryToExtractYarnPnPDataFromJS reads the JavaScript file at pnpDataPath
// (Yarn's PnP runtime script) and extracts the embedded JSON manifest from it
// by parsing it with the dedicated Yarn PnP parser options. The error-handling
// behavior mirrors extractYarnPnPDataFromJSON: a missing file (ENOENT) is only
// an error when mode is pnpReportErrorsAboutMissingFiles.
func (r resolverQuery) tryToExtractYarnPnPDataFromJS(pnpDataPath string, mode pnpDataMode) (result js_ast.Expr, source logger.Source) {
	contents, err, originalError := r.caches.FSCache.ReadFile(r.fs, pnpDataPath)
	if r.debugLogs != nil && originalError != nil {
		r.debugLogs.addNote(fmt.Sprintf("Failed to read file %q: %s", pnpDataPath, originalError.Error()))
	}
	if err != nil {
		if mode == pnpReportErrorsAboutMissingFiles || err != syscall.ENOENT {
			r.log.AddError(nil, logger.Range{},
				fmt.Sprintf("Cannot read file %q: %s",
					PrettyPath(r.fs, logger.Path{Text: pnpDataPath, Namespace: "file"}), err.Error()))
		}
		return
	}
	if r.debugLogs != nil {
		r.debugLogs.addNote(fmt.Sprintf("The file %q exists", pnpDataPath))
	}

	keyPath := logger.Path{Text: pnpDataPath, Namespace: "file"}
	source = logger.Source{
		KeyPath:    keyPath,
		PrettyPath: PrettyPath(r.fs, keyPath),
		Contents:   contents,
	}
	// The JS parser records the extracted manifest on the AST; it is the zero
	// Expr if no manifest was found in the file.
	ast, _ := r.caches.JSCache.Parse(r.log, source, js_parser.OptionsForYarnPnP())

	if r.debugLogs != nil && ast.ManifestForYarnPnP.Data != nil {
		r.debugLogs.addNote(fmt.Sprintf("  Extracted JSON data from %q", pnpDataPath))
	}
	return ast.ManifestForYarnPnP, source
}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/runtime/runtime.go b/source/vendor/github.com/evanw/esbuild/internal/runtime/runtime.go
new file mode 100644
index 0000000..42ccf39
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/runtime/runtime.go
@@ -0,0 +1,604 @@
+package runtime
+
+// This is esbuild's runtime code. It contains helper functions that are
+// automatically injected into output files to implement certain features. For
+// example, the "**" operator is replaced with a call to "__pow" when targeting
+// ES2015. Tree shaking automatically removes unused code from the runtime.
+
+import (
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
// The runtime source is always at a special index. The index is always zero
// but this constant is always used instead to improve readability and ensure
// all code that references this index can be discovered easily.
// Source index 0 is therefore reserved for the runtime in every bundle.
const SourceIndex = uint32(0)
+
// Source builds the logger.Source containing esbuild's JavaScript runtime
// helper code, specialized for the given set of unsupported JS features: the
// helper text swaps in pre-ES6-compatible forms (no "for...of", no "let", no
// computed accessors) for the features the target doesn't support. The result
// always carries SourceIndex (0) so the runtime can be identified everywhere.
func Source(unsupportedJSFeatures compat.JSFeature) logger.Source {
	// Note: These helper functions used to be named similar things to the helper
	// functions from the TypeScript compiler. However, people sometimes use these
	// two projects in combination and TypeScript's implementation of these helpers
	// causes name collisions. Some examples:
	//
	// * The "tslib" library will overwrite esbuild's helper functions if the bundled
	//   code is run in the global scope: https://github.com/evanw/esbuild/issues/1102
	//
	// * Running the TypeScript compiler on esbuild's output to convert ES6 to ES5
	//   will also overwrite esbuild's helper functions because TypeScript doesn't
	//   change the names of its helper functions to avoid name collisions:
	//   https://github.com/microsoft/TypeScript/issues/43296
	//
	// These can both be considered bugs in TypeScript. However, they are unlikely
	// to be fixed and it's simplest to just avoid using the same names to avoid
	// these bugs. Forbidden names (from "tslib"):
	//
	//   __assign
	//   __asyncDelegator
	//   __asyncGenerator
	//   __asyncValues
	//   __await
	//   __awaiter
	//   __classPrivateFieldGet
	//   __classPrivateFieldSet
	//   __createBinding
	//   __decorate
	//   __exportStar
	//   __extends
	//   __generator
	//   __importDefault
	//   __importStar
	//   __makeTemplateObject
	//   __metadata
	//   __param
	//   __read
	//   __rest
	//   __spread
	//   __spreadArray
	//   __spreadArrays
	//   __values
	//
	// Note: The "__objRest" function has a for-of loop which requires ES6, but
	// transforming destructuring to ES5 isn't even supported so it's ok.
	text := `
		var __create = Object.create
		var __freeze = Object.freeze
		var __defProp = Object.defineProperty
		var __defProps = Object.defineProperties
		var __getOwnPropDesc = Object.getOwnPropertyDescriptor // Note: can return "undefined" due to a Safari bug
		var __getOwnPropDescs = Object.getOwnPropertyDescriptors
		var __getOwnPropNames = Object.getOwnPropertyNames
		var __getOwnPropSymbols = Object.getOwnPropertySymbols
		var __getProtoOf = Object.getPrototypeOf
		var __hasOwnProp = Object.prototype.hasOwnProperty
		var __propIsEnum = Object.prototype.propertyIsEnumerable
		var __reflectGet = Reflect.get
		var __reflectSet = Reflect.set

		var __knownSymbol = (name, symbol) => (symbol = Symbol[name]) ? symbol : Symbol.for('Symbol.' + name)
		var __typeError = msg => { throw TypeError(msg) }

		export var __pow = Math.pow

		var __defNormalProp = (obj, key, value) => key in obj
			? __defProp(obj, key, {enumerable: true, configurable: true, writable: true, value})
			: obj[key] = value

		export var __spreadValues = (a, b) => {
			for (var prop in b ||= {})
				if (__hasOwnProp.call(b, prop))
					__defNormalProp(a, prop, b[prop])
			if (__getOwnPropSymbols)
		`

	// Avoid "of" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) {
		text += `
				for (var prop of __getOwnPropSymbols(b)) {
		`
	} else {
		text += `
				for (var props = __getOwnPropSymbols(b), i = 0, n = props.length, prop; i < n; i++) {
					prop = props[i]
		`
	}

	text += `
					if (__propIsEnum.call(b, prop))
						__defNormalProp(a, prop, b[prop])
				}
			return a
		}
		export var __spreadProps = (a, b) => __defProps(a, __getOwnPropDescs(b))

		// Update the "name" property on the function or class for "--keep-names"
		export var __name = (target, value) => __defProp(target, 'name', { value, configurable: true })

		// This fallback "require" function exists so that "typeof require" can
		// naturally be "function" even in non-CommonJS environments since esbuild
		// emulates a CommonJS environment (issue #1202). However, people want this
		// shim to fall back to "globalThis.require" even if it's defined later
		// (including property accesses such as "require.resolve") so we need to
		// use a proxy (issue #1614).
		export var __require =
			/* @__PURE__ */ (x =>
				typeof require !== 'undefined' ? require :
				typeof Proxy !== 'undefined' ? new Proxy(x, {
					get: (a, b) => (typeof require !== 'undefined' ? require : a)[b]
				}) : x
			)(function(x) {
				if (typeof require !== 'undefined') return require.apply(this, arguments)
				throw Error('Dynamic require of "' + x + '" is not supported')
			})

		// This is used for glob imports
		export var __glob = map => path => {
			var fn = map[path]
			if (fn) return fn()
			throw new Error('Module not found in bundle: ' + path)
		}

		// For object rest patterns
		export var __restKey = key => typeof key === 'symbol' ? key : key + ''
		export var __objRest = (source, exclude) => {
			var target = {}
			for (var prop in source)
				if (__hasOwnProp.call(source, prop) && exclude.indexOf(prop) < 0)
					target[prop] = source[prop]
			if (source != null && __getOwnPropSymbols)
	`

	// Avoid "of" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) {
		text += `
				for (var prop of __getOwnPropSymbols(source)) {
		`
	} else {
		text += `
				for (var props = __getOwnPropSymbols(source), i = 0, n = props.length, prop; i < n; i++) {
					prop = props[i]
		`
	}

	text += `
					if (exclude.indexOf(prop) < 0 && __propIsEnum.call(source, prop))
						target[prop] = source[prop]
				}
			return target
		}

		// This is for lazily-initialized ESM code. This has two implementations, a
		// compact one for minified code and a verbose one that generates friendly
		// names in V8's profiler and in stack traces.
		export var __esm = (fn, res) => function __init() {
			return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res
		}
		export var __esmMin = (fn, res) => () => (fn && (res = fn(fn = 0)), res)

		// Wraps a CommonJS closure and returns a require() function. This has two
		// implementations, a compact one for minified code and a verbose one that
		// generates friendly names in V8's profiler and in stack traces.
		export var __commonJS = (cb, mod) => function __require() {
			return mod || (0, cb[__getOwnPropNames(cb)[0]])((mod = {exports: {}}).exports, mod), mod.exports
		}
		export var __commonJSMin = (cb, mod) => () => (mod || cb((mod = {exports: {}}).exports, mod), mod.exports)

		// Used to implement ESM exports both for "require()" and "import * as"
		export var __export = (target, all) => {
			for (var name in all)
				__defProp(target, name, { get: all[name], enumerable: true })
		}

		var __copyProps = (to, from, except, desc) => {
			if (from && typeof from === 'object' || typeof from === 'function')
	`

	// Avoid "let" when not using ES6
	if !unsupportedJSFeatures.Has(compat.ForOf) && !unsupportedJSFeatures.Has(compat.ConstAndLet) {
		text += `
				for (let key of __getOwnPropNames(from))
					if (!__hasOwnProp.call(to, key) && key !== except)
						__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable })
		`
	} else {
		text += `
				for (var keys = __getOwnPropNames(from), i = 0, n = keys.length, key; i < n; i++) {
					key = keys[i]
					if (!__hasOwnProp.call(to, key) && key !== except)
						__defProp(to, key, { get: (k => from[k]).bind(null, key), enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable })
				}
		`
	}

	text += `
			return to
		}

		// This is used to implement "export * from" statements. It copies properties
		// from the imported module to the current module's ESM export object. If the
		// current module is an entry point and the target format is CommonJS, we
		// also copy the properties to "module.exports" in addition to our module's
		// internal ESM export object.
		export var __reExport = (target, mod, secondTarget) => (
			__copyProps(target, mod, 'default'),
			secondTarget && __copyProps(secondTarget, mod, 'default')
		)

		// Converts the module from CommonJS to ESM. When in node mode (i.e. in an
		// ".mjs" file, package.json has "type: module", or the "__esModule" export
		// in the CommonJS file is falsy or missing), the "default" property is
		// overridden to point to the original CommonJS exports object instead.
		export var __toESM = (mod, isNodeMode, target) => (
			target = mod != null ? __create(__getProtoOf(mod)) : {},
			__copyProps(
				// If the importer is in node compatibility mode or this is not an ESM
				// file that has been converted to a CommonJS file using a Babel-
				// compatible transform (i.e. "__esModule" has not been set), then set
				// "default" to the CommonJS "module.exports" for node compatibility.
				isNodeMode || !mod || !mod.__esModule
					? __defProp(target, 'default', { value: mod, enumerable: true })
					: target,
				mod)
		)

		// Converts the module from ESM to CommonJS. This clones the input module
		// object with the addition of a non-enumerable "__esModule" property set
		// to "true", which overwrites any existing export named "__esModule".
		export var __toCommonJS = mod => __copyProps(__defProp({}, '__esModule', { value: true }), mod)

		// For TypeScript experimental decorators
		// - kind === undefined: class
		// - kind === 1: method, parameter
		// - kind === 2: field
		export var __decorateClass = (decorators, target, key, kind) => {
			var result = kind > 1 ? void 0 : kind ? __getOwnPropDesc(target, key) : target
			for (var i = decorators.length - 1, decorator; i >= 0; i--)
				if (decorator = decorators[i])
					result = (kind ? decorator(target, key, result) : decorator(result)) || result
			if (kind && result) __defProp(target, key, result)
			return result
		}
		export var __decorateParam = (index, decorator) => (target, key) => decorator(target, key, index)

		// For JavaScript decorators
		export var __decoratorStart = base => [, , , __create(base?.[__knownSymbol('metadata')] ?? null)]
		var __decoratorStrings = ['class', 'method', 'getter', 'setter', 'accessor', 'field', 'value', 'get', 'set']
		var __expectFn = fn => fn !== void 0 && typeof fn !== 'function' ? __typeError('Function expected') : fn
		var __decoratorContext = (kind, name, done, metadata, fns) => ({ kind: __decoratorStrings[kind], name, metadata, addInitializer: fn =>
			done._ ? __typeError('Already initialized') : fns.push(__expectFn(fn || null)) })
		export var __decoratorMetadata = (array, target) => __defNormalProp(target, __knownSymbol('metadata'), array[3])
		export var __runInitializers = (array, flags, self, value) => {
			for (var i = 0, fns = array[flags >> 1], n = fns && fns.length; i < n; i++) flags & 1 ? fns[i].call(self) : value = fns[i].call(self, value)
			return value
		}
		export var __decorateElement = (array, flags, name, decorators, target, extra) => {
			var fn, it, done, ctx, access, k = flags & 7, s = !!(flags & 8), p = !!(flags & 16)
			var j = k > 3 ? array.length + 1 : k ? s ? 1 : 2 : 0, key = __decoratorStrings[k + 5]
			var initializers = k > 3 && (array[j - 1] = []), extraInitializers = array[j] || (array[j] = [])
			var desc = k && (
				!p && !s && (target = target.prototype),
				k < 5 && (k > 3 || !p) &&
			`

	// Avoid object extensions when not using ES6
	if !unsupportedJSFeatures.Has(compat.ObjectExtensions) && !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `__getOwnPropDesc(k < 4 ? target : { get [name]() { return __privateGet(this, extra) }, set [name](x) { return __privateSet(this, extra, x) } }, name)`
	} else {
		text += `(k < 4 ? __getOwnPropDesc(target, name) : { get: () => __privateGet(this, extra), set: x => __privateSet(this, extra, x) })`
	}

	text += `
			)
			k ? p && k < 4 && __name(extra, (k > 2 ? 'set ' : k > 1 ? 'get ' : '') + name) : __name(target, name)

			for (var i = decorators.length - 1; i >= 0; i--) {
				ctx = __decoratorContext(k, name, done = {}, array[3], extraInitializers)

				if (k) {
					ctx.static = s, ctx.private = p, access = ctx.access = { has: p ? x => __privateIn(target, x) : x => name in x }
					if (k ^ 3) access.get = p ? x => (k ^ 1 ? __privateGet : __privateMethod)(x, target, k ^ 4 ? extra : desc.get) : x => x[name]
					if (k > 2) access.set = p ? (x, y) => __privateSet(x, target, y, k ^ 4 ? extra : desc.set) : (x, y) => x[name] = y
				}

				it = (0, decorators[i])(k ? k < 4 ? p ? extra : desc[key] : k > 4 ? void 0 : { get: desc.get, set: desc.set } : target, ctx), done._ = 1

				if (k ^ 4 || it === void 0) __expectFn(it) && (k > 4 ? initializers.unshift(it) : k ? p ? extra = it : desc[key] = it : target = it)
				else if (typeof it !== 'object' || it === null) __typeError('Object expected')
				else __expectFn(fn = it.get) && (desc.get = fn), __expectFn(fn = it.set) && (desc.set = fn), __expectFn(fn = it.init) && initializers.unshift(fn)
			}

			return k || __decoratorMetadata(array, target),
				desc && __defProp(target, name, desc),
				p ? k ^ 4 ? extra : desc : target
		}

		// For class members
		export var __publicField = (obj, key, value) => (
			__defNormalProp(obj, typeof key !== 'symbol' ? key + '' : key, value)
		)
		var __accessCheck = (obj, member, msg) => (
			member.has(obj) || __typeError('Cannot ' + msg)
		)
		export var __privateIn = (member, obj) => (
			Object(obj) !== obj ? __typeError('Cannot use the "in" operator on this value') :
			member.has(obj)
		)
		export var __privateGet = (obj, member, getter) => (
			__accessCheck(obj, member, 'read from private field'),
			getter ? getter.call(obj) : member.get(obj)
		)
		export var __privateAdd = (obj, member, value) => (
			member.has(obj) ? __typeError('Cannot add the same private member more than once') :
			member instanceof WeakSet ? member.add(obj) : member.set(obj, value)
		)
		export var __privateSet = (obj, member, value, setter) => (
			__accessCheck(obj, member, 'write to private field'),
			setter ? setter.call(obj, value) : member.set(obj, value),
			value
		)
		export var __privateMethod = (obj, member, method) => (
			__accessCheck(obj, member, 'access private method'),
			method
		)
		export var __earlyAccess = (name) => {
			throw ReferenceError('Cannot access "' + name + '" before initialization')
		}
	`

	if !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `
			export var __privateWrapper = (obj, member, setter, getter) => ({
				set _(value) { __privateSet(obj, member, value, setter) },
				get _() { return __privateGet(obj, member, getter) },
			})
		`
	} else {
		text += `
		export var __privateWrapper = (obj, member, setter, getter) => __defProp({}, '_', {
			set: value => __privateSet(obj, member, value, setter),
			get: () => __privateGet(obj, member, getter),
		})
		`
	}

	text += `
		// For "super" property accesses
		export var __superGet = (cls, obj, key) => __reflectGet(__getProtoOf(cls), key, obj)
		export var __superSet = (cls, obj, key, val) => (__reflectSet(__getProtoOf(cls), key, val, obj), val)
	`

	if !unsupportedJSFeatures.Has(compat.ObjectAccessors) {
		text += `
			export var __superWrapper = (cls, obj, key) => ({
				get _() { return __superGet(cls, obj, key) },
				set _(val) { __superSet(cls, obj, key, val) },
			})
		`
	} else {
		text += `
			export var __superWrapper = (cls, obj, key) => __defProp({}, '_', {
				get: () => __superGet(cls, obj, key),
				set: val => __superSet(cls, obj, key, val),
			})
		`
	}

	text += `
		// For lowering tagged template literals
		export var __template = (cooked, raw) => __freeze(__defProp(cooked, 'raw', { value: __freeze(raw || cooked.slice()) }))

		// This helps for lowering async functions
		export var __async = (__this, __arguments, generator) => {
			return new Promise((resolve, reject) => {
				var fulfilled = value => {
					try {
						step(generator.next(value))
					} catch (e) {
						reject(e)
					}
				}
				var rejected = value => {
					try {
						step(generator.throw(value))
					} catch (e) {
						reject(e)
					}
				}
				var step = x => x.done ? resolve(x.value) : Promise.resolve(x.value).then(fulfilled, rejected)
				step((generator = generator.apply(__this, __arguments)).next())
			})
		}

		// These help for lowering async generator functions
		export var __await = function (promise, isYieldStar) {
			this[0] = promise
			this[1] = isYieldStar
		}
		export var __asyncGenerator = (__this, __arguments, generator) => {
			var resume = (k, v, yes, no) => {
				try {
					var x = generator[k](v), isAwait = (v = x.value) instanceof __await, done = x.done
					Promise.resolve(isAwait ? v[0] : v)
						.then(y => isAwait
							? resume(k === 'return' ? k : 'next', v[1] ? { done: y.done, value: y.value } : y, yes, no)
							: yes({ value: y, done }))
						.catch(e => resume('throw', e, yes, no))
				} catch (e) {
					no(e)
				}
			}, method = k => it[k] = x => new Promise((yes, no) => resume(k, x, yes, no)), it = {}
			return generator = generator.apply(__this, __arguments),
				it[__knownSymbol('asyncIterator')] = () => it,
				method('next'),
				method('throw'),
				method('return'),
				it
		}
		export var __yieldStar = value => {
			var obj = value[__knownSymbol('asyncIterator')], isAwait = false, method, it = {}
			if (obj == null) {
				obj = value[__knownSymbol('iterator')]()
				method = k => it[k] = x => obj[k](x)
			} else {
				obj = obj.call(value)
				method = k => it[k] = v => {
					if (isAwait) {
						isAwait = false
						if (k === 'throw') throw v
						return v
					}
					isAwait = true
					return {
						done: false,
						value: new __await(new Promise(resolve => {
							var x = obj[k](v)
							if (!(x instanceof Object)) __typeError('Object expected')
							resolve(x)
						}), 1),
					}
				}
			}
			return it[__knownSymbol('iterator')] = () => it,
				method('next'),
				'throw' in obj ? method('throw') : it.throw = x => { throw x },
				'return' in obj && method('return'),
				it
		}

		// This helps for lowering for-await loops
		export var __forAwait = (obj, it, method) =>
			(it = obj[__knownSymbol('asyncIterator')])
				? it.call(obj)
				: (obj = obj[__knownSymbol('iterator')](),
					it = {},
					method = (key, fn) =>
						(fn = obj[key]) && (it[key] = arg =>
							new Promise((yes, no, done) => (
								arg = fn.call(obj, arg),
								done = arg.done,
								Promise.resolve(arg.value)
									.then(value => yes({ value, done }), no)
							))),
					method('next'),
					method('return'),
					it)

		// This is for the "binary" loader (custom code is ~2x faster than "atob")
		export var __toBinaryNode = base64 => new Uint8Array(Buffer.from(base64, 'base64'))
		export var __toBinary = /* @__PURE__ */ (() => {
			var table = new Uint8Array(128)
			for (var i = 0; i < 64; i++) table[i < 26 ? i + 65 : i < 52 ? i + 71 : i < 62 ? i - 4 : i * 4 - 205] = i
			return base64 => {
				var n = base64.length, bytes = new Uint8Array((n - (base64[n - 1] == '=') - (base64[n - 2] == '=')) * 3 / 4 | 0)
				for (var i = 0, j = 0; i < n;) {
					var c0 = table[base64.charCodeAt(i++)], c1 = table[base64.charCodeAt(i++)]
					var c2 = table[base64.charCodeAt(i++)], c3 = table[base64.charCodeAt(i++)]
					bytes[j++] = (c0 << 2) | (c1 >> 4)
					bytes[j++] = (c1 << 4) | (c2 >> 2)
					bytes[j++] = (c2 << 6) | c3
				}
				return bytes
			}
		})()

		// These are for the "using" statement in TypeScript 5.2+
		export var __using = (stack, value, async) => {
			if (value != null) {
				if (typeof value !== 'object' && typeof value !== 'function') __typeError('Object expected')
				var dispose, inner
				if (async) dispose = value[__knownSymbol('asyncDispose')]
				if (dispose === void 0) {
					dispose = value[__knownSymbol('dispose')]
					if (async) inner = dispose
				}
				if (typeof dispose !== 'function') __typeError('Object not disposable')
				if (inner) dispose = function() { try { inner.call(this) } catch (e) { return Promise.reject(e) } }
				stack.push([async, dispose, value])
			} else if (async) {
				stack.push([async])
			}
			return value
		}
		export var __callDispose = (stack, error, hasError) => {
			var E = typeof SuppressedError === 'function' ? SuppressedError :
				function (e, s, m, _) { return _ = Error(m), _.name = 'SuppressedError', _.error = e, _.suppressed = s, _ }
			var fail = e => error = hasError ? new E(e, error, 'An error was suppressed during disposal') : (hasError = true, e)
			var next = (it) => {
				while (it = stack.pop()) {
					try {
						var result = it[1] && it[1].call(it[2])
						if (it[0]) return Promise.resolve(result).then(next, (e) => (fail(e), next()))
					} catch (e) {
						fail(e)
					}
				}
				if (hasError) throw error
			}
			return next()
		}
	`

	return logger.Source{
		Index:          SourceIndex,
		KeyPath:        logger.Path{Text: "<runtime>"},
		PrettyPath:     "<runtime>",
		IdentifierName: "runtime",
		Contents:       text,
	}
}
+
+// The TypeScript decorator transform behaves similar to the official
+// TypeScript compiler.
+//
+// One difference is that the "__decorateClass" function doesn't contain a reference
+// to the non-existent "Reflect.decorate" function. This function was never
+// standardized and checking for it is wasted code (as well as a potentially
+// dangerous cause of unintentional behavior changes in the future).
+//
+// Another difference is that the "__decorateClass" function doesn't take in an
+// optional property descriptor like it does in the official TypeScript
+// compiler's support code. This appears to be a dead code path in the official
+// support code that is only there for legacy reasons.
+//
+// Here are some examples of how esbuild's decorator transform works:
+//
+// ============================= Class decorator ==============================
+//
+//   // TypeScript                      // JavaScript
+//   @dec                               let C = class {
+//   class C {                          };
+//   }                                  C = __decorateClass([
+//                                        dec
+//                                      ], C);
+//
+// ============================ Method decorator ==============================
+//
+//   // TypeScript                      // JavaScript
+//   class C {                          class C {
+//     @dec                               foo() {}
+//     foo() {}                         }
+//   }                                  __decorateClass([
+//                                        dec
+//                                      ], C.prototype, 'foo', 1);
+//
+// =========================== Parameter decorator ============================
+//
+//   // TypeScript                      // JavaScript
+//   class C {                          class C {
+//     foo(@dec bar) {}                   foo(bar) {}
+//   }                                  }
+//                                      __decorateClass([
+//                                        __decorateParam(0, dec)
+//                                      ], C.prototype, 'foo', 1);
+//
+// ============================= Field decorator ==============================
+//
+//   // TypeScript                      // JavaScript
+//   class C {                          class C {
+//     @dec                               constructor() {
+//     foo = 123                            this.foo = 123
+//   }                                    }
+//                                      }
+//                                      __decorateClass([
+//                                        dec
+//                                      ], C.prototype, 'foo', 2);
diff --git a/source/vendor/github.com/evanw/esbuild/internal/sourcemap/sourcemap.go b/source/vendor/github.com/evanw/esbuild/internal/sourcemap/sourcemap.go
new file mode 100644
index 0000000..93effc2
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/sourcemap/sourcemap.go
@@ -0,0 +1,834 @@
+package sourcemap
+
+import (
+	"bytes"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+type Mapping struct {
+	GeneratedLine   int32 // 0-based
+	GeneratedColumn int32 // 0-based count of UTF-16 code units
+
+	SourceIndex    int32       // 0-based
+	OriginalLine   int32       // 0-based
+	OriginalColumn int32       // 0-based count of UTF-16 code units
+	OriginalName   ast.Index32 // 0-based, optional
+}
+
+type SourceMap struct {
+	Sources        []string
+	SourcesContent []SourceContent
+	Mappings       []Mapping
+	Names          []string
+}
+
+type SourceContent struct {
+	// This stores both the unquoted and the quoted values. We try to use the
+	// already-quoted value if possible so we don't need to re-quote it
+	// unnecessarily for maximum performance.
+	Quoted string
+
+	// But sometimes we need to re-quote the value, such as when it contains
+	// non-ASCII characters and we are in ASCII-only mode. In that case we quote
+	// this parsed UTF-16 value.
+	Value []uint16
+}
+
+func (sm *SourceMap) Find(line int32, column int32) *Mapping {
+	mappings := sm.Mappings
+
+	// Binary search
+	count := len(mappings)
+	index := 0
+	for count > 0 {
+		step := count / 2
+		i := index + step
+		mapping := mappings[i]
+		if mapping.GeneratedLine < line || (mapping.GeneratedLine == line && mapping.GeneratedColumn <= column) {
+			index = i + 1
+			count -= step + 1
+		} else {
+			count = step
+		}
+	}
+
+	// Handle search failure
+	if index > 0 {
+		mapping := &mappings[index-1]
+
+		// Match the behavior of the popular "source-map" library from Mozilla
+		if mapping.GeneratedLine == line {
+			return mapping
+		}
+	}
+	return nil
+}
+
+var base64 = []byte("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/")
+
+// A single base 64 digit can contain 6 bits of data. For the base 64 variable
+// length quantities we use in the source map spec, the first bit is the sign,
+// the next four bits are the actual value, and the 6th bit is the continuation
+// bit. The continuation bit tells us whether there are more digits in this
+// value following this digit.
+//
+//	Continuation
+//	|    Sign
+//	|    |
+//	V    V
+//	101011
+func encodeVLQ(encoded []byte, value int) []byte {
+	var vlq int
+	if value < 0 {
+		vlq = ((-value) << 1) | 1
+	} else {
+		vlq = value << 1
+	}
+
+	// Handle the common case
+	if (vlq >> 5) == 0 {
+		digit := vlq & 31
+		encoded = append(encoded, base64[digit])
+		return encoded
+	}
+
+	for {
+		digit := vlq & 31
+		vlq >>= 5
+
+		// If there are still more digits in this value, we must make sure the
+		// continuation bit is marked
+		if vlq != 0 {
+			digit |= 32
+		}
+
+		encoded = append(encoded, base64[digit])
+
+		if vlq == 0 {
+			break
+		}
+	}
+
+	return encoded
+}
+
+func DecodeVLQ(encoded []byte, start int) (int, int) {
+	shift := 0
+	vlq := 0
+
+	// Scan over the input
+	for {
+		index := bytes.IndexByte(base64, encoded[start])
+		if index < 0 {
+			break
+		}
+
+		// Decode a single byte
+		vlq |= (index & 31) << shift
+		start++
+		shift += 5
+
+		// Stop if there's no continuation bit
+		if (index & 32) == 0 {
+			break
+		}
+	}
+
+	// Recover the value
+	value := vlq >> 1
+	if (vlq & 1) != 0 {
+		value = -value
+	}
+	return value, start
+}
+
+func DecodeVLQUTF16(encoded []uint16) (int32, int, bool) {
+	n := len(encoded)
+	if n == 0 {
+		return 0, 0, false
+	}
+
+	// Scan over the input
+	current := 0
+	shift := 0
+	var vlq int32
+	for {
+		if current >= n {
+			return 0, 0, false
+		}
+		index := int32(bytes.IndexByte(base64, byte(encoded[current])))
+		if index < 0 {
+			return 0, 0, false
+		}
+
+		// Decode a single byte
+		vlq |= (index & 31) << shift
+		current++
+		shift += 5
+
+		// Stop if there's no continuation bit
+		if (index & 32) == 0 {
+			break
+		}
+	}
+
+	// Recover the value
+	var value = vlq >> 1
+	if (vlq & 1) != 0 {
+		value = -value
+	}
+	return value, current, true
+}
+
+type LineColumnOffset struct {
+	Lines   int
+	Columns int
+}
+
+func (a LineColumnOffset) ComesBefore(b LineColumnOffset) bool {
+	return a.Lines < b.Lines || (a.Lines == b.Lines && a.Columns < b.Columns)
+}
+
+func (a *LineColumnOffset) Add(b LineColumnOffset) {
+	if b.Lines == 0 {
+		a.Columns += b.Columns
+	} else {
+		a.Lines += b.Lines
+		a.Columns = b.Columns
+	}
+}
+
+func (offset *LineColumnOffset) AdvanceBytes(bytes []byte) {
+	columns := offset.Columns
+	for len(bytes) > 0 {
+		c, width := utf8.DecodeRune(bytes)
+		bytes = bytes[width:]
+		switch c {
+		case '\r', '\n', '\u2028', '\u2029':
+			// Handle Windows-specific "\r\n" newlines
+			if c == '\r' && len(bytes) > 0 && bytes[0] == '\n' {
+				columns++
+				continue
+			}
+
+			offset.Lines++
+			columns = 0
+
+		default:
+			// Mozilla's "source-map" library counts columns using UTF-16 code units
+			if c <= 0xFFFF {
+				columns++
+			} else {
+				columns += 2
+			}
+		}
+	}
+	offset.Columns = columns
+}
+
+func (offset *LineColumnOffset) AdvanceString(text string) {
+	columns := offset.Columns
+	for i, c := range text {
+		switch c {
+		case '\r', '\n', '\u2028', '\u2029':
+			// Handle Windows-specific "\r\n" newlines
+			if c == '\r' && i+1 < len(text) && text[i+1] == '\n' {
+				columns++
+				continue
+			}
+
+			offset.Lines++
+			columns = 0
+
+		default:
+			// Mozilla's "source-map" library counts columns using UTF-16 code units
+			if c <= 0xFFFF {
+				columns++
+			} else {
+				columns += 2
+			}
+		}
+	}
+	offset.Columns = columns
+}
+
+type SourceMapPieces struct {
+	Prefix   []byte
+	Mappings []byte
+	Suffix   []byte
+}
+
+func (pieces SourceMapPieces) HasContent() bool {
+	return len(pieces.Prefix)+len(pieces.Mappings)+len(pieces.Suffix) > 0
+}
+
+type SourceMapShift struct {
+	Before LineColumnOffset
+	After  LineColumnOffset
+}
+
+func (pieces SourceMapPieces) Finalize(shifts []SourceMapShift) []byte {
+	// An optimized path for when there are no shifts
+	if len(shifts) == 1 {
+		bytes := pieces.Prefix
+		minCap := len(bytes) + len(pieces.Mappings) + len(pieces.Suffix)
+		if cap(bytes) < minCap {
+			bytes = append(make([]byte, 0, minCap), bytes...)
+		}
+		bytes = append(bytes, pieces.Mappings...)
+		bytes = append(bytes, pieces.Suffix...)
+		return bytes
+	}
+
+	startOfRun := 0
+	current := 0
+	generated := LineColumnOffset{}
+	prevShiftColumnDelta := 0
+	j := helpers.Joiner{}
+
+	// Start the source map
+	j.AddBytes(pieces.Prefix)
+
+	// This assumes that a) all mappings are valid and b) all mappings are ordered
+	// by increasing generated position. This should be the case for all mappings
+	// generated by esbuild, which should be the only mappings we process here.
+	for current < len(pieces.Mappings) {
+		// Handle a line break
+		if pieces.Mappings[current] == ';' {
+			generated.Lines++
+			generated.Columns = 0
+			prevShiftColumnDelta = 0
+			current++
+			continue
+		}
+
+		potentialEndOfRun := current
+
+		// Read the generated column
+		generatedColumnDelta, next := DecodeVLQ(pieces.Mappings, current)
+		generated.Columns += generatedColumnDelta
+		current = next
+
+		potentialStartOfRun := current
+
+		// Skip over the original position information
+		_, current = DecodeVLQ(pieces.Mappings, current) // The original source
+		_, current = DecodeVLQ(pieces.Mappings, current) // The original line
+		_, current = DecodeVLQ(pieces.Mappings, current) // The original column
+
+		// Skip over the original name
+		if current < len(pieces.Mappings) {
+			if c := pieces.Mappings[current]; c != ',' && c != ';' {
+				_, current = DecodeVLQ(pieces.Mappings, current)
+			}
+		}
+
+		// Skip a trailing comma
+		if current < len(pieces.Mappings) && pieces.Mappings[current] == ',' {
+			current++
+		}
+
+		// Detect crossing shift boundaries
+		didCrossBoundary := false
+		for len(shifts) > 1 && shifts[1].Before.ComesBefore(generated) {
+			shifts = shifts[1:]
+			didCrossBoundary = true
+		}
+		if !didCrossBoundary {
+			continue
+		}
+
+		// This shift isn't relevant if the next mapping after this shift is on a
+		// following line. In that case, don't split and keep scanning instead.
+		shift := shifts[0]
+		if shift.After.Lines != generated.Lines {
+			continue
+		}
+
+		// Add all previous mappings in a single run for efficiency. Since source
+		// mappings are relative, no data needs to be modified inside this run.
+		j.AddBytes(pieces.Mappings[startOfRun:potentialEndOfRun])
+
+		// Then modify the first mapping across the shift boundary with the updated
+		// generated column value. It's simplest to only support column shifts. This
+		// is reasonable because import paths should not contain newlines.
+		if shift.Before.Lines != shift.After.Lines {
+			panic("Unexpected line change when shifting source maps")
+		}
+		shiftColumnDelta := shift.After.Columns - shift.Before.Columns
+		j.AddBytes(encodeVLQ(nil, generatedColumnDelta+shiftColumnDelta-prevShiftColumnDelta))
+		prevShiftColumnDelta = shiftColumnDelta
+
+		// Finally, start the next run after the end of this generated column offset
+		startOfRun = potentialStartOfRun
+	}
+
+	// Finish the source map
+	j.AddBytes(pieces.Mappings[startOfRun:])
+	j.AddBytes(pieces.Suffix)
+	return j.Done()
+}
+
+// Coordinates in source maps are stored using relative offsets for size
+// reasons. When joining together chunks of a source map that were emitted
+// in parallel for different parts of a file, we need to fix up the first
+// segment of each chunk to be relative to the end of the previous chunk.
+type SourceMapState struct {
+	// This isn't stored in the source map. It's only used by the bundler to join
+	// source map chunks together correctly.
+	GeneratedLine int
+
+	// These are stored in the source map in VLQ format.
+	GeneratedColumn int
+	SourceIndex     int
+	OriginalLine    int
+	OriginalColumn  int
+	OriginalName    int
+	HasOriginalName bool
+}
+
+// Source map chunks are computed in parallel for speed. Each chunk is relative
+// to the zero state instead of being relative to the end state of the previous
+// chunk, since it's impossible to know the end state of the previous chunk in
+// a parallel computation.
+//
+// After all chunks are computed, they are joined together in a second pass.
+// This rewrites the first mapping in each chunk to be relative to the end
+// state of the previous chunk.
+func AppendSourceMapChunk(j *helpers.Joiner, prevEndState SourceMapState, startState SourceMapState, buffer MappingsBuffer) {
+	// Handle line breaks in between this mapping and the previous one
+	if startState.GeneratedLine != 0 {
+		j.AddBytes(bytes.Repeat([]byte{';'}, startState.GeneratedLine))
+		prevEndState.GeneratedColumn = 0
+	}
+
+	// Skip past any leading semicolons, which indicate line breaks
+	semicolons := 0
+	for buffer.Data[semicolons] == ';' {
+		semicolons++
+	}
+	if semicolons > 0 {
+		j.AddBytes(buffer.Data[:semicolons])
+		prevEndState.GeneratedColumn = 0
+		startState.GeneratedColumn = 0
+	}
+
+	// Strip off the first mapping from the buffer. The first mapping should be
+	// for the start of the original file (the printer always generates one for
+	// the start of the file).
+	//
+	// Note that we do not want to strip off the original name, even though it
+	// could be a part of the first mapping. This will be handled using a special
+	// case below instead. Original names are optional and are often omitted, so
+	// we handle it uniformly by saving an index to the first original name,
+	// which may or may not be a part of the first mapping.
+	generatedColumn, i := DecodeVLQ(buffer.Data, semicolons)
+	sourceIndex, i := DecodeVLQ(buffer.Data, i)
+	originalLine, i := DecodeVLQ(buffer.Data, i)
+	originalColumn, i := DecodeVLQ(buffer.Data, i)
+
+	// Rewrite the first mapping to be relative to the end state of the previous
+	// chunk. We now know what the end state is because we're in the second pass
+	// where all chunks have already been generated.
+	startState.SourceIndex += sourceIndex
+	startState.GeneratedColumn += generatedColumn
+	startState.OriginalLine += originalLine
+	startState.OriginalColumn += originalColumn
+	prevEndState.HasOriginalName = false // This is handled separately below
+	rewritten, _ := appendMappingToBuffer(nil, j.LastByte(), prevEndState, startState)
+	j.AddBytes(rewritten)
+
+	// Next, if there's an original name, we need to rewrite that as well to be
+	// relative to that of the previous chunk.
+	if buffer.FirstNameOffset.IsValid() {
+		before := int(buffer.FirstNameOffset.GetIndex())
+		originalName, after := DecodeVLQ(buffer.Data, before)
+		originalName += startState.OriginalName - prevEndState.OriginalName
+		j.AddBytes(buffer.Data[i:before])
+		j.AddBytes(encodeVLQ(nil, originalName))
+		j.AddBytes(buffer.Data[after:])
+		return
+	}
+
+	// Otherwise, just append everything after that without modification
+	j.AddBytes(buffer.Data[i:])
+}
+
+func appendMappingToBuffer(buffer []byte, lastByte byte, prevState SourceMapState, currentState SourceMapState) ([]byte, ast.Index32) {
+	// Put commas in between mappings
+	if lastByte != 0 && lastByte != ';' && lastByte != '"' {
+		buffer = append(buffer, ',')
+	}
+
+	// Record the mapping (note that the generated line is recorded using ';' elsewhere)
+	buffer = encodeVLQ(buffer, currentState.GeneratedColumn-prevState.GeneratedColumn)
+	buffer = encodeVLQ(buffer, currentState.SourceIndex-prevState.SourceIndex)
+	buffer = encodeVLQ(buffer, currentState.OriginalLine-prevState.OriginalLine)
+	buffer = encodeVLQ(buffer, currentState.OriginalColumn-prevState.OriginalColumn)
+
+	// Record the optional original name
+	var nameOffset ast.Index32
+	if currentState.HasOriginalName {
+		nameOffset = ast.MakeIndex32(uint32(len(buffer)))
+		buffer = encodeVLQ(buffer, currentState.OriginalName-prevState.OriginalName)
+	}
+
+	return buffer, nameOffset
+}
+
+type LineOffsetTable struct {
+	// The source map specification is very loose and does not specify what
+	// column numbers actually mean. The popular "source-map" library from Mozilla
+	// appears to interpret them as counts of UTF-16 code units, so we generate
+	// those too for compatibility.
+	//
+	// We keep mapping tables around to accelerate conversion from byte offsets
+	// to UTF-16 code unit counts. However, this mapping takes up a lot of memory
+	// and generates a lot of garbage. Since most JavaScript is ASCII and the
+	// mapping for ASCII is 1:1, we avoid creating a table for ASCII-only lines
+	// as an optimization.
+	columnsForNonASCII        []int32
+	byteOffsetToFirstNonASCII int32
+
+	byteOffsetToStartOfLine int32
+}
+
+func GenerateLineOffsetTables(contents string, approximateLineCount int32) []LineOffsetTable {
+	var ColumnsForNonASCII []int32
+	ByteOffsetToFirstNonASCII := int32(0)
+	lineByteOffset := 0
+	columnByteOffset := 0
+	column := int32(0)
+
+	// Preallocate the top-level table using the approximate line count from the lexer
+	lineOffsetTables := make([]LineOffsetTable, 0, approximateLineCount)
+
+	for i, c := range contents {
+		// Mark the start of the next line
+		if column == 0 {
+			lineByteOffset = i
+		}
+
+		// Start the mapping if this character is non-ASCII
+		if c > 0x7F && ColumnsForNonASCII == nil {
+			columnByteOffset = i - lineByteOffset
+			ByteOffsetToFirstNonASCII = int32(columnByteOffset)
+			ColumnsForNonASCII = []int32{}
+		}
+
+		// Update the per-byte column offsets
+		if ColumnsForNonASCII != nil {
+			for lineBytesSoFar := i - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
+				ColumnsForNonASCII = append(ColumnsForNonASCII, column)
+			}
+		}
+
+		switch c {
+		case '\r', '\n', '\u2028', '\u2029':
+			// Handle Windows-specific "\r\n" newlines
+			if c == '\r' && i+1 < len(contents) && contents[i+1] == '\n' {
+				column++
+				continue
+			}
+
+			lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
+				byteOffsetToStartOfLine:   int32(lineByteOffset),
+				byteOffsetToFirstNonASCII: ByteOffsetToFirstNonASCII,
+				columnsForNonASCII:        ColumnsForNonASCII,
+			})
+			columnByteOffset = 0
+			ByteOffsetToFirstNonASCII = 0
+			ColumnsForNonASCII = nil
+			column = 0
+
+		default:
+			// Mozilla's "source-map" library counts columns using UTF-16 code units
+			if c <= 0xFFFF {
+				column++
+			} else {
+				column += 2
+			}
+		}
+	}
+
+	// Mark the start of the next line
+	if column == 0 {
+		lineByteOffset = len(contents)
+	}
+
+	// Do one last update for the column at the end of the file
+	if ColumnsForNonASCII != nil {
+		for lineBytesSoFar := len(contents) - lineByteOffset; columnByteOffset <= lineBytesSoFar; columnByteOffset++ {
+			ColumnsForNonASCII = append(ColumnsForNonASCII, column)
+		}
+	}
+
+	lineOffsetTables = append(lineOffsetTables, LineOffsetTable{
+		byteOffsetToStartOfLine:   int32(lineByteOffset),
+		byteOffsetToFirstNonASCII: ByteOffsetToFirstNonASCII,
+		columnsForNonASCII:        ColumnsForNonASCII,
+	})
+	return lineOffsetTables
+}
+
+type MappingsBuffer struct {
+	Data            []byte
+	FirstNameOffset ast.Index32
+}
+
+type Chunk struct {
+	Buffer      MappingsBuffer
+	QuotedNames [][]byte
+
+	// This end state will be used to rewrite the start of the following source
+	// map chunk so that the delta-encoded VLQ numbers are preserved.
+	EndState SourceMapState
+
+	// There probably isn't a source mapping at the end of the file (nor should
+	// there be) but if we're appending another source map chunk after this one,
+	// we'll need to know how many characters were in the last line we generated.
+	FinalGeneratedColumn int
+
+	ShouldIgnore bool
+}
+
+type ChunkBuilder struct {
+	inputSourceMap      *SourceMap
+	sourceMap           []byte
+	quotedNames         [][]byte
+	namesMap            map[string]uint32
+	lineOffsetTables    []LineOffsetTable
+	prevOriginalName    string
+	prevState           SourceMapState
+	lastGeneratedUpdate int
+	generatedColumn     int
+	prevGeneratedLen    int
+	prevOriginalLoc     logger.Loc
+	firstNameOffset     ast.Index32
+	hasPrevState        bool
+	asciiOnly           bool
+
+	// This is a workaround for a bug in the popular "source-map" library:
+	// https://github.com/mozilla/source-map/issues/261. The library will
+	// sometimes return null when querying a source map unless every line
+	// starts with a mapping at column zero.
+	//
+	// The workaround is to replicate the previous mapping if a line ends
+	// up not starting with a mapping. This is done lazily because we want
+	// to avoid replicating the previous mapping if we don't need to.
+	lineStartsWithMapping     bool
+	coverLinesWithoutMappings bool
+}
+
+func MakeChunkBuilder(inputSourceMap *SourceMap, lineOffsetTables []LineOffsetTable, asciiOnly bool) ChunkBuilder {
+	return ChunkBuilder{
+		inputSourceMap:   inputSourceMap,
+		prevOriginalLoc:  logger.Loc{Start: -1},
+		lineOffsetTables: lineOffsetTables,
+		asciiOnly:        asciiOnly,
+		namesMap:         make(map[string]uint32),
+
+		// We automatically repeat the previous source mapping if we ever generate
+		// a line that doesn't start with a mapping. This helps give files more
+		// complete mapping coverage without gaps.
+		//
+		// However, we probably shouldn't do this if the input file has a nested
+		// source map that we will be remapping through. We have no idea what state
+		// that source map is in and it could be pretty scrambled.
+		//
+		// I've seen cases where blindly repeating the last mapping for subsequent
+		// lines gives very strange and unhelpful results with source maps from
+		// other tools.
+		coverLinesWithoutMappings: inputSourceMap == nil,
+	}
+}
+
+func (b *ChunkBuilder) AddSourceMapping(originalLoc logger.Loc, originalName string, output []byte) {
+	// Avoid generating duplicate mappings
+	if originalLoc == b.prevOriginalLoc && (b.prevGeneratedLen == len(output) || b.prevOriginalName == originalName) {
+		return
+	}
+
+	b.prevOriginalLoc = originalLoc
+	b.prevGeneratedLen = len(output)
+	b.prevOriginalName = originalName
+
+	// Binary search to find the line
+	lineOffsetTables := b.lineOffsetTables
+	count := len(lineOffsetTables)
+	originalLine := 0
+	for count > 0 {
+		step := count / 2
+		i := originalLine + step
+		if lineOffsetTables[i].byteOffsetToStartOfLine <= originalLoc.Start {
+			originalLine = i + 1
+			count = count - step - 1
+		} else {
+			count = step
+		}
+	}
+	originalLine--
+
+	// Use the line to compute the column
+	line := &lineOffsetTables[originalLine]
+	originalColumn := int(originalLoc.Start - line.byteOffsetToStartOfLine)
+	if line.columnsForNonASCII != nil && originalColumn >= int(line.byteOffsetToFirstNonASCII) {
+		originalColumn = int(line.columnsForNonASCII[originalColumn-int(line.byteOffsetToFirstNonASCII)])
+	}
+
+	b.updateGeneratedLineAndColumn(output)
+
+	// If this line doesn't start with a mapping and we're about to add a mapping
+	// that's not at the start, insert a mapping first so the line starts with one.
+	if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.generatedColumn > 0 && b.hasPrevState {
+		b.appendMappingWithoutRemapping(SourceMapState{
+			GeneratedLine:   b.prevState.GeneratedLine,
+			GeneratedColumn: 0,
+			SourceIndex:     b.prevState.SourceIndex,
+			OriginalLine:    b.prevState.OriginalLine,
+			OriginalColumn:  b.prevState.OriginalColumn,
+		})
+	}
+
+	b.appendMapping(originalName, SourceMapState{
+		GeneratedLine:   b.prevState.GeneratedLine,
+		GeneratedColumn: b.generatedColumn,
+		OriginalLine:    originalLine,
+		OriginalColumn:  originalColumn,
+	})
+
+	// This line now has a mapping on it, so don't insert another one
+	b.lineStartsWithMapping = true
+}
+
+func (b *ChunkBuilder) GenerateChunk(output []byte) Chunk {
+	b.updateGeneratedLineAndColumn(output)
+	shouldIgnore := true
+	for _, c := range b.sourceMap {
+		if c != ';' {
+			shouldIgnore = false
+			break
+		}
+	}
+	return Chunk{
+		Buffer: MappingsBuffer{
+			Data:            b.sourceMap,
+			FirstNameOffset: b.firstNameOffset,
+		},
+		QuotedNames:          b.quotedNames,
+		EndState:             b.prevState,
+		FinalGeneratedColumn: b.generatedColumn,
+		ShouldIgnore:         shouldIgnore,
+	}
+}
+
+// Scan over the printed text since the last source mapping and update the
+// generated line and column numbers
+func (b *ChunkBuilder) updateGeneratedLineAndColumn(output []byte) {
+	for i, c := range string(output[b.lastGeneratedUpdate:]) {
+		switch c {
+		case '\r', '\n', '\u2028', '\u2029':
+			// Handle Windows-specific "\r\n" newlines
+			if c == '\r' {
+				newlineCheck := b.lastGeneratedUpdate + i + 1
+				if newlineCheck < len(output) && output[newlineCheck] == '\n' {
+					continue
+				}
+			}
+
+			// If we're about to move to the next line and the previous line didn't have
+			// any mappings, add a mapping at the start of the previous line.
+			if b.coverLinesWithoutMappings && !b.lineStartsWithMapping && b.hasPrevState {
+				b.appendMappingWithoutRemapping(SourceMapState{
+					GeneratedLine:   b.prevState.GeneratedLine,
+					GeneratedColumn: 0,
+					SourceIndex:     b.prevState.SourceIndex,
+					OriginalLine:    b.prevState.OriginalLine,
+					OriginalColumn:  b.prevState.OriginalColumn,
+				})
+			}
+
+			b.prevState.GeneratedLine++
+			b.prevState.GeneratedColumn = 0
+			b.generatedColumn = 0
+			b.sourceMap = append(b.sourceMap, ';')
+
+			// This new line doesn't have a mapping yet
+			b.lineStartsWithMapping = false
+
+		default:
+			// Mozilla's "source-map" library counts columns using UTF-16 code units
+			if c <= 0xFFFF {
+				b.generatedColumn++
+			} else {
+				b.generatedColumn += 2
+			}
+		}
+	}
+
+	b.lastGeneratedUpdate = len(output)
+}
+
+func (b *ChunkBuilder) appendMapping(originalName string, currentState SourceMapState) {
+	// If the input file had a source map, map all the way back to the original
+	if b.inputSourceMap != nil {
+		mapping := b.inputSourceMap.Find(
+			int32(currentState.OriginalLine),
+			int32(currentState.OriginalColumn))
+
+		// Some locations won't have a mapping
+		if mapping == nil {
+			return
+		}
+
+		currentState.SourceIndex = int(mapping.SourceIndex)
+		currentState.OriginalLine = int(mapping.OriginalLine)
+		currentState.OriginalColumn = int(mapping.OriginalColumn)
+
+		// Map all the way back to the original name if present. Otherwise, keep
+		// the original name from esbuild, which corresponds to the name in the
+		// intermediate source code. This is important for tools that only emit
+		// a name mapping when the name is different than the original name.
+		if mapping.OriginalName.IsValid() {
+			originalName = b.inputSourceMap.Names[mapping.OriginalName.GetIndex()]
+		}
+	}
+
+	// Optionally reference the original name
+	if originalName != "" {
+		i, ok := b.namesMap[originalName]
+		if !ok {
+			i = uint32(len(b.quotedNames))
+			b.quotedNames = append(b.quotedNames, helpers.QuoteForJSON(originalName, b.asciiOnly))
+			b.namesMap[originalName] = i
+		}
+		currentState.OriginalName = int(i)
+		currentState.HasOriginalName = true
+	}
+
+	b.appendMappingWithoutRemapping(currentState)
+}
+
+func (b *ChunkBuilder) appendMappingWithoutRemapping(currentState SourceMapState) {
+	var lastByte byte
+	if len(b.sourceMap) != 0 {
+		lastByte = b.sourceMap[len(b.sourceMap)-1]
+	}
+
+	var nameOffset ast.Index32
+	b.sourceMap, nameOffset = appendMappingToBuffer(b.sourceMap, lastByte, b.prevState, currentState)
+	prevOriginalName := b.prevState.OriginalName
+	b.prevState = currentState
+	if !currentState.HasOriginalName {
+		// Revert the original name change if it's invalid
+		b.prevState.OriginalName = prevOriginalName
+	} else if !b.firstNameOffset.IsValid() {
+		// Keep track of the first name offset so we can jump right to it later
+		b.firstNameOffset = nameOffset
+	}
+	b.hasPrevState = true
+}
diff --git a/source/vendor/github.com/evanw/esbuild/internal/xxhash/LICENSE.txt b/source/vendor/github.com/evanw/esbuild/internal/xxhash/LICENSE.txt
new file mode 100644
index 0000000..24b5306
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/xxhash/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/source/vendor/github.com/evanw/esbuild/internal/xxhash/README.md b/source/vendor/github.com/evanw/esbuild/internal/xxhash/README.md
new file mode 100644
index 0000000..1c9f0af
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/xxhash/README.md
@@ -0,0 +1 @@
+This Go implementation of xxHash is from https://github.com/cespare/xxhash.
diff --git a/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash.go b/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash.go
new file mode 100644
index 0000000..15c835d
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash.go
@@ -0,0 +1,235 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash_other.go b/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash_other.go
new file mode 100644
index 0000000..ce512f7
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/internal/xxhash/xxhash_other.go
@@ -0,0 +1,74 @@
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		// Inputs of 32+ bytes run the four-lane accumulator loop, then
+		// merge the lanes into a single value.
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			// Full slice expressions (three-index) help the compiler
+			// eliminate bounds checks in this hot loop.
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		// Short inputs start from a fixed seed instead of the lane merge.
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	// Fold in the remaining tail: 8 bytes at a time, then 4, then 1.
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	// Final avalanche: mix the high bits down into the low bits.
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+// writeBlocks consumes as many whole 32-byte blocks from b as possible,
+// updating the digest's four lane accumulators, and returns the number
+// of bytes consumed. Any remainder (< 32 bytes) is left for the caller
+// to buffer.
+func writeBlocks(d *Digest, b []byte) int {
+	// Work on locals so the accumulators stay in registers for the loop.
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/api.go b/source/vendor/github.com/evanw/esbuild/pkg/api/api.go
new file mode 100644
index 0000000..08a597e
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/api.go
@@ -0,0 +1,718 @@
+// This API exposes esbuild's two main operations: building and transforming.
+// It's intended for integrating esbuild into other tools as a library.
+//
+// If you are just trying to run esbuild from Go without the overhead of
+// creating a child process, there is also an API for the command-line
+// interface itself: https://pkg.go.dev/github.com/evanw/esbuild/pkg/cli.
+//
+// # Build API
+//
+// This function runs an end-to-end build operation. It takes an array of file
+// paths as entry points, parses them and all of their dependencies, and
+// returns the output files to write to the file system. The available options
+// roughly correspond to esbuild's command-line flags.
+//
+// Example usage:
+//
+//	package main
+//
+//	import (
+//	    "os"
+//
+//	    "github.com/evanw/esbuild/pkg/api"
+//	)
+//
+//	func main() {
+//	    result := api.Build(api.BuildOptions{
+//	        EntryPoints: []string{"input.js"},
+//	        Outfile:     "output.js",
+//	        Bundle:      true,
+//	        Write:       true,
+//	        LogLevel:    api.LogLevelInfo,
+//	    })
+//
+//	    if len(result.Errors) > 0 {
+//	        os.Exit(1)
+//	    }
+//	}
+//
+// # Transform API
+//
+// This function transforms a string of source code into JavaScript. It can be
+// used to minify JavaScript, convert TypeScript/JSX to JavaScript, or convert
+// newer JavaScript to older JavaScript. The available options roughly
+// correspond to esbuild's command-line flags.
+//
+// Example usage:
+//
+//	package main
+//
+//	import (
+//	    "fmt"
+//	    "os"
+//
+//	    "github.com/evanw/esbuild/pkg/api"
+//	)
+//
+//	func main() {
+//	    jsx := `
+//	        import * as React from 'react'
+//	        import * as ReactDOM from 'react-dom'
+//
+//	        ReactDOM.render(
+//	            <h1>Hello, world!</h1>,
+//	            document.getElementById('root')
+//	        );
+//	    `
+//
+//	    result := api.Transform(jsx, api.TransformOptions{
+//	        Loader: api.LoaderJSX,
+//	    })
+//
+//	    fmt.Printf("%d errors and %d warnings\n",
+//	        len(result.Errors), len(result.Warnings))
+//
+//	    os.Stdout.Write(result.Code)
+//	}
+package api
+
+import (
+	"time"
+
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+// SourceMap selects how (and whether) source maps are emitted.
+type SourceMap uint8
+
+const (
+	SourceMapNone SourceMap = iota
+	SourceMapInline
+	SourceMapLinked
+	SourceMapExternal
+	SourceMapInlineAndExternal
+)
+
+// SourcesContent controls embedding of original sources in source maps.
+type SourcesContent uint8
+
+const (
+	SourcesContentInclude SourcesContent = iota
+	SourcesContentExclude
+)
+
+// LegalComments controls where license/legal comments end up in output.
+type LegalComments uint8
+
+const (
+	LegalCommentsDefault LegalComments = iota
+	LegalCommentsNone
+	LegalCommentsInline
+	LegalCommentsEndOfFile
+	LegalCommentsLinked
+	LegalCommentsExternal
+)
+
+// JSX selects how JSX syntax is handled.
+type JSX uint8
+
+const (
+	JSXTransform JSX = iota
+	JSXPreserve
+	JSXAutomatic
+)
+
+// Target selects the JavaScript language version to compile down to.
+type Target uint8
+
+const (
+	DefaultTarget Target = iota
+	ESNext
+	ES5
+	ES2015
+	ES2016
+	ES2017
+	ES2018
+	ES2019
+	ES2020
+	ES2021
+	ES2022
+	ES2023
+	ES2024
+)
+
+// Loader determines how an input file's contents are interpreted.
+type Loader uint16
+
+const (
+	LoaderNone Loader = iota
+	LoaderBase64
+	LoaderBinary
+	LoaderCopy
+	LoaderCSS
+	LoaderDataURL
+	LoaderDefault
+	LoaderEmpty
+	LoaderFile
+	LoaderGlobalCSS
+	LoaderJS
+	LoaderJSON
+	LoaderJSX
+	LoaderLocalCSS
+	LoaderText
+	LoaderTS
+	LoaderTSX
+)
+
+// Platform selects default settings for the intended runtime environment.
+type Platform uint8
+
+const (
+	PlatformDefault Platform = iota
+	PlatformBrowser
+	PlatformNode
+	PlatformNeutral
+)
+
+// Format selects the output module format.
+type Format uint8
+
+const (
+	FormatDefault Format = iota
+	FormatIIFE
+	FormatCommonJS
+	FormatESModule
+)
+
+// Packages controls whether package imports are bundled or kept external.
+type Packages uint8
+
+const (
+	PackagesDefault Packages = iota
+	PackagesBundle
+	PackagesExternal
+)
+
+// Engine names a target JavaScript engine and its minimum version.
+type Engine struct {
+	Name    EngineName
+	Version string
+}
+
+// Location points at a span of text within a source file, for messages.
+type Location struct {
+	File       string
+	Namespace  string
+	Line       int // 1-based
+	Column     int // 0-based, in bytes
+	Length     int // in bytes
+	LineText   string
+	Suggestion string
+}
+
+// Message is a single error or warning produced by a build or transform.
+type Message struct {
+	ID         string
+	PluginName string
+	Text       string
+	Location   *Location
+	Notes      []Note
+
+	// Optional user-specified data that is passed through unmodified. You can
+	// use this to stash the original error, for example.
+	Detail interface{}
+}
+
+// Note is supplementary text attached to a Message.
+type Note struct {
+	Text     string
+	Location *Location
+}
+
+// StderrColor controls ANSI color usage in terminal output.
+type StderrColor uint8
+
+const (
+	ColorIfTerminal StderrColor = iota
+	ColorNever
+	ColorAlways
+)
+
+// LogLevel sets the minimum severity of messages printed to stderr.
+type LogLevel uint8
+
+const (
+	LogLevelSilent LogLevel = iota
+	LogLevelVerbose
+	LogLevelDebug
+	LogLevelInfo
+	LogLevelWarning
+	LogLevelError
+)
+
+// Charset controls whether non-ASCII characters are escaped in output.
+type Charset uint8
+
+const (
+	CharsetDefault Charset = iota
+	CharsetASCII
+	CharsetUTF8
+)
+
+// TreeShaking controls dead-code elimination.
+type TreeShaking uint8
+
+const (
+	TreeShakingDefault TreeShaking = iota
+	TreeShakingFalse
+	TreeShakingTrue
+)
+
+// Drop is a bit set of construct kinds to remove from the output.
+type Drop uint8
+
+const (
+	DropConsole Drop = 1 << iota
+	DropDebugger
+)
+
+// MangleQuoted controls whether quoted property names may be mangled.
+type MangleQuoted uint8
+
+const (
+	MangleQuotedFalse MangleQuoted = iota
+	MangleQuotedTrue
+)
+
+////////////////////////////////////////////////////////////////////////////////
+// Build API
+
+// BuildOptions holds every setting accepted by the Build and Context
+// functions. Each field mirrors one of esbuild's command-line flags; see
+// the per-field documentation links. The zero value is a valid default.
+type BuildOptions struct {
+	Color       StderrColor         // Documentation: https://esbuild.github.io/api/#color
+	LogLevel    LogLevel            // Documentation: https://esbuild.github.io/api/#log-level
+	LogLimit    int                 // Documentation: https://esbuild.github.io/api/#log-limit
+	LogOverride map[string]LogLevel // Documentation: https://esbuild.github.io/api/#log-override
+
+	Sourcemap      SourceMap      // Documentation: https://esbuild.github.io/api/#sourcemap
+	SourceRoot     string         // Documentation: https://esbuild.github.io/api/#source-root
+	SourcesContent SourcesContent // Documentation: https://esbuild.github.io/api/#sources-content
+
+	Target    Target          // Documentation: https://esbuild.github.io/api/#target
+	Engines   []Engine        // Documentation: https://esbuild.github.io/api/#target
+	Supported map[string]bool // Documentation: https://esbuild.github.io/api/#supported
+
+	MangleProps       string                 // Documentation: https://esbuild.github.io/api/#mangle-props
+	ReserveProps      string                 // Documentation: https://esbuild.github.io/api/#mangle-props
+	MangleQuoted      MangleQuoted           // Documentation: https://esbuild.github.io/api/#mangle-props
+	MangleCache       map[string]interface{} // Documentation: https://esbuild.github.io/api/#mangle-props
+	Drop              Drop                   // Documentation: https://esbuild.github.io/api/#drop
+	DropLabels        []string               // Documentation: https://esbuild.github.io/api/#drop-labels
+	MinifyWhitespace  bool                   // Documentation: https://esbuild.github.io/api/#minify
+	MinifyIdentifiers bool                   // Documentation: https://esbuild.github.io/api/#minify
+	MinifySyntax      bool                   // Documentation: https://esbuild.github.io/api/#minify
+	LineLimit         int                    // Documentation: https://esbuild.github.io/api/#line-limit
+	Charset           Charset                // Documentation: https://esbuild.github.io/api/#charset
+	TreeShaking       TreeShaking            // Documentation: https://esbuild.github.io/api/#tree-shaking
+	IgnoreAnnotations bool                   // Documentation: https://esbuild.github.io/api/#ignore-annotations
+	LegalComments     LegalComments          // Documentation: https://esbuild.github.io/api/#legal-comments
+
+	JSX             JSX    // Documentation: https://esbuild.github.io/api/#jsx-mode
+	JSXFactory      string // Documentation: https://esbuild.github.io/api/#jsx-factory
+	JSXFragment     string // Documentation: https://esbuild.github.io/api/#jsx-fragment
+	JSXImportSource string // Documentation: https://esbuild.github.io/api/#jsx-import-source
+	JSXDev          bool   // Documentation: https://esbuild.github.io/api/#jsx-dev
+	JSXSideEffects  bool   // Documentation: https://esbuild.github.io/api/#jsx-side-effects
+
+	Define    map[string]string // Documentation: https://esbuild.github.io/api/#define
+	Pure      []string          // Documentation: https://esbuild.github.io/api/#pure
+	KeepNames bool              // Documentation: https://esbuild.github.io/api/#keep-names
+
+	GlobalName        string            // Documentation: https://esbuild.github.io/api/#global-name
+	Bundle            bool              // Documentation: https://esbuild.github.io/api/#bundle
+	PreserveSymlinks  bool              // Documentation: https://esbuild.github.io/api/#preserve-symlinks
+	Splitting         bool              // Documentation: https://esbuild.github.io/api/#splitting
+	Outfile           string            // Documentation: https://esbuild.github.io/api/#outfile
+	Metafile          bool              // Documentation: https://esbuild.github.io/api/#metafile
+	Outdir            string            // Documentation: https://esbuild.github.io/api/#outdir
+	Outbase           string            // Documentation: https://esbuild.github.io/api/#outbase
+	AbsWorkingDir     string            // Documentation: https://esbuild.github.io/api/#working-directory
+	Platform          Platform          // Documentation: https://esbuild.github.io/api/#platform
+	Format            Format            // Documentation: https://esbuild.github.io/api/#format
+	External          []string          // Documentation: https://esbuild.github.io/api/#external
+	Packages          Packages          // Documentation: https://esbuild.github.io/api/#packages
+	Alias             map[string]string // Documentation: https://esbuild.github.io/api/#alias
+	MainFields        []string          // Documentation: https://esbuild.github.io/api/#main-fields
+	Conditions        []string          // Documentation: https://esbuild.github.io/api/#conditions
+	Loader            map[string]Loader // Documentation: https://esbuild.github.io/api/#loader
+	ResolveExtensions []string          // Documentation: https://esbuild.github.io/api/#resolve-extensions
+	Tsconfig          string            // Documentation: https://esbuild.github.io/api/#tsconfig
+	TsconfigRaw       string            // Documentation: https://esbuild.github.io/api/#tsconfig-raw
+	OutExtension      map[string]string // Documentation: https://esbuild.github.io/api/#out-extension
+	PublicPath        string            // Documentation: https://esbuild.github.io/api/#public-path
+	Inject            []string          // Documentation: https://esbuild.github.io/api/#inject
+	Banner            map[string]string // Documentation: https://esbuild.github.io/api/#banner
+	Footer            map[string]string // Documentation: https://esbuild.github.io/api/#footer
+	NodePaths         []string          // Documentation: https://esbuild.github.io/api/#node-paths
+
+	EntryNames string // Documentation: https://esbuild.github.io/api/#entry-names
+	ChunkNames string // Documentation: https://esbuild.github.io/api/#chunk-names
+	AssetNames string // Documentation: https://esbuild.github.io/api/#asset-names
+
+	EntryPoints         []string     // Documentation: https://esbuild.github.io/api/#entry-points
+	EntryPointsAdvanced []EntryPoint // Documentation: https://esbuild.github.io/api/#entry-points
+
+	Stdin          *StdinOptions // Documentation: https://esbuild.github.io/api/#stdin
+	Write          bool          // Documentation: https://esbuild.github.io/api/#write
+	AllowOverwrite bool          // Documentation: https://esbuild.github.io/api/#allow-overwrite
+	Plugins        []Plugin      // Documentation: https://esbuild.github.io/plugins/
+}
+
+// EntryPoint pairs an input path with an explicit output path, for when
+// the automatic output naming is not what you want.
+type EntryPoint struct {
+	InputPath  string
+	OutputPath string
+}
+
+// StdinOptions describes a virtual input file provided as a string
+// instead of being read from the file system.
+type StdinOptions struct {
+	Contents   string
+	ResolveDir string
+	Sourcefile string
+	Loader     Loader
+}
+
+// BuildResult is what Build and Rebuild return: messages plus (when
+// Write is false) the generated output files.
+type BuildResult struct {
+	Errors   []Message
+	Warnings []Message
+
+	OutputFiles []OutputFile
+	Metafile    string
+	MangleCache map[string]interface{}
+}
+
+// OutputFile is one generated file: its absolute path, contents, and a
+// hash of the contents.
+type OutputFile struct {
+	Path     string
+	Contents []byte
+	Hash     string
+}
+
+// Build runs a single end-to-end build as a one-shot convenience wrapper
+// around the Context API: it creates a context, rebuilds once, prints a
+// summary, and disposes of the context.
+//
+// Documentation: https://esbuild.github.io/api/#build
+func Build(options BuildOptions) BuildResult {
+	start := time.Now()
+
+	// Validate the options and create the build context. A nil context
+	// means option validation failed.
+	ctx, errors := contextImpl(options)
+	if ctx == nil {
+		return BuildResult{Errors: errors}
+	}
+
+	result := ctx.Rebuild()
+
+	// Print a summary of the generated files to stderr. Except don't do
+	// this if the terminal is already being used for something else.
+	if ctx.args.logOptions.LogLevel <= logger.LevelInfo && !ctx.args.options.WriteToStdout {
+		printSummary(ctx.args.logOptions.Color, result.OutputFiles, start)
+	}
+
+	ctx.Dispose()
+	return result
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Transform API
+
+// TransformOptions holds every setting accepted by the Transform
+// function. It is the single-file subset of BuildOptions; each field
+// mirrors a command-line flag (see the documentation links).
+type TransformOptions struct {
+	Color       StderrColor         // Documentation: https://esbuild.github.io/api/#color
+	LogLevel    LogLevel            // Documentation: https://esbuild.github.io/api/#log-level
+	LogLimit    int                 // Documentation: https://esbuild.github.io/api/#log-limit
+	LogOverride map[string]LogLevel // Documentation: https://esbuild.github.io/api/#log-override
+
+	Sourcemap      SourceMap      // Documentation: https://esbuild.github.io/api/#sourcemap
+	SourceRoot     string         // Documentation: https://esbuild.github.io/api/#source-root
+	SourcesContent SourcesContent // Documentation: https://esbuild.github.io/api/#sources-content
+
+	Target    Target          // Documentation: https://esbuild.github.io/api/#target
+	Engines   []Engine        // Documentation: https://esbuild.github.io/api/#target
+	Supported map[string]bool // Documentation: https://esbuild.github.io/api/#supported
+
+	Platform   Platform // Documentation: https://esbuild.github.io/api/#platform
+	Format     Format   // Documentation: https://esbuild.github.io/api/#format
+	GlobalName string   // Documentation: https://esbuild.github.io/api/#global-name
+
+	MangleProps       string                 // Documentation: https://esbuild.github.io/api/#mangle-props
+	ReserveProps      string                 // Documentation: https://esbuild.github.io/api/#mangle-props
+	MangleQuoted      MangleQuoted           // Documentation: https://esbuild.github.io/api/#mangle-props
+	MangleCache       map[string]interface{} // Documentation: https://esbuild.github.io/api/#mangle-props
+	Drop              Drop                   // Documentation: https://esbuild.github.io/api/#drop
+	DropLabels        []string               // Documentation: https://esbuild.github.io/api/#drop-labels
+	MinifyWhitespace  bool                   // Documentation: https://esbuild.github.io/api/#minify
+	MinifyIdentifiers bool                   // Documentation: https://esbuild.github.io/api/#minify
+	MinifySyntax      bool                   // Documentation: https://esbuild.github.io/api/#minify
+	LineLimit         int                    // Documentation: https://esbuild.github.io/api/#line-limit
+	Charset           Charset                // Documentation: https://esbuild.github.io/api/#charset
+	TreeShaking       TreeShaking            // Documentation: https://esbuild.github.io/api/#tree-shaking
+	IgnoreAnnotations bool                   // Documentation: https://esbuild.github.io/api/#ignore-annotations
+	LegalComments     LegalComments          // Documentation: https://esbuild.github.io/api/#legal-comments
+
+	JSX             JSX    // Documentation: https://esbuild.github.io/api/#jsx
+	JSXFactory      string // Documentation: https://esbuild.github.io/api/#jsx-factory
+	JSXFragment     string // Documentation: https://esbuild.github.io/api/#jsx-fragment
+	JSXImportSource string // Documentation: https://esbuild.github.io/api/#jsx-import-source
+	JSXDev          bool   // Documentation: https://esbuild.github.io/api/#jsx-dev
+	JSXSideEffects  bool   // Documentation: https://esbuild.github.io/api/#jsx-side-effects
+
+	TsconfigRaw string // Documentation: https://esbuild.github.io/api/#tsconfig-raw
+	Banner      string // Documentation: https://esbuild.github.io/api/#banner
+	Footer      string // Documentation: https://esbuild.github.io/api/#footer
+
+	Define    map[string]string // Documentation: https://esbuild.github.io/api/#define
+	Pure      []string          // Documentation: https://esbuild.github.io/api/#pure
+	KeepNames bool              // Documentation: https://esbuild.github.io/api/#keep-names
+
+	Sourcefile string // Documentation: https://esbuild.github.io/api/#sourcefile
+	Loader     Loader // Documentation: https://esbuild.github.io/api/#loader
+}
+
+// TransformResult is what Transform returns: messages plus the
+// generated code, source map, and extracted legal comments.
+type TransformResult struct {
+	Errors   []Message
+	Warnings []Message
+
+	Code          []byte
+	Map           []byte
+	LegalComments []byte
+
+	MangleCache map[string]interface{}
+}
+
+// Transform converts a single string of source code according to the
+// given options and returns the result without touching the file system.
+//
+// Documentation: https://esbuild.github.io/api/#transform
+func Transform(input string, options TransformOptions) TransformResult {
+	return transformImpl(input, options)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Context API
+
+// ServeOptions configures the built-in development web server.
+//
+// Documentation: https://esbuild.github.io/api/#serve-arguments
+type ServeOptions struct {
+	Port      uint16
+	Host      string
+	Servedir  string
+	Keyfile   string
+	Certfile  string
+	Fallback  string
+	OnRequest func(ServeOnRequestArgs)
+}
+
+// ServeOnRequestArgs describes one HTTP request handled by the dev
+// server, passed to the ServeOptions.OnRequest callback.
+type ServeOnRequestArgs struct {
+	RemoteAddress string
+	Method        string
+	Path          string
+	Status        int
+	TimeInMS      int // The time to generate the response, not to send it
+}
+
+// ServeResult reports where the dev server actually ended up listening.
+//
+// Documentation: https://esbuild.github.io/api/#serve-return-values
+type ServeResult struct {
+	Port uint16
+	Host string
+}
+
+// WatchOptions configures watch mode. It currently has no settings but
+// exists so options can be added without breaking the API.
+type WatchOptions struct {
+}
+
+// BuildContext is a reusable build created by Context. It supports
+// incremental rebuilds, watch mode, and serving, and must eventually be
+// released with Dispose.
+type BuildContext interface {
+	// Documentation: https://esbuild.github.io/api/#rebuild
+	Rebuild() BuildResult
+
+	// Documentation: https://esbuild.github.io/api/#watch
+	Watch(options WatchOptions) error
+
+	// Documentation: https://esbuild.github.io/api/#serve
+	Serve(options ServeOptions) (ServeResult, error)
+
+	Cancel()
+	Dispose()
+}
+
+// ContextError is returned by Context when option validation fails.
+type ContextError struct {
+	Errors []Message // Option validation errors are returned here
+}
+
+// Error implements the error interface by returning the first
+// validation message, or a generic fallback when there are none.
+func (err *ContextError) Error() string {
+	if len(err.Errors) > 0 {
+		return err.Errors[0].Text
+	}
+	return "Context creation failed"
+}
+
+// Context validates the given options and returns a reusable build
+// context, or a ContextError if the options are invalid.
+//
+// Documentation: https://esbuild.github.io/api/#build
+func Context(buildOptions BuildOptions) (BuildContext, *ContextError) {
+	ctx, errors := contextImpl(buildOptions)
+	if ctx == nil {
+		return nil, &ContextError{Errors: errors}
+	}
+	return ctx, nil
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Plugin API
+
+// SideEffects declares whether a resolved module may have side effects,
+// which affects tree shaking.
+type SideEffects uint8
+
+const (
+	SideEffectsTrue SideEffects = iota
+	SideEffectsFalse
+)
+
+// Plugin is a named setup function that registers callbacks on a build.
+type Plugin struct {
+	Name  string
+	Setup func(PluginBuild)
+}
+
+// PluginBuild is the handle passed to a plugin's Setup function. It
+// exposes the build options plus hooks into the resolve/load pipeline.
+type PluginBuild struct {
+	// Documentation: https://esbuild.github.io/plugins/#build-options
+	InitialOptions *BuildOptions
+
+	// Documentation: https://esbuild.github.io/plugins/#resolve
+	Resolve func(path string, options ResolveOptions) ResolveResult
+
+	// Documentation: https://esbuild.github.io/plugins/#on-start
+	OnStart func(callback func() (OnStartResult, error))
+
+	// Documentation: https://esbuild.github.io/plugins/#on-end
+	OnEnd func(callback func(result *BuildResult) (OnEndResult, error))
+
+	// Documentation: https://esbuild.github.io/plugins/#on-resolve
+	OnResolve func(options OnResolveOptions, callback func(OnResolveArgs) (OnResolveResult, error))
+
+	// Documentation: https://esbuild.github.io/plugins/#on-load
+	OnLoad func(options OnLoadOptions, callback func(OnLoadArgs) (OnLoadResult, error))
+
+	// Documentation: https://esbuild.github.io/plugins/#on-dispose
+	OnDispose func(callback func())
+}
+
+// ResolveOptions configures a manual path resolution via PluginBuild.Resolve.
+//
+// Documentation: https://esbuild.github.io/plugins/#resolve-options
+type ResolveOptions struct {
+	PluginName string
+	Importer   string
+	Namespace  string
+	ResolveDir string
+	Kind       ResolveKind
+	PluginData interface{}
+	With       map[string]string
+}
+
+// ResolveResult is the outcome of a manual path resolution.
+//
+// Documentation: https://esbuild.github.io/plugins/#resolve-results
+type ResolveResult struct {
+	Errors   []Message
+	Warnings []Message
+
+	Path        string
+	External    bool
+	SideEffects bool
+	Namespace   string
+	Suffix      string
+	PluginData  interface{}
+}
+
+// OnStartResult carries messages returned by an OnStart callback.
+type OnStartResult struct {
+	Errors   []Message
+	Warnings []Message
+}
+
+// OnEndResult carries messages returned by an OnEnd callback.
+type OnEndResult struct {
+	Errors   []Message
+	Warnings []Message
+}
+
+// OnResolveOptions filters which import paths an OnResolve callback sees.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-resolve-options
+type OnResolveOptions struct {
+	Filter    string
+	Namespace string
+}
+
+// OnResolveArgs describes the import being resolved.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-resolve-arguments
+type OnResolveArgs struct {
+	Path       string
+	Importer   string
+	Namespace  string
+	ResolveDir string
+	Kind       ResolveKind
+	PluginData interface{}
+	With       map[string]string
+}
+
+// OnResolveResult is what an OnResolve callback returns to direct the
+// resolution of an import path.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-resolve-results
+type OnResolveResult struct {
+	PluginName string
+
+	Errors   []Message
+	Warnings []Message
+
+	Path        string
+	External    bool
+	SideEffects SideEffects
+	Namespace   string
+	Suffix      string
+	PluginData  interface{}
+
+	WatchFiles []string
+	WatchDirs  []string
+}
+
+// OnLoadOptions filters which paths an OnLoad callback sees.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-load-options
+type OnLoadOptions struct {
+	Filter    string
+	Namespace string
+}
+
+// OnLoadArgs describes the module being loaded.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-load-arguments
+type OnLoadArgs struct {
+	Path       string
+	Namespace  string
+	Suffix     string
+	PluginData interface{}
+	With       map[string]string
+}
+
+// OnLoadResult is what an OnLoad callback returns to supply a module's
+// contents.
+//
+// Documentation: https://esbuild.github.io/plugins/#on-load-results
+type OnLoadResult struct {
+	PluginName string
+
+	Errors   []Message
+	Warnings []Message
+
+	Contents   *string
+	ResolveDir string
+	Loader     Loader
+	PluginData interface{}
+
+	WatchFiles []string
+	WatchDirs  []string
+}
+
+// ResolveKind classifies why a path is being resolved (entry point,
+// import statement, require call, CSS @import, and so on).
+type ResolveKind uint8
+
+const (
+	ResolveNone ResolveKind = iota
+	ResolveEntryPoint
+	ResolveJSImportStatement
+	ResolveJSRequireCall
+	ResolveJSDynamicImport
+	ResolveJSRequireResolve
+	ResolveCSSImportRule
+	ResolveCSSComposesFrom
+	ResolveCSSURLToken
+)
+
+////////////////////////////////////////////////////////////////////////////////
+// FormatMessages API
+
+// MessageKind distinguishes errors from warnings when formatting.
+type MessageKind uint8
+
+const (
+	ErrorMessage MessageKind = iota
+	WarningMessage
+)
+
+// FormatMessagesOptions controls how messages are rendered as text.
+type FormatMessagesOptions struct {
+	TerminalWidth int
+	Kind          MessageKind
+	Color         bool
+}
+
+// FormatMessages renders messages the same way esbuild prints them to
+// the terminal, one string per message.
+func FormatMessages(msgs []Message, opts FormatMessagesOptions) []string {
+	return formatMsgsImpl(msgs, opts)
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// AnalyzeMetafile API
+
+// AnalyzeMetafileOptions controls the metafile analysis output.
+type AnalyzeMetafileOptions struct {
+	Color   bool
+	Verbose bool
+}
+
+// AnalyzeMetafile renders a human-readable size breakdown of a build's
+// JSON metafile, like the CLI's --analyze flag.
+//
+// Documentation: https://esbuild.github.io/api/#analyze
+func AnalyzeMetafile(metafile string, opts AnalyzeMetafileOptions) string {
+	return analyzeMetafileImpl(metafile, opts)
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/api_impl.go b/source/vendor/github.com/evanw/esbuild/pkg/api/api_impl.go
new file mode 100644
index 0000000..7c73777
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/api_impl.go
@@ -0,0 +1,2530 @@
+package api
+
+// This file implements most of the API. This includes the "Build", "Transform",
+// "FormatMessages", and "AnalyzeMetafile" functions.
+
+import (
+	"bytes"
+	"encoding/base64"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"math"
+	"os"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+	"unicode/utf8"
+
+	"github.com/evanw/esbuild/internal/api_helpers"
+	"github.com/evanw/esbuild/internal/ast"
+	"github.com/evanw/esbuild/internal/bundler"
+	"github.com/evanw/esbuild/internal/cache"
+	"github.com/evanw/esbuild/internal/compat"
+	"github.com/evanw/esbuild/internal/config"
+	"github.com/evanw/esbuild/internal/css_ast"
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/graph"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/js_ast"
+	"github.com/evanw/esbuild/internal/js_parser"
+	"github.com/evanw/esbuild/internal/linker"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/resolver"
+	"github.com/evanw/esbuild/internal/xxhash"
+)
+
+// validatePathTemplate parses an output-name template string such as
+// "[dir]/[name]-[hash][ext]" into a sequence of config.PathTemplate
+// parts, each holding literal text followed by at most one placeholder.
+// An empty template yields nil. Backslashes are normalized to forward
+// slashes and the template is made explicitly relative with "./".
+func validatePathTemplate(template string) []config.PathTemplate {
+	if template == "" {
+		return nil
+	}
+	template = "./" + strings.ReplaceAll(template, "\\", "/")
+
+	parts := make([]config.PathTemplate, 0, 4)
+	search := 0
+
+	// Split by placeholders
+	for search < len(template) {
+		// Jump to the next "["
+		if found := strings.IndexByte(template[search:], '['); found == -1 {
+			break
+		} else {
+			search += found
+		}
+		head, tail := template[:search], template[search:]
+		placeholder := config.NoPlaceholder
+
+		// Check for a placeholder
+		switch {
+		case strings.HasPrefix(tail, "[dir]"):
+			placeholder = config.DirPlaceholder
+			search += len("[dir]")
+
+		case strings.HasPrefix(tail, "[name]"):
+			placeholder = config.NamePlaceholder
+			search += len("[name]")
+
+		case strings.HasPrefix(tail, "[hash]"):
+			placeholder = config.HashPlaceholder
+			search += len("[hash]")
+
+		case strings.HasPrefix(tail, "[ext]"):
+			placeholder = config.ExtPlaceholder
+			search += len("[ext]")
+
+		default:
+			// Skip past the "[" so we don't find it again
+			search++
+			continue
+		}
+
+		// Add a part for everything up to and including this placeholder
+		parts = append(parts, config.PathTemplate{
+			Data:        head,
+			Placeholder: placeholder,
+		})
+
+		// Reset the search after this placeholder
+		template = template[search:]
+		search = 0
+	}
+
+	// Append any remaining data as a part without a placeholder
+	if search < len(template) {
+		parts = append(parts, config.PathTemplate{
+			Data:        template,
+			Placeholder: config.NoPlaceholder,
+		})
+	}
+
+	return parts
+}
+
+// validatePlatform maps the public Platform option to the internal
+// config value. The default platform is the browser.
+func validatePlatform(value Platform) config.Platform {
+	switch value {
+	case PlatformDefault, PlatformBrowser:
+		return config.PlatformBrowser
+	case PlatformNode:
+		return config.PlatformNode
+	case PlatformNeutral:
+		return config.PlatformNeutral
+	default:
+		panic("Invalid platform")
+	}
+}
+
+// validateFormat maps the public Format option to the internal config
+// value. The default preserves the input's module format.
+func validateFormat(value Format) config.Format {
+	switch value {
+	case FormatDefault:
+		return config.FormatPreserve
+	case FormatIIFE:
+		return config.FormatIIFE
+	case FormatCommonJS:
+		return config.FormatCommonJS
+	case FormatESModule:
+		return config.FormatESModule
+	default:
+		panic("Invalid format")
+	}
+}
+
+// validateSourceMap maps the public SourceMap option to the internal
+// config value, distinguishing linked (with comment) from external
+// (without comment) output.
+func validateSourceMap(value SourceMap) config.SourceMap {
+	switch value {
+	case SourceMapNone:
+		return config.SourceMapNone
+	case SourceMapLinked:
+		return config.SourceMapLinkedWithComment
+	case SourceMapInline:
+		return config.SourceMapInline
+	case SourceMapExternal:
+		return config.SourceMapExternalWithoutComment
+	case SourceMapInlineAndExternal:
+		return config.SourceMapInlineAndExternal
+	default:
+		panic("Invalid source map")
+	}
+}
+
+// validateLegalComments maps the public LegalComments option to the
+// internal config value. When no explicit choice was made the default
+// depends on whether we are bundling: bundled output collects legal
+// comments at the end of the file, unbundled output keeps them inline.
+func validateLegalComments(value LegalComments, bundle bool) config.LegalComments {
+	switch value {
+	case LegalCommentsDefault:
+		if bundle {
+			return config.LegalCommentsEndOfFile
+		} else {
+			return config.LegalCommentsInline
+		}
+	case LegalCommentsNone:
+		return config.LegalCommentsNone
+	case LegalCommentsInline:
+		return config.LegalCommentsInline
+	case LegalCommentsEndOfFile:
+		return config.LegalCommentsEndOfFile
+	case LegalCommentsLinked:
+		return config.LegalCommentsLinkedWithComment
+	case LegalCommentsExternal:
+		return config.LegalCommentsExternalWithoutComment
+	default:
+		// Bug fix: this panic message previously said "Invalid source
+		// map", copy-pasted from validateSourceMap above.
+		panic("Invalid legal comments")
+	}
+}
+
+// validateColor maps the public StderrColor option to the logger's
+// color setting. The default auto-detects a terminal.
+func validateColor(value StderrColor) logger.UseColor {
+	switch value {
+	case ColorIfTerminal:
+		return logger.ColorIfTerminal
+	case ColorNever:
+		return logger.ColorNever
+	case ColorAlways:
+		return logger.ColorAlways
+	default:
+		panic("Invalid color")
+	}
+}
+
+// validateLogLevel maps the public LogLevel option to the logger's
+// level enumeration.
+func validateLogLevel(value LogLevel) logger.LogLevel {
+	switch value {
+	case LogLevelVerbose:
+		return logger.LevelVerbose
+	case LogLevelDebug:
+		return logger.LevelDebug
+	case LogLevelInfo:
+		return logger.LevelInfo
+	case LogLevelWarning:
+		return logger.LevelWarning
+	case LogLevelError:
+		return logger.LevelError
+	case LogLevelSilent:
+		return logger.LevelSilent
+	default:
+		panic("Invalid log level")
+	}
+}
+
+// validateASCIIOnly reports whether output must be escaped to ASCII.
+// Note that the default charset is ASCII, not UTF-8.
+func validateASCIIOnly(value Charset) bool {
+	switch value {
+	case CharsetDefault, CharsetASCII:
+		return true
+	case CharsetUTF8:
+		return false
+	default:
+		panic("Invalid charset")
+	}
+}
+
+// validateExternalPackages reports whether all package imports should
+// be treated as external (the --packages=external setting).
+func validateExternalPackages(value Packages) bool {
+	switch value {
+	case PackagesDefault, PackagesBundle:
+		return false
+	case PackagesExternal:
+		return true
+	default:
+		panic("Invalid packages")
+	}
+}
+
+// validateTreeShaking reports whether tree shaking should be enabled
+// given the explicit setting, bundling mode, and output format.
+func validateTreeShaking(value TreeShaking, bundle bool, format Format) bool {
+	switch value {
+	case TreeShakingDefault:
+		// If we're in an IIFE then there's no way to concatenate additional code
+		// to the end of our output so we assume tree shaking is safe. And when
+		// bundling we assume that tree shaking is safe because if you want to add
+		// code to the bundle, you should be doing that by including it in the
+		// bundle instead of concatenating it afterward, so we also assume tree
+		// shaking is safe then. Otherwise we assume tree shaking is not safe.
+		return bundle || format == FormatIIFE
+	case TreeShakingFalse:
+		return false
+	case TreeShakingTrue:
+		return true
+	default:
+		panic("Invalid tree shaking")
+	}
+}
+
// validateLoader maps the public Loader enum onto the internal config.Loader
// enum one-to-one, panicking on an out-of-range value (a programmer error).
func validateLoader(value Loader) config.Loader {
	switch value {
	case LoaderBase64:
		return config.LoaderBase64
	case LoaderBinary:
		return config.LoaderBinary
	case LoaderCopy:
		return config.LoaderCopy
	case LoaderCSS:
		return config.LoaderCSS
	case LoaderDataURL:
		return config.LoaderDataURL
	case LoaderDefault:
		return config.LoaderDefault
	case LoaderEmpty:
		return config.LoaderEmpty
	case LoaderFile:
		return config.LoaderFile
	case LoaderGlobalCSS:
		return config.LoaderGlobalCSS
	case LoaderJS:
		return config.LoaderJS
	case LoaderJSON:
		return config.LoaderJSON
	case LoaderJSX:
		return config.LoaderJSX
	case LoaderLocalCSS:
		return config.LoaderLocalCSS
	case LoaderNone:
		return config.LoaderNone
	case LoaderText:
		return config.LoaderText
	case LoaderTS:
		return config.LoaderTS
	case LoaderTSX:
		return config.LoaderTSX
	default:
		panic("Invalid loader")
	}
}
+
// versionRegex matches engine versions of the form "X", "X.Y", or "X.Y.Z"
// with an optional "-prerelease" suffix (e.g. "12", "1.2.3", "16.0.0-beta.1").
// Capture groups: 1=major, 2=minor, 3=patch, 4=prerelease (including the "-").
// Compiled once at package scope to keep it off hot paths.
var versionRegex = regexp.MustCompile(`^([0-9]+)(?:\.([0-9]+))?(?:\.([0-9]+))?(-[A-Za-z0-9]+(?:\.[A-Za-z0-9]+)*)?$`)
+
// validateFeatures converts the public "target" and "engines" settings into
// the internal compatibility tables: the JS features that are unsupported in
// the target environment (and must be transformed away), the unsupported CSS
// features, the CSS vendor-prefix data, and a human-readable description of
// the target environment for use in error messages.
func validateFeatures(log logger.Log, target Target, engines []Engine) (compat.JSFeature, compat.CSSFeature, map[css_ast.D]compat.CSSPrefix, string) {
	// No explicit target and no engines means everything is supported
	if target == DefaultTarget && len(engines) == 0 {
		return 0, 0, nil, ""
	}

	constraints := make(map[compat.Engine]compat.Semver)
	targets := make([]string, 0, 1+len(engines))

	switch target {
	case ES5:
		constraints[compat.ES] = compat.Semver{Parts: []int{5}}
	case ES2015:
		constraints[compat.ES] = compat.Semver{Parts: []int{2015}}
	case ES2016:
		constraints[compat.ES] = compat.Semver{Parts: []int{2016}}
	case ES2017:
		constraints[compat.ES] = compat.Semver{Parts: []int{2017}}
	case ES2018:
		constraints[compat.ES] = compat.Semver{Parts: []int{2018}}
	case ES2019:
		constraints[compat.ES] = compat.Semver{Parts: []int{2019}}
	case ES2020:
		constraints[compat.ES] = compat.Semver{Parts: []int{2020}}
	case ES2021:
		constraints[compat.ES] = compat.Semver{Parts: []int{2021}}
	case ES2022:
		constraints[compat.ES] = compat.Semver{Parts: []int{2022}}
	case ES2023:
		constraints[compat.ES] = compat.Semver{Parts: []int{2023}}
	case ES2024:
		constraints[compat.ES] = compat.Semver{Parts: []int{2024}}
	case ESNext, DefaultTarget:
		// No ES version constraint for "esnext" or the default target
	default:
		panic("Invalid target")
	}

	for _, engine := range engines {
		// Minor and patch are optional; note that a missing minor also drops
		// the patch because the Atoi calls are nested
		if match := versionRegex.FindStringSubmatch(engine.Version); match != nil {
			if major, err := strconv.Atoi(match[1]); err == nil {
				parts := []int{major}
				if minor, err := strconv.Atoi(match[2]); err == nil {
					parts = append(parts, minor)
					if patch, err := strconv.Atoi(match[3]); err == nil {
						parts = append(parts, patch)
					}
				}
				constraints[convertEngineName(engine.Name)] = compat.Semver{
					Parts:      parts,
					PreRelease: match[4],
				}
				continue
			}
		}

		text := "All version numbers passed to esbuild must be in the format \"X\", \"X.Y\", or \"X.Y.Z\" where X, Y, and Z are non-negative integers."

		log.AddErrorWithNotes(nil, logger.Range{}, fmt.Sprintf("Invalid version: %q", engine.Version),
			[]logger.MsgData{{Text: text}})
	}

	for engine, version := range constraints {
		targets = append(targets, engine.String()+version.String())
	}
	if target == ESNext {
		targets = append(targets, "esnext")
	}

	// Sort for determinism since map iteration order is randomized
	sort.Strings(targets)
	targetEnv := helpers.StringArrayToQuotedCommaSeparatedString(targets)

	return compat.UnsupportedJSFeatures(constraints), compat.UnsupportedCSSFeatures(constraints), compat.CSSPrefixData(constraints), targetEnv
}
+
+func validateSupported(log logger.Log, supported map[string]bool) (
+	jsFeature compat.JSFeature,
+	jsMask compat.JSFeature,
+	cssFeature compat.CSSFeature,
+	cssMask compat.CSSFeature,
+) {
+	for k, v := range supported {
+		if js, ok := compat.StringToJSFeature[k]; ok {
+			jsMask |= js
+			if !v {
+				jsFeature |= js
+			}
+		} else if css, ok := compat.StringToCSSFeature[k]; ok {
+			cssMask |= css
+			if !v {
+				cssFeature |= css
+			}
+		} else {
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("%q is not a valid feature name for the \"supported\" setting", k))
+		}
+	}
+	return
+}
+
+func validateGlobalName(log logger.Log, text string) []string {
+	if text != "" {
+		source := logger.Source{
+			KeyPath:    logger.Path{Text: "(global path)"},
+			PrettyPath: "(global name)",
+			Contents:   text,
+		}
+
+		if result, ok := js_parser.ParseGlobalName(log, source); ok {
+			return result
+		}
+	}
+
+	return nil
+}
+
+func validateRegex(log logger.Log, what string, value string) *regexp.Regexp {
+	if value == "" {
+		return nil
+	}
+	regex, err := regexp.Compile(value)
+	if err != nil {
+		log.AddError(nil, logger.Range{},
+			fmt.Sprintf("The %q setting is not a valid Go regular expression: %s", what, value))
+		return nil
+	}
+	return regex
+}
+
// validateExternals compiles the "external" path list into two matcher sets:
// one applied before path resolution (against the import path as written) and
// one applied after (against the resolved absolute path). Each entry may
// contain at most one "*" wildcard.
func validateExternals(log logger.Log, fs fs.FS, paths []string) config.ExternalSettings {
	result := config.ExternalSettings{
		PreResolve:  config.ExternalMatchers{Exact: make(map[string]bool)},
		PostResolve: config.ExternalMatchers{Exact: make(map[string]bool)},
	}

	for _, path := range paths {
		if index := strings.IndexByte(path, '*'); index != -1 {
			// Wildcard behavior
			if strings.ContainsRune(path[index+1:], '*') {
				log.AddError(nil, logger.Range{}, fmt.Sprintf("External path %q cannot have more than one \"*\" wildcard", path))
			} else {
				result.PreResolve.Patterns = append(result.PreResolve.Patterns, config.WildcardPattern{Prefix: path[:index], Suffix: path[index+1:]})
				// Non-package (filesystem) wildcard paths are also matched
				// against the resolved absolute path, where the "*" may have
				// shifted position
				if !resolver.IsPackagePath(path) {
					if absPath := validatePath(log, fs, path, "external path"); absPath != "" {
						if absIndex := strings.IndexByte(absPath, '*'); absIndex != -1 && !strings.ContainsRune(absPath[absIndex+1:], '*') {
							result.PostResolve.Patterns = append(result.PostResolve.Patterns, config.WildcardPattern{Prefix: absPath[:absIndex], Suffix: absPath[absIndex+1:]})
						}
					}
				}
			}
		} else {
			// Non-wildcard behavior
			result.PreResolve.Exact[path] = true
			if resolver.IsPackagePath(path) {
				// Also treat subpaths of an external package as external
				result.PreResolve.Patterns = append(result.PreResolve.Patterns, config.WildcardPattern{Prefix: path + "/"})
			} else if absPath := validatePath(log, fs, path, "external path"); absPath != "" {
				result.PostResolve.Exact[absPath] = true
			}
		}
	}

	return result
}
+
// validateAlias validates the "alias" substitution map. Alias names must be
// bare package-style paths (optionally npm-scoped); relative, absolute, and
// non-normalized names are rejected, as are empty substitutions. Only the
// valid entries are returned; invalid ones are reported and dropped.
func validateAlias(log logger.Log, fs fs.FS, alias map[string]string) map[string]string {
	valid := make(map[string]string, len(alias))

	for old, new := range alias {
		if new == "" {
			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid alias substitution: %q", new))
			continue
		}

		// Valid alias names:
		//   "foo"
		//   "foo/bar"
		//   "@foo"
		//   "@foo/bar"
		//   "@foo/bar/baz"
		//
		// Invalid alias names:
		//   "./foo"
		//   "../foo"
		//   "/foo"
		//   "C:\\foo"
		//   ".foo"
		//   "foo/"
		//   "@foo/"
		//   "foo/../bar"
		//
		// The path.Clean round-trip rejects anything that isn't already a
		// normalized slash-separated path (trailing slashes, "..", etc.)
		if !strings.HasPrefix(old, ".") && !strings.HasPrefix(old, "/") && !fs.IsAbs(old) && path.Clean(strings.ReplaceAll(old, "\\", "/")) == old {
			valid[old] = new
			continue
		}

		log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid alias name: %q", old))
	}

	return valid
}
+
// isValidExtension reports whether ext looks like a usable file extension:
// at least two characters, starting with "." and not ending with ".".
func isValidExtension(ext string) bool {
	if len(ext) < 2 {
		return false
	}
	return strings.HasPrefix(ext, ".") && !strings.HasSuffix(ext, ".")
}
+
+func validateResolveExtensions(log logger.Log, order []string) []string {
+	if order == nil {
+		return []string{".tsx", ".ts", ".jsx", ".js", ".css", ".json"}
+	}
+	for _, ext := range order {
+		if !isValidExtension(ext) {
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid file extension: %q", ext))
+		}
+	}
+	return order
+}
+
+func validateLoaders(log logger.Log, loaders map[string]Loader) map[string]config.Loader {
+	result := bundler.DefaultExtensionToLoaderMap()
+	for ext, loader := range loaders {
+		if ext != "" && !isValidExtension(ext) {
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid file extension: %q", ext))
+		}
+		result[ext] = validateLoader(loader)
+	}
+	return result
+}
+
+func validateJSXExpr(log logger.Log, text string, name string) config.DefineExpr {
+	if text != "" {
+		if expr, _ := js_parser.ParseDefineExprOrJSON(text); len(expr.Parts) > 0 || (name == "fragment" && expr.Constant != nil) {
+			return expr
+		}
+		log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid JSX %s: %q", name, text))
+	}
+	return config.DefineExpr{}
+}
+
// validateDefines validates and compiles the "define" and "pure" settings.
// Simple define values (constants and entity names) become direct
// substitution expressions; complex JSON values become "injected defines"
// that are evaluated once and imported into every file. It also installs the
// browser default for "process.env.NODE_ENV", applies the "drop console"
// behavior, and marks pure functions as removable when unused.
//
// Returns the fully processed define table (shared by all parsers) plus the
// injected defines in a deterministic (sorted) order.
func validateDefines(
	log logger.Log,
	defines map[string]string,
	pureFns []string,
	platform config.Platform,
	isBuildAPI bool,
	minify bool,
	drop Drop,
) (*config.ProcessedDefines, []config.InjectedDefine) {
	rawDefines := make(map[string]config.DefineData)
	var valueToInject map[string]config.InjectedDefine
	var definesToInject []string

	for key, value := range defines {
		// The key must be a dot-separated identifier list
		for _, part := range strings.Split(key, ".") {
			if !js_ast.IsIdentifier(part) {
				if part == key {
					log.AddError(nil, logger.Range{}, fmt.Sprintf("The define key %q must be a valid identifier", key))
				} else {
					log.AddError(nil, logger.Range{}, fmt.Sprintf("The define key %q contains invalid identifier %q", key, part))
				}
				// NOTE(review): this "continue" only skips the rest of the
				// inner loop; the invalid key still falls through to the
				// processing below — confirm this is intended
				continue
			}
		}

		// Parse the value
		defineExpr, injectExpr := js_parser.ParseDefineExprOrJSON(value)

		// Define simple expressions
		if defineExpr.Constant != nil || len(defineExpr.Parts) > 0 {
			rawDefines[key] = config.DefineData{DefineExpr: &defineExpr}

			// Try to be helpful for common mistakes
			if len(defineExpr.Parts) == 1 && key == "process.env.NODE_ENV" {
				data := logger.MsgData{
					Text: fmt.Sprintf("%q is defined as an identifier instead of a string (surround %q with quotes to get a string)", key, value),
				}
				part := defineExpr.Parts[0]

				// The synthesized location/suggestion is phrased in the syntax
				// of whichever API surface the user is calling from
				switch logger.API {
				case logger.CLIAPI:
					data.Location = &logger.MsgLocation{
						File:       "<cli>",
						Line:       1,
						Column:     30,
						Length:     len(part),
						LineText:   fmt.Sprintf("--define:process.env.NODE_ENV=%s", part),
						Suggestion: fmt.Sprintf("\\\"%s\\\"", part),
					}

				case logger.JSAPI:
					data.Location = &logger.MsgLocation{
						File:       "<js>",
						Line:       1,
						Column:     34,
						Length:     len(part) + 2,
						LineText:   fmt.Sprintf("define: { 'process.env.NODE_ENV': '%s' }", part),
						Suggestion: fmt.Sprintf("'\"%s\"'", part),
					}

				case logger.GoAPI:
					data.Location = &logger.MsgLocation{
						File:       "<go>",
						Line:       1,
						Column:     50,
						Length:     len(part) + 2,
						LineText:   fmt.Sprintf("Define: map[string]string{\"process.env.NODE_ENV\": \"%s\"}", part),
						Suggestion: fmt.Sprintf("\"\\\"%s\\\"\"", part),
					}
				}

				log.AddMsgID(logger.MsgID_JS_SuspiciousDefine, logger.Msg{
					Kind: logger.Warning,
					Data: data,
				})
			}
			continue
		}

		// Inject complex expressions
		if injectExpr != nil {
			definesToInject = append(definesToInject, key)
			if valueToInject == nil {
				valueToInject = make(map[string]config.InjectedDefine)
			}
			valueToInject[key] = config.InjectedDefine{
				Source: logger.Source{Contents: value},
				Data:   injectExpr,
				Name:   key,
			}
			continue
		}

		// Anything else is unsupported
		log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid define value (must be an entity name or valid JSON syntax): %s", value))
	}

	// Sort injected defines for determinism, since the imports will be injected
	// into every file in the order that we return them from this function
	var injectedDefines []config.InjectedDefine
	if len(definesToInject) > 0 {
		injectedDefines = make([]config.InjectedDefine, len(definesToInject))
		sort.Strings(definesToInject)
		for i, key := range definesToInject {
			injectedDefines[i] = valueToInject[key]
			rawDefines[key] = config.DefineData{DefineExpr: &config.DefineExpr{InjectedDefineIndex: ast.MakeIndex32(uint32(i))}}
		}
	}

	// If we're bundling for the browser, add a special-cased define for
	// "process.env.NODE_ENV" that is "development" when not minifying and
	// "production" when minifying. This is a convention from the React world
	// that must be handled to avoid all React code crashing instantly. This
	// is only done if it's not already defined so that you can override it if
	// necessary.
	if isBuildAPI && platform == config.PlatformBrowser {
		if _, process := rawDefines["process"]; !process {
			if _, processEnv := rawDefines["process.env"]; !processEnv {
				if _, processEnvNodeEnv := rawDefines["process.env.NODE_ENV"]; !processEnvNodeEnv {
					var value []uint16
					if minify {
						value = helpers.StringToUTF16("production")
					} else {
						value = helpers.StringToUTF16("development")
					}
					rawDefines["process.env.NODE_ENV"] = config.DefineData{DefineExpr: &config.DefineExpr{Constant: &js_ast.EString{Value: value}}}
				}
			}
		}
	}

	// If we're dropping all console API calls, replace each one with undefined
	if (drop & DropConsole) != 0 {
		define := rawDefines["console"]
		define.Flags |= config.MethodCallsMustBeReplacedWithUndefined
		rawDefines["console"] = define
	}

	for _, key := range pureFns {
		// The key must be a dot-separated identifier list
		for _, part := range strings.Split(key, ".") {
			if !js_ast.IsIdentifier(part) {
				log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid pure function: %q", key))
				// NOTE(review): as above, this only skips the inner loop
				// iteration; the key is still flagged as pure below
				continue
			}
		}

		// Merge with any previously-specified defines
		define := rawDefines[key]
		define.Flags |= config.CallCanBeUnwrappedIfUnused
		rawDefines[key] = define
	}

	// Processing defines is expensive. Process them once here so the same object
	// can be shared between all parsers we create using these arguments.
	processed := config.ProcessDefines(rawDefines)
	return &processed, injectedDefines
}
+
+func validateLogOverrides(input map[string]LogLevel) (output map[logger.MsgID]logger.LogLevel) {
+	output = make(map[uint8]logger.LogLevel)
+	for k, v := range input {
+		logger.StringToMsgIDs(k, validateLogLevel(v), output)
+	}
+	return
+}
+
+func validatePath(log logger.Log, fs fs.FS, relPath string, pathKind string) string {
+	if relPath == "" {
+		return ""
+	}
+	absPath, ok := fs.Abs(relPath)
+	if !ok {
+		log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid %s: %s", pathKind, relPath))
+	}
+	return absPath
+}
+
+func validateOutputExtensions(log logger.Log, outExtensions map[string]string) (js string, css string) {
+	for key, value := range outExtensions {
+		if !isValidExtension(value) {
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid output extension: %q", value))
+		}
+		switch key {
+		case ".js":
+			js = value
+		case ".css":
+			css = value
+		default:
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid output extension: %q (valid: .css, .js)", key))
+		}
+	}
+	return
+}
+
+func validateBannerOrFooter(log logger.Log, name string, values map[string]string) (js string, css string) {
+	for key, value := range values {
+		switch key {
+		case "js":
+			js = value
+		case "css":
+			css = value
+		default:
+			log.AddError(nil, logger.Range{}, fmt.Sprintf("Invalid %s file type: %q (valid: css, js)", name, key))
+		}
+	}
+	return
+}
+
+func validateKeepNames(log logger.Log, options *config.Options) {
+	if options.KeepNames && options.UnsupportedJSFeatures.Has(compat.FunctionNameConfigurable) {
+		where := config.PrettyPrintTargetEnvironment(options.OriginalTargetEnv, options.UnsupportedJSFeatureOverridesMask)
+		log.AddErrorWithNotes(nil, logger.Range{}, fmt.Sprintf("The \"keep names\" setting cannot be used with %s", where), []logger.MsgData{{
+			Text: "In this environment, the \"Function.prototype.name\" property is not configurable and assigning to it will throw an error. " +
+				"Either use a newer target environment or disable the \"keep names\" setting."}})
+	}
+}
+
+func convertLocationToPublic(loc *logger.MsgLocation) *Location {
+	if loc != nil {
+		return &Location{
+			File:       loc.File,
+			Namespace:  loc.Namespace,
+			Line:       loc.Line,
+			Column:     loc.Column,
+			Length:     loc.Length,
+			LineText:   loc.LineText,
+			Suggestion: loc.Suggestion,
+		}
+	}
+	return nil
+}
+
+func convertMessagesToPublic(kind logger.MsgKind, msgs []logger.Msg) []Message {
+	var filtered []Message
+	for _, msg := range msgs {
+		if msg.Kind == kind {
+			var notes []Note
+			for _, note := range msg.Notes {
+				notes = append(notes, Note{
+					Text:     note.Text,
+					Location: convertLocationToPublic(note.Location),
+				})
+			}
+			filtered = append(filtered, Message{
+				ID:         logger.MsgIDToString(msg.ID),
+				PluginName: msg.PluginName,
+				Text:       msg.Data.Text,
+				Location:   convertLocationToPublic(msg.Data.Location),
+				Notes:      notes,
+				Detail:     msg.Data.UserDetail,
+			})
+		}
+	}
+	return filtered
+}
+
+func convertLocationToInternal(loc *Location) *logger.MsgLocation {
+	if loc != nil {
+		namespace := loc.Namespace
+		if namespace == "" {
+			namespace = "file"
+		}
+		return &logger.MsgLocation{
+			File:       loc.File,
+			Namespace:  namespace,
+			Line:       loc.Line,
+			Column:     loc.Column,
+			Length:     loc.Length,
+			LineText:   loc.LineText,
+			Suggestion: loc.Suggestion,
+		}
+	}
+	return nil
+}
+
+func convertMessagesToInternal(msgs []logger.Msg, kind logger.MsgKind, messages []Message) []logger.Msg {
+	for _, message := range messages {
+		var notes []logger.MsgData
+		for _, note := range message.Notes {
+			notes = append(notes, logger.MsgData{
+				Text:     note.Text,
+				Location: convertLocationToInternal(note.Location),
+			})
+		}
+		msgs = append(msgs, logger.Msg{
+			ID:         logger.StringToMaximumMsgID(message.ID),
+			PluginName: message.PluginName,
+			Kind:       kind,
+			Data: logger.MsgData{
+				Text:       message.Text,
+				Location:   convertLocationToInternal(message.Location),
+				UserDetail: message.Detail,
+			},
+			Notes: notes,
+		})
+	}
+	return msgs
+}
+
+func convertErrorsAndWarningsToInternal(errors []Message, warnings []Message) []logger.Msg {
+	if len(errors)+len(warnings) > 0 {
+		msgs := make(logger.SortableMsgs, 0, len(errors)+len(warnings))
+		msgs = convertMessagesToInternal(msgs, logger.Error, errors)
+		msgs = convertMessagesToInternal(msgs, logger.Warning, warnings)
+		sort.Stable(msgs)
+		return msgs
+	}
+	return nil
+}
+
+func cloneMangleCache(log logger.Log, mangleCache map[string]interface{}) map[string]interface{} {
+	if mangleCache == nil {
+		return nil
+	}
+	clone := make(map[string]interface{}, len(mangleCache))
+	for k, v := range mangleCache {
+		if v == "__proto__" {
+			// This could cause problems for our binary serialization protocol. It's
+			// also unnecessary because we already avoid mangling this property name.
+			log.AddError(nil, logger.Range{},
+				fmt.Sprintf("Invalid identifier name %q in mangle cache", k))
+		} else if _, ok := v.(string); ok || v == false {
+			clone[k] = v
+		} else {
+			log.AddError(nil, logger.Range{},
+				fmt.Sprintf("Expected %q in mangle cache to map to either a string or false", k))
+		}
+	}
+	return clone
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Build API
+
// contextImpl validates the build options, loads the plugins once, and
// assembles the long-lived build context used for rebuilds. On validation
// failure it prints and returns the error messages instead of a context.
func contextImpl(buildOpts BuildOptions) (*internalContext, []Message) {
	logOptions := logger.OutputOptions{
		IncludeSource: true,
		MessageLimit:  buildOpts.LogLimit,
		Color:         validateColor(buildOpts.Color),
		LogLevel:      validateLogLevel(buildOpts.LogLevel),
		Overrides:     validateLogOverrides(buildOpts.LogOverride),
	}

	// Validate that the current working directory is an absolute path
	absWorkingDir := buildOpts.AbsWorkingDir
	realFS, err := fs.RealFS(fs.RealFSOptions{
		AbsWorkingDir: absWorkingDir,

		// This is a long-lived file system object so do not cache calls to
		// ReadDirectory() (they are normally cached for the duration of a build
		// for performance).
		DoNotCache: true,
	})
	if err != nil {
		log := logger.NewStderrLog(logOptions)
		log.AddError(nil, logger.Range{}, err.Error())
		return nil, convertMessagesToPublic(logger.Error, log.Done())
	}

	// Do not re-evaluate plugins when rebuilding. Also make sure the working
	// directory doesn't change, since breaking that invariant would break the
	// validation that we just did above.
	caches := cache.MakeCacheSet()
	log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, logOptions.Overrides)
	onEndCallbacks, onDisposeCallbacks, finalizeBuildOptions := loadPlugins(&buildOpts, realFS, log, caches)
	options, entryPoints := validateBuildOptions(buildOpts, log, realFS)
	finalizeBuildOptions(&options)
	if buildOpts.AbsWorkingDir != absWorkingDir {
		panic("Mutating \"AbsWorkingDir\" is not allowed")
	}

	// If we have errors already, then refuse to build any further. This only
	// happens when the build options themselves contain validation errors.
	msgs := log.Done()
	if log.HasErrors() {
		if logOptions.LogLevel < logger.LevelSilent {
			// Print all deferred validation log messages to stderr. We defer all log
			// messages that are generated above because warnings are re-printed for
			// every rebuild and we don't want to double-print these warnings for the
			// first build.
			stderr := logger.NewStderrLog(logOptions)
			for _, msg := range msgs {
				stderr.AddMsg(msg)
			}
			stderr.Done()
		}
		return nil, convertMessagesToPublic(logger.Error, msgs)
	}

	// Everything each rebuild needs, captured once so rebuilds are cheap
	args := rebuildArgs{
		caches:             caches,
		onEndCallbacks:     onEndCallbacks,
		onDisposeCallbacks: onDisposeCallbacks,
		logOptions:         logOptions,
		logWarnings:        msgs,
		entryPoints:        entryPoints,
		options:            options,
		mangleCache:        buildOpts.MangleCache,
		absWorkingDir:      absWorkingDir,
		write:              buildOpts.Write,
	}

	return &internalContext{
		args:          args,
		realFS:        realFS,
		absWorkingDir: absWorkingDir,
	}, nil
}
+
// buildInProgress represents a single in-flight build. Concurrent callers
// share one instance: they Wait() on waitGroup and then read state, or set
// cancel to cut the build short.
type buildInProgress struct {
	// state is only valid to read after waitGroup has been released
	state     rebuildState
	waitGroup sync.WaitGroup
	cancel    config.CancelFlag
}
+
// internalContext is the concrete implementation behind the public build
// context API. Mutable fields are guarded by mutex.
type internalContext struct {
	mutex sync.Mutex
	// args captures everything a rebuild needs; see contextImpl
	args rebuildArgs
	// activeBuild is non-nil while a build is running
	activeBuild *buildInProgress
	// recentBuild briefly caches the last result for the dev server
	recentBuild   *BuildResult
	realFS        fs.FS
	absWorkingDir string
	watcher       *watcher
	handler       *apiHandler
	didDispose    bool

	// This saves just enough information to be able to compute a useful diff
	// between two sets of output files. That way we don't need to hold both
	// sets of output files in memory at once to compute a diff.
	latestHashes map[string]string
}
+
// rebuild runs one build, coalescing concurrent callers onto a single
// in-flight build. The mutex is only held for bookkeeping, never across the
// build itself. Results are forwarded to the dev-server handler and watcher
// if present, and cached briefly for the dev server.
func (ctx *internalContext) rebuild() rebuildState {
	ctx.mutex.Lock()

	// Ignore disposed contexts
	if ctx.didDispose {
		ctx.mutex.Unlock()
		return rebuildState{}
	}

	// If there's already an active build, just return that build's result
	if build := ctx.activeBuild; build != nil {
		ctx.mutex.Unlock()
		build.waitGroup.Wait()
		return build.state
	}

	// Otherwise, start a new build
	build := &buildInProgress{}
	build.waitGroup.Add(1)
	ctx.activeBuild = build
	args := ctx.args
	watcher := ctx.watcher
	handler := ctx.handler
	oldHashes := ctx.latestHashes
	args.options.CancelFlag = &build.cancel
	ctx.mutex.Unlock()

	// Do the build without holding the mutex
	var newHashes map[string]string
	build.state, newHashes = rebuildImpl(args, oldHashes)
	if handler != nil {
		handler.broadcastBuildResult(build.state.result, newHashes)
	}
	if watcher != nil {
		watcher.setWatchData(build.state.watchData)
	}

	// Store the recent build for the dev server
	recentBuild := &build.state.result
	ctx.mutex.Lock()
	ctx.activeBuild = nil
	ctx.recentBuild = recentBuild
	ctx.latestHashes = newHashes
	ctx.mutex.Unlock()

	// Clear the recent build after it goes stale
	go func() {
		time.Sleep(250 * time.Millisecond)
		ctx.mutex.Lock()
		if ctx.recentBuild == recentBuild {
			ctx.recentBuild = nil
		}
		ctx.mutex.Unlock()
	}()

	// Release everyone waiting on this build
	build.waitGroup.Done()
	return build.state
}
+
// This is used by the dev server. The dev server does a rebuild on each
// incoming request since a) we want incoming requests to always be up to
// date and b) we don't necessarily know what output paths to even serve
// without running another build (e.g. the hashes may have changed).
//
// However, there is a small period of time where we reuse old build results
// instead of generating new ones. This is because page loads likely involve
// multiple requests, and we don't want to rebuild separately for each of
// those requests.
func (ctx *internalContext) activeBuildOrRecentBuildOrRebuild() BuildResult {
	ctx.mutex.Lock()

	// If there's already an active build, wait for it and return that
	if build := ctx.activeBuild; build != nil {
		ctx.mutex.Unlock()
		build.waitGroup.Wait()
		return build.state.result
	}

	// Then try to return a recent already-completed build
	if build := ctx.recentBuild; build != nil {
		ctx.mutex.Unlock()
		return *build
	}

	// Otherwise, fall back to rebuilding
	ctx.mutex.Unlock()
	return ctx.Rebuild()
}
+
// Rebuild runs a build (or joins an in-flight one) and returns its result.
func (ctx *internalContext) Rebuild() BuildResult {
	return ctx.rebuild().result
}
+
// Watch enables watch mode on this context. It may only be enabled once, and
// fails on a disposed context. The first watch-mode build is kicked off on a
// background goroutine so this call returns immediately.
func (ctx *internalContext) Watch(options WatchOptions) error {
	ctx.mutex.Lock()
	defer ctx.mutex.Unlock()

	// Ignore disposed contexts
	if ctx.didDispose {
		return errors.New("Cannot watch a disposed context")
	}

	// Don't allow starting watch mode multiple times
	if ctx.watcher != nil {
		return errors.New("Watch mode has already been enabled")
	}

	logLevel := ctx.args.logOptions.LogLevel
	ctx.watcher = &watcher{
		fs:        ctx.realFS,
		shouldLog: logLevel == logger.LevelInfo || logLevel == logger.LevelDebug || logLevel == logger.LevelVerbose,
		useColor:  ctx.args.logOptions.Color,
		rebuild: func() fs.WatchData {
			return ctx.rebuild().watchData
		},
	}

	// All subsequent builds will be watch mode builds
	ctx.args.options.WatchMode = true

	// Start the file watcher goroutine
	ctx.watcher.start()

	// Do the first watch mode build on another goroutine
	go func() {
		ctx.mutex.Lock()
		build := ctx.activeBuild
		ctx.mutex.Unlock()

		// If there's an active build, then it's not a watch build. Wait for it to
		// finish first so we don't just get this build when we call "Rebuild()".
		if build != nil {
			build.waitGroup.Wait()
		}

		// Trigger a rebuild now that we know all future builds will pick up on
		// our watcher. This build will populate the initial watch data, which is
		// necessary to be able to know what file system changes are relevant.
		ctx.Rebuild()
	}()
	return nil
}
+
// Cancel aborts the active build, if any, and waits for it to finish
// unwinding before returning. It does nothing on a disposed context.
func (ctx *internalContext) Cancel() {
	ctx.mutex.Lock()

	// Ignore disposed contexts
	if ctx.didDispose {
		ctx.mutex.Unlock()
		return
	}

	build := ctx.activeBuild
	ctx.mutex.Unlock()

	if build != nil {
		// Tell observers to cut this build short
		build.cancel.Cancel()

		// Wait for the build to finish before returning
		build.waitGroup.Wait()
	}
}
+
// Dispose tears down the context: it stops the watcher and dev-server
// handler, waits for any in-flight build to finish, and runs the plugins'
// OnDispose callbacks. Disposing is idempotent.
func (ctx *internalContext) Dispose() {
	// Only dispose once
	ctx.mutex.Lock()
	if ctx.didDispose {
		ctx.mutex.Unlock()
		return
	}
	ctx.didDispose = true
	ctx.recentBuild = nil
	build := ctx.activeBuild
	ctx.mutex.Unlock()

	// NOTE(review): watcher/handler are read without the mutex here; this
	// relies on Watch() refusing to set them once didDispose is true — confirm
	if ctx.watcher != nil {
		ctx.watcher.stop()
	}
	if ctx.handler != nil {
		ctx.handler.stop()
	}

	// It's important to wait for the build to finish before returning. The JS
	// API will unregister its callbacks when it returns. If that happens while
	// the build is still in progress, that might cause the JS API to generate
	// errors when we send it events (e.g. when it runs "onEnd" callbacks) that
	// we then print to the terminal, which would be confusing.
	if build != nil {
		build.waitGroup.Wait()
	}

	// Run each "OnDispose" callback on its own goroutine
	for _, fn := range ctx.args.onDisposeCallbacks {
		go fn()
	}
}
+
// prettyPrintByteCount formats a byte count with 1024-based units for the
// build summary table ("512b ", "2.0kb", "3.0mb", "2.0gb"). The trailing
// space after "b" keeps the column widths aligned with the two-letter units.
func prettyPrintByteCount(n int) string {
	switch {
	case n < 1024:
		return fmt.Sprintf("%db ", n)
	case n < 1024*1024:
		return fmt.Sprintf("%.1fkb", float64(n)/(1024))
	case n < 1024*1024*1024:
		return fmt.Sprintf("%.1fmb", float64(n)/(1024*1024))
	default:
		return fmt.Sprintf("%.1fgb", float64(n)/(1024*1024*1024))
	}
}
+
// printSummary prints the post-build table of output files with their sizes.
// Paths are shown relative to the current working directory when possible.
// The elapsed time is omitted under Yarn 1, which prints its own timing.
func printSummary(color logger.UseColor, outputFiles []OutputFile, start time.Time) {
	if len(outputFiles) == 0 {
		return
	}

	var table logger.SummaryTable = make([]logger.SummaryTableEntry, len(outputFiles))

	// Best-effort: if the working directory is unavailable the table entries
	// are simply left zero-valued
	if cwd, err := os.Getwd(); err == nil {
		if realFS, err := fs.RealFS(fs.RealFSOptions{AbsWorkingDir: cwd}); err == nil {
			for i, file := range outputFiles {
				path, ok := realFS.Rel(realFS.Cwd(), file.Path)
				if !ok {
					path = file.Path
				}
				base := realFS.Base(path)
				n := len(file.Contents)
				table[i] = logger.SummaryTableEntry{
					Dir:         path[:len(path)-len(base)],
					Base:        base,
					Size:        prettyPrintByteCount(n),
					Bytes:       n,
					IsSourceMap: strings.HasSuffix(base, ".map"),
				}
			}
		}
	}

	// Don't print the time taken by the build if we're running under Yarn 1
	// since Yarn 1 always prints its own copy of the time taken by each command
	if userAgent, ok := os.LookupEnv("npm_config_user_agent"); ok {
		if strings.Contains(userAgent, "yarn/1.") {
			logger.PrintSummary(color, table, nil)
			return
		}
	}

	logger.PrintSummary(color, table, &start)
}
+
+func validateBuildOptions(
+	buildOpts BuildOptions,
+	log logger.Log,
+	realFS fs.FS,
+) (
+	options config.Options,
+	entryPoints []bundler.EntryPoint,
+) {
+	jsFeatures, cssFeatures, cssPrefixData, targetEnv := validateFeatures(log, buildOpts.Target, buildOpts.Engines)
+	jsOverrides, jsMask, cssOverrides, cssMask := validateSupported(log, buildOpts.Supported)
+	outJS, outCSS := validateOutputExtensions(log, buildOpts.OutExtension)
+	bannerJS, bannerCSS := validateBannerOrFooter(log, "banner", buildOpts.Banner)
+	footerJS, footerCSS := validateBannerOrFooter(log, "footer", buildOpts.Footer)
+	minify := buildOpts.MinifyWhitespace && buildOpts.MinifyIdentifiers && buildOpts.MinifySyntax
+	platform := validatePlatform(buildOpts.Platform)
+	defines, injectedDefines := validateDefines(log, buildOpts.Define, buildOpts.Pure, platform, true /* isBuildAPI */, minify, buildOpts.Drop)
+	options = config.Options{
+		CSSPrefixData:                      cssPrefixData,
+		UnsupportedJSFeatures:              jsFeatures.ApplyOverrides(jsOverrides, jsMask),
+		UnsupportedCSSFeatures:             cssFeatures.ApplyOverrides(cssOverrides, cssMask),
+		UnsupportedJSFeatureOverrides:      jsOverrides,
+		UnsupportedJSFeatureOverridesMask:  jsMask,
+		UnsupportedCSSFeatureOverrides:     cssOverrides,
+		UnsupportedCSSFeatureOverridesMask: cssMask,
+		OriginalTargetEnv:                  targetEnv,
+		JSX: config.JSXOptions{
+			Preserve:         buildOpts.JSX == JSXPreserve,
+			AutomaticRuntime: buildOpts.JSX == JSXAutomatic,
+			Factory:          validateJSXExpr(log, buildOpts.JSXFactory, "factory"),
+			Fragment:         validateJSXExpr(log, buildOpts.JSXFragment, "fragment"),
+			Development:      buildOpts.JSXDev,
+			ImportSource:     buildOpts.JSXImportSource,
+			SideEffects:      buildOpts.JSXSideEffects,
+		},
+		Defines:               defines,
+		InjectedDefines:       injectedDefines,
+		Platform:              platform,
+		SourceMap:             validateSourceMap(buildOpts.Sourcemap),
+		LegalComments:         validateLegalComments(buildOpts.LegalComments, buildOpts.Bundle),
+		SourceRoot:            buildOpts.SourceRoot,
+		ExcludeSourcesContent: buildOpts.SourcesContent == SourcesContentExclude,
+		MinifySyntax:          buildOpts.MinifySyntax,
+		MinifyWhitespace:      buildOpts.MinifyWhitespace,
+		MinifyIdentifiers:     buildOpts.MinifyIdentifiers,
+		LineLimit:             buildOpts.LineLimit,
+		MangleProps:           validateRegex(log, "mangle props", buildOpts.MangleProps),
+		ReserveProps:          validateRegex(log, "reserve props", buildOpts.ReserveProps),
+		MangleQuoted:          buildOpts.MangleQuoted == MangleQuotedTrue,
+		DropLabels:            append([]string{}, buildOpts.DropLabels...),
+		DropDebugger:          (buildOpts.Drop & DropDebugger) != 0,
+		AllowOverwrite:        buildOpts.AllowOverwrite,
+		ASCIIOnly:             validateASCIIOnly(buildOpts.Charset),
+		IgnoreDCEAnnotations:  buildOpts.IgnoreAnnotations,
+		TreeShaking:           validateTreeShaking(buildOpts.TreeShaking, buildOpts.Bundle, buildOpts.Format),
+		GlobalName:            validateGlobalName(log, buildOpts.GlobalName),
+		CodeSplitting:         buildOpts.Splitting,
+		OutputFormat:          validateFormat(buildOpts.Format),
+		AbsOutputFile:         validatePath(log, realFS, buildOpts.Outfile, "outfile path"),
+		AbsOutputDir:          validatePath(log, realFS, buildOpts.Outdir, "outdir path"),
+		AbsOutputBase:         validatePath(log, realFS, buildOpts.Outbase, "outbase path"),
+		NeedsMetafile:         buildOpts.Metafile,
+		EntryPathTemplate:     validatePathTemplate(buildOpts.EntryNames),
+		ChunkPathTemplate:     validatePathTemplate(buildOpts.ChunkNames),
+		AssetPathTemplate:     validatePathTemplate(buildOpts.AssetNames),
+		OutputExtensionJS:     outJS,
+		OutputExtensionCSS:    outCSS,
+		ExtensionToLoader:     validateLoaders(log, buildOpts.Loader),
+		ExtensionOrder:        validateResolveExtensions(log, buildOpts.ResolveExtensions),
+		ExternalSettings:      validateExternals(log, realFS, buildOpts.External),
+		ExternalPackages:      validateExternalPackages(buildOpts.Packages),
+		PackageAliases:        validateAlias(log, realFS, buildOpts.Alias),
+		TSConfigPath:          validatePath(log, realFS, buildOpts.Tsconfig, "tsconfig path"),
+		TSConfigRaw:           buildOpts.TsconfigRaw,
+		MainFields:            buildOpts.MainFields,
+		PublicPath:            buildOpts.PublicPath,
+		KeepNames:             buildOpts.KeepNames,
+		InjectPaths:           append([]string{}, buildOpts.Inject...),
+		AbsNodePaths:          make([]string, len(buildOpts.NodePaths)),
+		JSBanner:              bannerJS,
+		JSFooter:              footerJS,
+		CSSBanner:             bannerCSS,
+		CSSFooter:             footerCSS,
+		PreserveSymlinks:      buildOpts.PreserveSymlinks,
+	}
+	validateKeepNames(log, &options)
+	if buildOpts.Conditions != nil {
+		options.Conditions = append([]string{}, buildOpts.Conditions...)
+	}
+	if options.MainFields != nil {
+		options.MainFields = append([]string{}, options.MainFields...)
+	}
+	for i, path := range buildOpts.NodePaths {
+		options.AbsNodePaths[i] = validatePath(log, realFS, path, "node path")
+	}
+	entryPoints = make([]bundler.EntryPoint, 0, len(buildOpts.EntryPoints)+len(buildOpts.EntryPointsAdvanced))
+	hasEntryPointWithWildcard := false
+	for _, ep := range buildOpts.EntryPoints {
+		entryPoints = append(entryPoints, bundler.EntryPoint{InputPath: ep})
+		if strings.ContainsRune(ep, '*') {
+			hasEntryPointWithWildcard = true
+		}
+	}
+	for _, ep := range buildOpts.EntryPointsAdvanced {
+		entryPoints = append(entryPoints, bundler.EntryPoint{InputPath: ep.InputPath, OutputPath: ep.OutputPath})
+		if strings.ContainsRune(ep.InputPath, '*') {
+			hasEntryPointWithWildcard = true
+		}
+	}
+	entryPointCount := len(entryPoints)
+	if buildOpts.Stdin != nil {
+		entryPointCount++
+		options.Stdin = &config.StdinInfo{
+			Loader:        validateLoader(buildOpts.Stdin.Loader),
+			Contents:      buildOpts.Stdin.Contents,
+			SourceFile:    buildOpts.Stdin.Sourcefile,
+			AbsResolveDir: validatePath(log, realFS, buildOpts.Stdin.ResolveDir, "resolve directory path"),
+		}
+	}
+
+	if options.AbsOutputDir == "" && (entryPointCount > 1 || hasEntryPointWithWildcard) {
+		log.AddError(nil, logger.Range{},
+			"Must use \"outdir\" when there are multiple input files")
+	} else if options.AbsOutputDir == "" && options.CodeSplitting {
+		log.AddError(nil, logger.Range{},
+			"Must use \"outdir\" when code splitting is enabled")
+	} else if options.AbsOutputFile != "" && options.AbsOutputDir != "" {
+		log.AddError(nil, logger.Range{}, "Cannot use both \"outfile\" and \"outdir\"")
+	} else if options.AbsOutputFile != "" {
+		// If the output file is specified, use it to derive the output directory
+		options.AbsOutputDir = realFS.Dir(options.AbsOutputFile)
+	} else if options.AbsOutputDir == "" {
+		options.WriteToStdout = true
+
+		// Forbid certain features when writing to stdout
+		if options.SourceMap != config.SourceMapNone && options.SourceMap != config.SourceMapInline {
+			log.AddError(nil, logger.Range{}, "Cannot use an external source map without an output path")
+		}
+		if options.LegalComments.HasExternalFile() {
+			log.AddError(nil, logger.Range{}, "Cannot use linked or external legal comments without an output path")
+		}
+		for _, loader := range options.ExtensionToLoader {
+			if loader == config.LoaderFile {
+				log.AddError(nil, logger.Range{}, "Cannot use the \"file\" loader without an output path")
+				break
+			}
+			if loader == config.LoaderCopy {
+				log.AddError(nil, logger.Range{}, "Cannot use the \"copy\" loader without an output path")
+				break
+			}
+		}
+
+		// Use the current directory as the output directory instead of an empty
+		// string because external modules with relative paths need a base directory.
+		options.AbsOutputDir = realFS.Cwd()
+	}
+
+	if !buildOpts.Bundle {
+		// Disallow bundle-only options when not bundling
+		if options.ExternalSettings.PreResolve.HasMatchers() || options.ExternalSettings.PostResolve.HasMatchers() {
+			log.AddError(nil, logger.Range{}, "Cannot use \"external\" without \"bundle\"")
+		}
+		if len(options.PackageAliases) > 0 {
+			log.AddError(nil, logger.Range{}, "Cannot use \"alias\" without \"bundle\"")
+		}
+	} else if options.OutputFormat == config.FormatPreserve {
+		// If the format isn't specified, set the default format using the platform
+		switch options.Platform {
+		case config.PlatformBrowser:
+			options.OutputFormat = config.FormatIIFE
+		case config.PlatformNode:
+			options.OutputFormat = config.FormatCommonJS
+		case config.PlatformNeutral:
+			options.OutputFormat = config.FormatESModule
+		}
+	}
+
+	// Set the output mode using other settings
+	if buildOpts.Bundle {
+		options.Mode = config.ModeBundle
+	} else if options.OutputFormat != config.FormatPreserve {
+		options.Mode = config.ModeConvertFormat
+	}
+
+	// Automatically enable the "module" condition for better tree shaking
+	if options.Conditions == nil && options.Platform != config.PlatformNeutral {
+		options.Conditions = []string{"module"}
+	}
+
+	// Code splitting is experimental and currently only enabled for ES6 modules
+	if options.CodeSplitting && options.OutputFormat != config.FormatESModule {
+		log.AddError(nil, logger.Range{}, "Splitting currently only works with the \"esm\" format")
+	}
+
	// Disallow providing "tsconfig" as both a raw string and a file path
+	if options.TSConfigPath != "" && options.TSConfigRaw != "" {
+		log.AddError(nil, logger.Range{}, "Cannot provide \"tsconfig\" as both a raw string and a path")
+	}
+
+	// If we aren't writing the output to the file system, then we can allow the
+	// output paths to be the same as the input paths. This helps when serving.
+	if !buildOpts.Write {
+		options.AllowOverwrite = true
+	}
+
+	return
+}
+
// onEndCallback pairs an "onEnd" plugin callback with the name of the
// plugin that registered it, so messages it produces can be attributed.
type onEndCallback struct {
	pluginName string
	fn         func(*BuildResult) (OnEndResult, error)
}
+
// rebuildArgs bundles everything "rebuildImpl" needs to perform one build,
// so repeated rebuilds (e.g. watch mode) can reuse the same validated inputs.
type rebuildArgs struct {
	caches             *cache.CacheSet    // shared caches reused across rebuilds
	onEndCallbacks     []onEndCallback    // plugin "onEnd" hooks, run after every build
	onDisposeCallbacks []func()           // plugin "onDispose" hooks (not invoked by rebuildImpl)
	logOptions         logger.OutputOptions
	logWarnings        []logger.Msg // validation warnings repeated on every rebuild
	entryPoints        []bundler.EntryPoint
	options            config.Options
	mangleCache        map[string]interface{}
	absWorkingDir      string
	write              bool // whether to write output files to the file system
}
+
// rebuildState captures the outcome of one build: the public result object,
// the file-system watch data (for watch mode), and the options that were used.
type rebuildState struct {
	result    BuildResult
	watchData fs.WatchData
	options   config.Options
}
+
// rebuildImpl runs one complete build: scan, compile, optional write to the
// file system (or stdout), "onEnd" plugin callbacks, and final logging.
//
// "oldHashes" maps absolute output paths to content hashes from the previous
// build; it is used both to skip rewriting files whose contents are unchanged
// and to delete output files that are no longer produced. The updated hash
// map for the next rebuild is returned alongside the build state.
func rebuildImpl(args rebuildArgs, oldHashes map[string]string) (rebuildState, map[string]string) {
	log := logger.NewStderrLog(args.logOptions)

	// All validation warnings are repeated for every rebuild
	for _, msg := range args.logWarnings {
		log.AddMsg(msg)
	}

	// Create the real file system, requesting watch data when in watch mode
	realFS, err := fs.RealFS(fs.RealFSOptions{
		AbsWorkingDir: args.absWorkingDir,
		WantWatchData: args.options.WatchMode,
	})
	if err != nil {
		// This should already have been checked by the caller
		panic(err.Error())
	}

	var result BuildResult
	var watchData fs.WatchData
	var toWriteToStdout []byte

	// Only allocate a timer when timing was requested; the timer is passed
	// around as a possibly-nil pointer (NOTE(review): helpers.Timer methods
	// appear to tolerate a nil receiver — confirm)
	var timer *helpers.Timer
	if api_helpers.UseTimer {
		timer = &helpers.Timer{}
	}

	// Scan over the bundle
	bundle := bundler.ScanBundle(config.BuildCall, log, realFS, args.caches, args.entryPoints, args.options, timer)
	watchData = realFS.WatchData()

	// The new build summary remains the same as the old one when there are
	// errors. A failed build shouldn't erase the previous successful build.
	newHashes := oldHashes

	// Stop now if there were errors
	if !log.HasErrors() {
		// Compile the bundle
		result.MangleCache = cloneMangleCache(log, args.mangleCache)
		results, metafile := bundle.Compile(log, timer, result.MangleCache, linker.Link)

		// Canceling a build generates a single error at the end of the build
		if args.options.CancelFlag.DidCancel() {
			log.AddError(nil, logger.Range{}, "The build was canceled")
		}

		// Stop now if there were errors
		if !log.HasErrors() {
			result.Metafile = metafile

			// Populate the results to return, hashing each output file's
			// contents (xxhash folded to 8 bytes, base64-encoded)
			var hashBytes [8]byte
			result.OutputFiles = make([]OutputFile, len(results))
			newHashes = make(map[string]string)
			for i, item := range results {
				if args.options.WriteToStdout {
					item.AbsPath = "<stdout>"
				}
				hasher := xxhash.New()
				hasher.Write(item.Contents)
				binary.LittleEndian.PutUint64(hashBytes[:], hasher.Sum64())
				hash := base64.RawStdEncoding.EncodeToString(hashBytes[:])
				result.OutputFiles[i] = OutputFile{
					Path:     item.AbsPath,
					Contents: item.Contents,
					Hash:     hash,
				}
				newHashes[item.AbsPath] = hash
			}

			// Write output files before "OnEnd" callbacks run so they can expect
			// output files to exist on the file system. "OnEnd" callbacks can be
			// used to move output files to a different location after the build.
			if args.write {
				timer.Begin("Write output files")
				if args.options.WriteToStdout {
					// Special-case writing to stdout
					if len(results) != 1 {
						log.AddError(nil, logger.Range{}, fmt.Sprintf(
							"Internal error: did not expect to generate %d files when writing to stdout", len(results)))
					} else {
						// Print this later on, at the end of the current function
						toWriteToStdout = results[0].Contents
					}
				} else {
					// Delete old files that are no longer relevant
					var toDelete []string
					for absPath := range oldHashes {
						if _, ok := newHashes[absPath]; !ok {
							toDelete = append(toDelete, absPath)
						}
					}

					// Process all file operations in parallel
					waitGroup := sync.WaitGroup{}
					waitGroup.Add(len(results) + len(toDelete))
					for _, result := range results {
						go func(result graph.OutputFile) {
							defer waitGroup.Done()
							fs.BeforeFileOpen()
							defer fs.AfterFileClose()
							if oldHash, ok := oldHashes[result.AbsPath]; ok && oldHash == newHashes[result.AbsPath] {
								if contents, err := ioutil.ReadFile(result.AbsPath); err == nil && bytes.Equal(contents, result.Contents) {
									// Skip writing out files that haven't changed since last time
									return
								}
							}
							if err := fs.MkdirAll(realFS, realFS.Dir(result.AbsPath), 0755); err != nil {
								log.AddError(nil, logger.Range{}, fmt.Sprintf(
									"Failed to create output directory: %s", err.Error()))
							} else {
								var mode os.FileMode = 0666
								if result.IsExecutable {
									mode = 0777
								}
								if err := ioutil.WriteFile(result.AbsPath, result.Contents, mode); err != nil {
									log.AddError(nil, logger.Range{}, fmt.Sprintf(
										"Failed to write to output file: %s", err.Error()))
								}
							}
						}(result)
					}
					for _, absPath := range toDelete {
						go func(absPath string) {
							defer waitGroup.Done()
							fs.BeforeFileOpen()
							defer fs.AfterFileClose()
							os.Remove(absPath)
						}(absPath)
					}
					waitGroup.Wait()
				}
				timer.End("Write output files")
			}
		}
	}

	// Only return the mangle cache for a successful build
	if log.HasErrors() {
		result.MangleCache = nil
	}

	// Populate the result object with the messages so far
	msgs := log.Peek()
	result.Errors = convertMessagesToPublic(logger.Error, msgs)
	result.Warnings = convertMessagesToPublic(logger.Warning, msgs)

	// Run any registered "OnEnd" callbacks now. These always run regardless of
	// whether the current build has been canceled or not. They can check for
	// errors by checking the error array in the build result, and canceled
	// builds should always have at least one error.
	timer.Begin("On-end callbacks")
	for _, onEnd := range args.onEndCallbacks {
		fromPlugin, thrown := onEnd.fn(&result)

		// Report errors and warnings generated by the plugin, attributing
		// any unattributed messages to the plugin that registered this hook
		for i := range fromPlugin.Errors {
			if fromPlugin.Errors[i].PluginName == "" {
				fromPlugin.Errors[i].PluginName = onEnd.pluginName
			}
		}
		for i := range fromPlugin.Warnings {
			if fromPlugin.Warnings[i].PluginName == "" {
				fromPlugin.Warnings[i].PluginName = onEnd.pluginName
			}
		}

		// Report errors thrown by the plugin itself
		if thrown != nil {
			fromPlugin.Errors = append(fromPlugin.Errors, Message{
				PluginName: onEnd.pluginName,
				Text:       thrown.Error(),
			})
		}

		// Log any errors and warnings generated above
		for _, msg := range convertErrorsAndWarningsToInternal(fromPlugin.Errors, fromPlugin.Warnings) {
			log.AddMsg(msg)
		}

		// Add the errors and warnings to the result object
		result.Errors = append(result.Errors, fromPlugin.Errors...)
		result.Warnings = append(result.Warnings, fromPlugin.Warnings...)

		// Stop if an "onEnd" callback failed. This counts as a build failure.
		if len(fromPlugin.Errors) > 0 {
			break
		}
	}
	timer.End("On-end callbacks")

	// Log timing information now that we're all done
	timer.Log(log)

	// End the log after "OnEnd" callbacks have added any additional errors and/or
	// warnings. This may print any warnings that were deferred up until this
	// point, as well as a message with the number of errors and/or warnings
	// omitted due to the configured log limit.
	log.Done()

	// Only write to stdout after the log has been finalized. We want this output
	// to show up in the terminal after the message that was printed above.
	if toWriteToStdout != nil {
		os.Stdout.Write(toWriteToStdout)
	}

	return rebuildState{
		result:    result,
		options:   args.options,
		watchData: watchData,
	}, newHashes
}
+
+////////////////////////////////////////////////////////////////////////////////
+// Transform API
+
// transformImpl implements the Transform API: it compiles a single in-memory
// string (treated as stdin) using a mock file system, without bundling, and
// returns the generated code, source map, legal comments, and any messages.
func transformImpl(input string, transformOpts TransformOptions) TransformResult {
	log := logger.NewStderrLog(logger.OutputOptions{
		IncludeSource: true,
		MessageLimit:  transformOpts.LogLimit,
		Color:         validateColor(transformOpts.Color),
		LogLevel:      validateLogLevel(transformOpts.LogLevel),
		Overrides:     validateLogOverrides(transformOpts.LogOverride),
	})
	caches := cache.MakeCacheSet()

	// Apply default values
	if transformOpts.Sourcefile == "" {
		transformOpts.Sourcefile = "<stdin>"
	}
	if transformOpts.Loader == LoaderNone {
		transformOpts.Loader = LoaderJS
	}

	// Convert and validate the transformOpts
	jsFeatures, cssFeatures, cssPrefixData, targetEnv := validateFeatures(log, transformOpts.Target, transformOpts.Engines)
	jsOverrides, jsMask, cssOverrides, cssMask := validateSupported(log, transformOpts.Supported)
	platform := validatePlatform(transformOpts.Platform)
	defines, injectedDefines := validateDefines(log, transformOpts.Define, transformOpts.Pure, platform, false /* isBuildAPI */, false /* minify */, transformOpts.Drop)
	mangleCache := cloneMangleCache(log, transformOpts.MangleCache)
	options := config.Options{
		CSSPrefixData:                      cssPrefixData,
		UnsupportedJSFeatures:              jsFeatures.ApplyOverrides(jsOverrides, jsMask),
		UnsupportedCSSFeatures:             cssFeatures.ApplyOverrides(cssOverrides, cssMask),
		UnsupportedJSFeatureOverrides:      jsOverrides,
		UnsupportedJSFeatureOverridesMask:  jsMask,
		UnsupportedCSSFeatureOverrides:     cssOverrides,
		UnsupportedCSSFeatureOverridesMask: cssMask,
		OriginalTargetEnv:                  targetEnv,
		TSConfigRaw:                        transformOpts.TsconfigRaw,
		JSX: config.JSXOptions{
			Preserve:         transformOpts.JSX == JSXPreserve,
			AutomaticRuntime: transformOpts.JSX == JSXAutomatic,
			Factory:          validateJSXExpr(log, transformOpts.JSXFactory, "factory"),
			Fragment:         validateJSXExpr(log, transformOpts.JSXFragment, "fragment"),
			Development:      transformOpts.JSXDev,
			ImportSource:     transformOpts.JSXImportSource,
			SideEffects:      transformOpts.JSXSideEffects,
		},
		Defines:               defines,
		InjectedDefines:       injectedDefines,
		Platform:              platform,
		SourceMap:             validateSourceMap(transformOpts.Sourcemap),
		LegalComments:         validateLegalComments(transformOpts.LegalComments, false /* bundle */),
		SourceRoot:            transformOpts.SourceRoot,
		ExcludeSourcesContent: transformOpts.SourcesContent == SourcesContentExclude,
		OutputFormat:          validateFormat(transformOpts.Format),
		GlobalName:            validateGlobalName(log, transformOpts.GlobalName),
		MinifySyntax:          transformOpts.MinifySyntax,
		MinifyWhitespace:      transformOpts.MinifyWhitespace,
		MinifyIdentifiers:     transformOpts.MinifyIdentifiers,
		LineLimit:             transformOpts.LineLimit,
		MangleProps:           validateRegex(log, "mangle props", transformOpts.MangleProps),
		ReserveProps:          validateRegex(log, "reserve props", transformOpts.ReserveProps),
		MangleQuoted:          transformOpts.MangleQuoted == MangleQuotedTrue,
		DropLabels:            append([]string{}, transformOpts.DropLabels...),
		DropDebugger:          (transformOpts.Drop & DropDebugger) != 0,
		ASCIIOnly:             validateASCIIOnly(transformOpts.Charset),
		IgnoreDCEAnnotations:  transformOpts.IgnoreAnnotations,
		TreeShaking:           validateTreeShaking(transformOpts.TreeShaking, false /* bundle */, transformOpts.Format),
		AbsOutputFile:         transformOpts.Sourcefile + "-out",
		KeepNames:             transformOpts.KeepNames,
		Stdin: &config.StdinInfo{
			Loader:     validateLoader(transformOpts.Loader),
			Contents:   input,
			SourceFile: transformOpts.Sourcefile,
		},
	}
	validateKeepNames(log, &options)
	// The banner/footer apply to whichever language is being transformed
	if options.Stdin.Loader.IsCSS() {
		options.CSSBanner = transformOpts.Banner
		options.CSSFooter = transformOpts.Footer
	} else {
		options.JSBanner = transformOpts.Banner
		options.JSFooter = transformOpts.Footer
	}
	if options.SourceMap == config.SourceMapLinkedWithComment {
		// Linked source maps don't make sense because there's no output file name
		log.AddError(nil, logger.Range{}, "Cannot transform with linked source maps")
	}
	if options.SourceMap != config.SourceMapNone && options.Stdin.SourceFile == "" {
		log.AddError(nil, logger.Range{},
			"Must use \"sourcefile\" with \"sourcemap\" to set the original file name")
	}
	if logger.API == logger.CLIAPI {
		if options.LegalComments.HasExternalFile() {
			log.AddError(nil, logger.Range{}, "Cannot transform with linked or external legal comments")
		}
	} else if options.LegalComments == config.LegalCommentsLinkedWithComment {
		log.AddError(nil, logger.Range{}, "Cannot transform with linked legal comments")
	}

	// Set the output mode using other settings
	if options.OutputFormat != config.FormatPreserve {
		options.Mode = config.ModeConvertFormat
	}

	var results []graph.OutputFile

	// Stop now if there were errors
	if !log.HasErrors() {
		var timer *helpers.Timer
		if api_helpers.UseTimer {
			timer = &helpers.Timer{}
		}

		// Scan over the bundle (a mock file system suffices since the only
		// input is the in-memory stdin string configured above)
		mockFS := fs.MockFS(make(map[string]string), fs.MockUnix, "/")
		bundle := bundler.ScanBundle(config.TransformCall, log, mockFS, caches, nil, options, timer)

		// Stop now if there were errors
		if !log.HasErrors() {
			// Compile the bundle
			results, _ = bundle.Compile(log, timer, mangleCache, linker.Link)
		}

		timer.Log(log)
	}

	// Return the results
	var code []byte
	var sourceMap []byte
	var legalComments []byte

	// The primary output file is the one with the shortest path; the source
	// map and legal comments files append a suffix to that path (see below)
	var shortestAbsPath string
	for _, result := range results {
		if shortestAbsPath == "" || len(result.AbsPath) < len(shortestAbsPath) {
			shortestAbsPath = result.AbsPath
		}
	}

	// Unpack the JavaScript file, the source map file, and the legal comments file
	for _, result := range results {
		switch result.AbsPath {
		case shortestAbsPath:
			code = result.Contents
		case shortestAbsPath + ".map":
			sourceMap = result.Contents
		case shortestAbsPath + ".LEGAL.txt":
			legalComments = result.Contents
		}
	}

	// Only return the mangle cache for a successful build
	if log.HasErrors() {
		mangleCache = nil
	}

	msgs := log.Done()
	return TransformResult{
		Errors:        convertMessagesToPublic(logger.Error, msgs),
		Warnings:      convertMessagesToPublic(logger.Warning, msgs),
		Code:          code,
		Map:           sourceMap,
		LegalComments: legalComments,
		MangleCache:   mangleCache,
	}
}
+
+////////////////////////////////////////////////////////////////////////////////
+// Plugin API
+
// pluginImpl implements the plugin registration API ("OnStart", "OnResolve",
// "OnLoad") for a single plugin, accumulating callbacks into "plugin".
type pluginImpl struct {
	log    logger.Log
	fs     fs.FS
	plugin config.Plugin
}
+
+func (impl *pluginImpl) onStart(callback func() (OnStartResult, error)) {
+	impl.plugin.OnStart = append(impl.plugin.OnStart, config.OnStart{
+		Name: impl.plugin.Name,
+		Callback: func() (result config.OnStartResult) {
+			response, err := callback()
+
+			if err != nil {
+				result.ThrownError = err
+				return
+			}
+
+			// Convert log messages
+			result.Msgs = convertErrorsAndWarningsToInternal(response.Errors, response.Warnings)
+			return
+		},
+	})
+}
+
+func importKindToResolveKind(kind ast.ImportKind) ResolveKind {
+	switch kind {
+	case ast.ImportEntryPoint:
+		return ResolveEntryPoint
+	case ast.ImportStmt:
+		return ResolveJSImportStatement
+	case ast.ImportRequire:
+		return ResolveJSRequireCall
+	case ast.ImportDynamic:
+		return ResolveJSDynamicImport
+	case ast.ImportRequireResolve:
+		return ResolveJSRequireResolve
+	case ast.ImportAt:
+		return ResolveCSSImportRule
+	case ast.ImportComposesFrom:
+		return ResolveCSSComposesFrom
+	case ast.ImportURL:
+		return ResolveCSSURLToken
+	default:
+		panic("Internal error")
+	}
+}
+
+func resolveKindToImportKind(kind ResolveKind) ast.ImportKind {
+	switch kind {
+	case ResolveEntryPoint:
+		return ast.ImportEntryPoint
+	case ResolveJSImportStatement:
+		return ast.ImportStmt
+	case ResolveJSRequireCall:
+		return ast.ImportRequire
+	case ResolveJSDynamicImport:
+		return ast.ImportDynamic
+	case ResolveJSRequireResolve:
+		return ast.ImportRequireResolve
+	case ResolveCSSImportRule:
+		return ast.ImportAt
+	case ResolveCSSComposesFrom:
+		return ast.ImportComposesFrom
+	case ResolveCSSURLToken:
+		return ast.ImportURL
+	default:
+		panic("Internal error")
+	}
+}
+
// onResolve registers an "OnResolve" callback for this plugin. The filter
// regular expression is compiled up front; if it fails to compile, the error
// is logged and the callback is not registered.
func (impl *pluginImpl) onResolve(options OnResolveOptions, callback func(OnResolveArgs) (OnResolveResult, error)) {
	filter, err := config.CompileFilterForPlugin(impl.plugin.Name, "OnResolve", options.Filter)
	if filter == nil {
		impl.log.AddError(nil, logger.Range{}, err.Error())
		return
	}

	impl.plugin.OnResolve = append(impl.plugin.OnResolve, config.OnResolve{
		Name:      impl.plugin.Name,
		Filter:    filter,
		Namespace: options.Namespace,
		Callback: func(args config.OnResolveArgs) (result config.OnResolveResult) {
			// Translate the internal arguments into the public API shape
			response, err := callback(OnResolveArgs{
				Path:       args.Path,
				Importer:   args.Importer.Text,
				Namespace:  args.Importer.Namespace,
				ResolveDir: args.ResolveDir,
				Kind:       importKindToResolveKind(args.Kind),
				PluginData: args.PluginData,
				With:       args.With.DecodeIntoMap(),
			})
			result.PluginName = response.PluginName
			// Watch paths are validated (and logged) even if the callback failed
			result.AbsWatchFiles = impl.validatePathsArray(response.WatchFiles, "watch file")
			result.AbsWatchDirs = impl.validatePathsArray(response.WatchDirs, "watch directory")

			// Restrict the suffix to start with "?" or "#" for now to match esbuild's behavior
			if err == nil && response.Suffix != "" && response.Suffix[0] != '?' && response.Suffix[0] != '#' {
				err = fmt.Errorf("Invalid path suffix %q returned from plugin (must start with \"?\" or \"#\")", response.Suffix)
			}

			if err != nil {
				result.ThrownError = err
				return
			}

			result.Path = logger.Path{
				Text:          response.Path,
				Namespace:     response.Namespace,
				IgnoredSuffix: response.Suffix,
			}
			result.External = response.External
			result.IsSideEffectFree = response.SideEffects == SideEffectsFalse
			result.PluginData = response.PluginData

			// Convert log messages
			result.Msgs = convertErrorsAndWarningsToInternal(response.Errors, response.Warnings)

			// Warn if the plugin returned things without resolving the path
			if response.Path == "" && !response.External {
				var what string
				if response.Namespace != "" {
					what = "namespace"
				} else if response.Suffix != "" {
					what = "suffix"
				} else if response.PluginData != nil {
					what = "pluginData"
				} else if response.WatchFiles != nil {
					what = "watchFiles"
				} else if response.WatchDirs != nil {
					what = "watchDirs"
				}
				if what != "" {
					// Capitalize field names in the warning for the Go API
					path := "path"
					if logger.API == logger.GoAPI {
						what = strings.Title(what)
						path = strings.Title(path)
					}
					result.Msgs = append(result.Msgs, logger.Msg{
						Kind: logger.Warning,
						Data: logger.MsgData{Text: fmt.Sprintf("Returning %q doesn't do anything when %q is empty", what, path)},
					})
				}
			}
			return
		},
	})
}
+
// onLoad registers an "OnLoad" callback for this plugin. The filter regular
// expression is compiled up front; if it fails to compile, the error is
// logged and the callback is not registered.
func (impl *pluginImpl) onLoad(options OnLoadOptions, callback func(OnLoadArgs) (OnLoadResult, error)) {
	filter, err := config.CompileFilterForPlugin(impl.plugin.Name, "OnLoad", options.Filter)
	if filter == nil {
		impl.log.AddError(nil, logger.Range{}, err.Error())
		return
	}

	impl.plugin.OnLoad = append(impl.plugin.OnLoad, config.OnLoad{
		Filter:    filter,
		Namespace: options.Namespace,
		Callback: func(args config.OnLoadArgs) (result config.OnLoadResult) {
			// Translate the internal arguments into the public API shape
			response, err := callback(OnLoadArgs{
				Path:       args.Path.Text,
				Namespace:  args.Path.Namespace,
				PluginData: args.PluginData,
				Suffix:     args.Path.IgnoredSuffix,
				With:       args.Path.ImportAttributes.DecodeIntoMap(),
			})
			result.PluginName = response.PluginName
			// Watch paths are validated (and logged) even if the callback failed
			result.AbsWatchFiles = impl.validatePathsArray(response.WatchFiles, "watch file")
			result.AbsWatchDirs = impl.validatePathsArray(response.WatchDirs, "watch directory")

			if err != nil {
				result.ThrownError = err
				return
			}

			result.Contents = response.Contents
			result.Loader = validateLoader(response.Loader)
			result.PluginData = response.PluginData
			pathKind := fmt.Sprintf("resolve directory path for plugin %q", impl.plugin.Name)
			// Only overwrite the resolve directory when validation produced
			// a non-empty absolute path
			if absPath := validatePath(impl.log, impl.fs, response.ResolveDir, pathKind); absPath != "" {
				result.AbsResolveDir = absPath
			}

			// Convert log messages
			result.Msgs = convertErrorsAndWarningsToInternal(response.Errors, response.Warnings)
			return
		},
	})
}
+
+func (impl *pluginImpl) validatePathsArray(pathsIn []string, name string) (pathsOut []string) {
+	if len(pathsIn) > 0 {
+		pathKind := fmt.Sprintf("%s path for plugin %q", name, impl.plugin.Name)
+		for _, relPath := range pathsIn {
+			if absPath := validatePath(impl.log, impl.fs, relPath, pathKind); absPath != "" {
+				pathsOut = append(pathsOut, absPath)
+			}
+		}
+	}
+	return
+}
+
// loadPlugins runs the setup function of every plugin in the initial build
// options, collecting "onEnd" and "onDispose" callbacks for the caller. The
// returned "finalizeBuildOptions" must be called after the build options have
// been validated; it installs the accumulated plugins into the options and
// records the options pointer used by the "resolve" helper below.
func loadPlugins(initialOptions *BuildOptions, fs fs.FS, log logger.Log, caches *cache.CacheSet) (
	onEndCallbacks []onEndCallback,
	onDisposeCallbacks []func(),
	finalizeBuildOptions func(*config.Options),
) {
	// Clone the plugin array to guard against mutation during iteration
	clone := append(make([]Plugin, 0, len(initialOptions.Plugins)), initialOptions.Plugins...)

	var optionsForResolve *config.Options
	var plugins []config.Plugin

	// This is called after the build options have been validated
	finalizeBuildOptions = func(options *config.Options) {
		options.Plugins = plugins
		optionsForResolve = options
	}

	// NOTE(review): the "resolve" closure below captures the loop variable
	// "item" and can run after this loop has finished; with Go versions
	// before 1.22 the loop variable is shared across iterations — confirm
	// the module's Go version or shadow "item" per iteration
	for i, item := range clone {
		if item.Name == "" {
			log.AddError(nil, logger.Range{}, fmt.Sprintf("Plugin at index %d is missing a name", i))
			continue
		}

		impl := &pluginImpl{
			fs:     fs,
			log:    log,
			plugin: config.Plugin{Name: item.Name},
		}

		resolve := func(path string, options ResolveOptions) (result ResolveResult) {
			// If options are missing, then this is being called before plugin setup
			// has finished. That isn't allowed because plugin setup is allowed to
			// change the initial options object, which can affect path resolution.
			if optionsForResolve == nil {
				return ResolveResult{Errors: []Message{{Text: "Cannot call \"resolve\" before plugin setup has completed"}}}
			}

			if options.Kind == ResolveNone {
				return ResolveResult{Errors: []Message{{Text: "Must specify \"kind\" when calling \"resolve\""}}}
			}

			// Make a new resolver so it has its own log
			log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, validateLogOverrides(initialOptions.LogOverride))
			optionsClone := *optionsForResolve
			resolver := resolver.NewResolver(config.BuildCall, fs, log, caches, &optionsClone)

			// Make sure the resolve directory is an absolute path, which can fail
			absResolveDir := validatePath(log, fs, options.ResolveDir, "resolve directory")
			if log.HasErrors() {
				msgs := log.Done()
				result.Errors = convertMessagesToPublic(logger.Error, msgs)
				result.Warnings = convertMessagesToPublic(logger.Warning, msgs)
				return
			}

			// Run path resolution
			kind := resolveKindToImportKind(options.Kind)
			resolveResult, _, _ := bundler.RunOnResolvePlugins(
				plugins,
				resolver,
				log,
				fs,
				&caches.FSCache,
				nil,            // importSource
				logger.Range{}, // importPathRange
				logger.Path{Text: options.Importer, Namespace: options.Namespace},
				path,
				logger.EncodeImportAttributes(options.With),
				kind,
				absResolveDir,
				options.PluginData,
			)
			msgs := log.Done()

			// Populate the result
			result.Errors = convertMessagesToPublic(logger.Error, msgs)
			result.Warnings = convertMessagesToPublic(logger.Warning, msgs)
			if resolveResult != nil {
				result.Path = resolveResult.PathPair.Primary.Text
				result.External = resolveResult.PathPair.IsExternal
				result.SideEffects = resolveResult.PrimarySideEffectsData == nil
				result.Namespace = resolveResult.PathPair.Primary.Namespace
				result.Suffix = resolveResult.PathPair.Primary.IgnoredSuffix
				result.PluginData = resolveResult.PluginData
			} else if len(result.Errors) == 0 {
				// Always fail with at least one error
				pluginName := item.Name
				if options.PluginName != "" {
					pluginName = options.PluginName
				}
				text, _, notes := bundler.ResolveFailureErrorTextSuggestionNotes(resolver, path, kind, pluginName, fs, absResolveDir, optionsForResolve.Platform, "", "")
				result.Errors = append(result.Errors, convertMessagesToPublic(logger.Error, []logger.Msg{{
					Data:  logger.MsgData{Text: text},
					Notes: notes,
				}})...)
			}
			return
		}

		onEnd := func(fn func(*BuildResult) (OnEndResult, error)) {
			onEndCallbacks = append(onEndCallbacks, onEndCallback{
				pluginName: item.Name,
				fn:         fn,
			})
		}

		onDispose := func(fn func()) {
			onDisposeCallbacks = append(onDisposeCallbacks, fn)
		}

		// Run this plugin's setup function, exposing the registration API
		item.Setup(PluginBuild{
			InitialOptions: initialOptions,
			Resolve:        resolve,
			OnStart:        impl.onStart,
			OnEnd:          onEnd,
			OnDispose:      onDispose,
			OnResolve:      impl.onResolve,
			OnLoad:         impl.onLoad,
		})

		plugins = append(plugins, impl.plugin)
	}

	return
}
+
+////////////////////////////////////////////////////////////////////////////////
+// FormatMessages API
+
+// formatMsgsImpl renders the given public messages as human-readable strings
+// using esbuild's internal log formatter. All messages in one call share the
+// kind selected by opts.Kind (warning vs. error); color escapes and terminal
+// width come from opts. Source snippets are always included.
+func formatMsgsImpl(msgs []Message, opts FormatMessagesOptions) []string {
+	kind := logger.Error
+	if opts.Kind == WarningMessage {
+		kind = logger.Warning
+	}
+	// Convert to the internal message representation so the shared
+	// "msg.String" formatter can be reused.
+	logMsgs := convertMessagesToInternal(nil, kind, msgs)
+	strings := make([]string, len(logMsgs))
+	for i, msg := range logMsgs {
+		strings[i] = msg.String(
+			logger.OutputOptions{
+				IncludeSource: true,
+			},
+			logger.TerminalInfo{
+				UseColorEscapes: opts.Color,
+				Width:           opts.TerminalWidth,
+			},
+		)
+	}
+	return strings
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// AnalyzeMetafile API
+
+// metafileEntry is one row of the "AnalyzeMetafile" report: either an output
+// file (whose "entries" list the inputs that contributed bytes to it) or one
+// of those input files itself.
+type metafileEntry struct {
+	name       string          // output or input path as written in the metafile
+	entryPoint string          // the output's "entryPoint" value, or "" if none
+	entries    []metafileEntry // contributing inputs, sorted largest-first
+	size       int             // byte count ("bytes" or "bytesInOutput")
+}
+
+// This type is just so we can use Go's native sort function
+type metafileArray []metafileEntry
+
+func (a metafileArray) Len() int          { return len(a) }
+func (a metafileArray) Swap(i int, j int) { a[i], a[j] = a[j], a[i] }
+
+// Less sorts by descending size, breaking ties by ascending name so the
+// report output is deterministic.
+func (a metafileArray) Less(i int, j int) bool {
+	ai := a[i]
+	aj := a[j]
+	return ai.size > aj.size || (ai.size == aj.size && ai.name < aj.name)
+}
+
+// getObjectProperty returns the value of the property named "key" on a JSON
+// object literal, or a zero Expr when "expr" is not an object or lacks that
+// key. Keys are assumed to be string literals (this AST comes from parsed
+// JSON), so the EString type assertion is expected to always hold.
+func getObjectProperty(expr js_ast.Expr, key string) js_ast.Expr {
+	if obj, ok := expr.Data.(*js_ast.EObject); ok {
+		for _, prop := range obj.Properties {
+			if helpers.UTF16EqualsString(prop.Key.Data.(*js_ast.EString).Value, key) {
+				return prop.ValueOrNil
+			}
+		}
+	}
+	return js_ast.Expr{}
+}
+
+// getObjectPropertyNumber is getObjectProperty narrowed to number values;
+// it returns nil when the property is missing or not a number.
+func getObjectPropertyNumber(expr js_ast.Expr, key string) *js_ast.ENumber {
+	value, _ := getObjectProperty(expr, key).Data.(*js_ast.ENumber)
+	return value
+}
+
+// getObjectPropertyString is getObjectProperty narrowed to string values
+// (nil when missing or not a string).
+func getObjectPropertyString(expr js_ast.Expr, key string) *js_ast.EString {
+	value, _ := getObjectProperty(expr, key).Data.(*js_ast.EString)
+	return value
+}
+
+// getObjectPropertyObject is getObjectProperty narrowed to object values
+// (nil when missing or not an object).
+func getObjectPropertyObject(expr js_ast.Expr, key string) *js_ast.EObject {
+	value, _ := getObjectProperty(expr, key).Data.(*js_ast.EObject)
+	return value
+}
+
+// getObjectPropertyArray is getObjectProperty narrowed to array values
+// (nil when missing or not an array).
+func getObjectPropertyArray(expr js_ast.Expr, key string) *js_ast.EArray {
+	value, _ := getObjectProperty(expr, key).Data.(*js_ast.EArray)
+	return value
+}
+
+// analyzeMetafileImpl renders a human-readable summary of a JSON metafile:
+// one table section per output file (".map" files excluded) listing the input
+// files that contributed bytes to it, each with its size and percentage.
+// When opts.Verbose is set it also prints, under each input, the import chain
+// explaining why that file ended up in the bundle. Returns "" when the
+// metafile cannot be parsed as JSON or has no "outputs" object.
+func analyzeMetafileImpl(metafile string, opts AnalyzeMetafileOptions) string {
+	log := logger.NewDeferLog(logger.DeferLogNoVerboseOrDebug, nil)
+	source := logger.Source{Contents: metafile}
+
+	if result, ok := js_parser.ParseJSON(log, source, js_parser.JSONOptions{}); ok {
+		if outputs := getObjectPropertyObject(result, "outputs"); outputs != nil {
+			var entries metafileArray
+			var entryPoints []string
+
+			// Scan over the "outputs" object
+			for _, output := range outputs.Properties {
+				if key := helpers.UTF16ToString(output.Key.Data.(*js_ast.EString).Value); !strings.HasSuffix(key, ".map") {
+					entryPointPath := ""
+					if entryPoint := getObjectPropertyString(output.ValueOrNil, "entryPoint"); entryPoint != nil {
+						entryPointPath = helpers.UTF16ToString(entryPoint.Value)
+						entryPoints = append(entryPoints, entryPointPath)
+					}
+
+					if bytes := getObjectPropertyNumber(output.ValueOrNil, "bytes"); bytes != nil {
+						if inputs := getObjectPropertyObject(output.ValueOrNil, "inputs"); inputs != nil {
+							var children metafileArray
+
+							// Inputs with zero contributed bytes are omitted from the report
+							for _, input := range inputs.Properties {
+								if bytesInOutput := getObjectPropertyNumber(input.ValueOrNil, "bytesInOutput"); bytesInOutput != nil && bytesInOutput.Value > 0 {
+									children = append(children, metafileEntry{
+										name: helpers.UTF16ToString(input.Key.Data.(*js_ast.EString).Value),
+										size: int(bytesInOutput.Value),
+									})
+								}
+							}
+
+							sort.Sort(children)
+
+							entries = append(entries, metafileEntry{
+								name:       key,
+								size:       int(bytes.Value),
+								entries:    children,
+								entryPoint: entryPointPath,
+							})
+						}
+					}
+				}
+			}
+
+			sort.Sort(entries)
+
+			type importData struct {
+				imports []string
+			}
+
+			type graphData struct {
+				parent string // the importer that reaches this file with the fewest hops
+				depth  uint32 // hop count from the nearest entry point
+			}
+
+			importsForPath := make(map[string]importData)
+
+			// Scan over the "inputs" object
+			if inputs := getObjectPropertyObject(result, "inputs"); inputs != nil {
+				for _, prop := range inputs.Properties {
+					if imports := getObjectPropertyArray(prop.ValueOrNil, "imports"); imports != nil {
+						var data importData
+
+						for _, item := range imports.Items {
+							if path := getObjectPropertyString(item, "path"); path != nil {
+								data.imports = append(data.imports, helpers.UTF16ToString(path.Value))
+							}
+						}
+
+						importsForPath[helpers.UTF16ToString(prop.Key.Data.(*js_ast.EString).Value)] = data
+					}
+				}
+			}
+
+			// Returns a graph with links pointing from imports to importers
+			graphForEntryPoints := func(worklist []string) map[string]graphData {
+				// The graph is only needed for the verbose import-chain display
+				if !opts.Verbose {
+					return nil
+				}
+
+				graph := make(map[string]graphData)
+
+				for _, entryPoint := range worklist {
+					graph[entryPoint] = graphData{}
+				}
+
+				// Explore outward from the entry points, relaxing each file's
+				// depth whenever a shorter chain to an entry point is found and
+				// remembering which importer ("parent") provides that chain.
+				for len(worklist) > 0 {
+					top := worklist[len(worklist)-1]
+					worklist = worklist[:len(worklist)-1]
+					childDepth := graph[top].depth + 1
+
+					for _, importPath := range importsForPath[top].imports {
+						imported, ok := graph[importPath]
+						if !ok {
+							// Unvisited files start at "infinite" depth so any
+							// real chain relaxes them
+							imported.depth = math.MaxUint32
+						}
+
+						if imported.depth > childDepth {
+							imported.depth = childDepth
+							imported.parent = top
+							graph[importPath] = imported
+							worklist = append(worklist, importPath)
+						}
+					}
+				}
+
+				return graph
+			}
+
+			graphForAllEntryPoints := graphForEntryPoints(entryPoints)
+
+			// One rendered table row: file name, byte count, percentage. The
+			// rune lengths are cached so column widths can be computed once.
+			type tableEntry struct {
+				first      string
+				second     string
+				third      string
+				firstLen   int
+				secondLen  int
+				thirdLen   int
+				isTopLevel bool
+			}
+
+			var table []tableEntry
+			var colors logger.Colors
+
+			if opts.Color {
+				colors = logger.TerminalColors
+			}
+
+			// Build up the table with an entry for each output file (other than ".map" files)
+			for _, entry := range entries {
+				second := prettyPrintByteCount(entry.size)
+				third := "100.0%"
+
+				table = append(table, tableEntry{
+					first:      entry.name,
+					firstLen:   utf8.RuneCountInString(entry.name),
+					second:     second,
+					secondLen:  len(second),
+					third:      third,
+					thirdLen:   len(third),
+					isTopLevel: true,
+				})
+
+				graph := graphForAllEntryPoints
+				if entry.entryPoint != "" {
+					// If there are multiple entry points and this output file is from an
+					// entry point, prefer import paths for this entry point. This is less
+					// confusing than showing import paths for another entry point.
+					graph = graphForEntryPoints([]string{entry.entryPoint})
+				}
+
+				// Add a sub-entry for each input file in this output file
+				for j, child := range entry.entries {
+					indent := " ├ "
+					if j+1 == len(entry.entries) {
+						indent = " └ "
+					}
+					percent := 100.0 * float64(child.size) / float64(entry.size)
+
+					first := indent + child.name
+					second := prettyPrintByteCount(child.size)
+					third := fmt.Sprintf("%.1f%%", percent)
+
+					table = append(table, tableEntry{
+						first:     first,
+						firstLen:  utf8.RuneCountInString(first),
+						second:    second,
+						secondLen: len(second),
+						third:     third,
+						thirdLen:  len(third),
+					})
+
+					// If we're in verbose mode, also print the import chain from this file
+					// up toward an entry point to show why this file is in the bundle
+					if opts.Verbose {
+						indent = " │ "
+						if j+1 == len(entry.entries) {
+							indent = "   "
+						}
+						data := graph[child.name]
+						depth := 0
+
+						// Walk parent links until an entry point (depth 0) is reached
+						for data.depth != 0 {
+							table = append(table, tableEntry{
+								first: fmt.Sprintf("%s%s%s └ %s%s", indent, colors.Dim, strings.Repeat(" ", depth), data.parent, colors.Reset),
+							})
+							data = graph[data.parent]
+							depth += 3
+						}
+					}
+				}
+			}
+
+			maxFirstLen := 0
+			maxSecondLen := 0
+			maxThirdLen := 0
+
+			// Calculate column widths
+			for _, entry := range table {
+				if maxFirstLen < entry.firstLen {
+					maxFirstLen = entry.firstLen
+				}
+				if maxSecondLen < entry.secondLen {
+					maxSecondLen = entry.secondLen
+				}
+				if maxThirdLen < entry.thirdLen {
+					maxThirdLen = entry.thirdLen
+				}
+			}
+
+			sb := strings.Builder{}
+
+			// Render the columns now that we know the widths
+			for _, entry := range table {
+				prefix := "\n"
+				color := colors.Bold
+				if !entry.isTopLevel {
+					prefix = ""
+					color = ""
+				}
+
+				// Import paths don't have second and third columns
+				if entry.second == "" && entry.third == "" {
+					sb.WriteString(fmt.Sprintf("%s  %s\n",
+						prefix,
+						entry.first,
+					))
+					continue
+				}
+
+				second := entry.second
+				secondTrimmed := strings.TrimRight(second, " ")
+				lineChar := " "
+				extraSpace := 0
+
+				// Verbose mode connects the columns with a horizontal rule
+				if opts.Verbose {
+					lineChar = "─"
+					extraSpace = 1
+				}
+
+				sb.WriteString(fmt.Sprintf("%s  %s%s%s %s%s%s %s%s%s %s%s%s %s%s%s\n",
+					prefix,
+					color,
+					entry.first,
+					colors.Reset,
+					colors.Dim,
+					strings.Repeat(lineChar, extraSpace+maxFirstLen-entry.firstLen+maxSecondLen-entry.secondLen),
+					colors.Reset,
+					color,
+					secondTrimmed,
+					colors.Reset,
+					colors.Dim,
+					strings.Repeat(lineChar, extraSpace+maxThirdLen-entry.thirdLen+len(second)-len(secondTrimmed)),
+					colors.Reset,
+					color,
+					entry.third,
+					colors.Reset,
+				))
+			}
+
+			return sb.String()
+		}
+	}
+
+	return ""
+}
+
+// stripDirPrefix returns the remainder of "path" after removing the leading
+// directory "prefix", plus whether the prefix matched at a directory
+// boundary. "allowedSlashes" lists the byte values treated as separators
+// (e.g. "/" on Unix-style paths, "\\/" to also accept backslashes).
+func stripDirPrefix(path string, prefix string, allowedSlashes string) (string, bool) {
+	if strings.HasPrefix(path, prefix) {
+		pathLen := len(path)
+		prefixLen := len(prefix)
+
+		// Just return the path if there is no prefix
+		if prefixLen == 0 {
+			return path, true
+		}
+
+		// Return the empty string if the path equals the prefix
+		if pathLen == prefixLen {
+			return "", true
+		}
+
+		if strings.IndexByte(allowedSlashes, prefix[prefixLen-1]) >= 0 {
+			// Return the suffix if the prefix ends in a slash. Examples:
+			//
+			//   stripDirPrefix(`/foo`, `/`, `/`) => `foo`
+			//   stripDirPrefix(`C:\foo`, `C:\`, `\/`) => `foo`
+			//
+			return path[prefixLen:], true
+		} else if strings.IndexByte(allowedSlashes, path[prefixLen]) >= 0 {
+			// Return the suffix if there's a slash after the prefix. Examples:
+			//
+			//   stripDirPrefix(`/foo/bar`, `/foo`, `/`) => `bar`
+			//   stripDirPrefix(`C:\foo\bar`, `C:\foo`, `\/`) => `bar`
+			//
+			return path[prefixLen+1:], true
+		}
+	}
+
+	return "", false
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/api_js_table.go b/source/vendor/github.com/evanw/esbuild/pkg/api/api_js_table.go
new file mode 100644
index 0000000..72177f1
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/api_js_table.go
@@ -0,0 +1,50 @@
+// This file was automatically generated by "js_table.ts"
+
+package api
+
+import "github.com/evanw/esbuild/internal/compat"
+
+// EngineName enumerates the JavaScript engines that can be named as
+// compatibility targets. This file is generated by "js_table.ts"; keep the
+// list in sync with the internal compat package rather than editing by hand.
+type EngineName uint8
+
+const (
+	EngineChrome EngineName = iota
+	EngineDeno
+	EngineEdge
+	EngineFirefox
+	EngineHermes
+	EngineIE
+	EngineIOS
+	EngineNode
+	EngineOpera
+	EngineRhino
+	EngineSafari
+)
+
+// convertEngineName maps a public EngineName value onto the corresponding
+// internal compat.Engine constant. It panics on an out-of-range value, which
+// indicates a programmer error (the enum and this switch are generated
+// together by "js_table.ts").
+func convertEngineName(engine EngineName) compat.Engine {
+	switch engine {
+	case EngineChrome:
+		return compat.Chrome
+	case EngineDeno:
+		return compat.Deno
+	case EngineEdge:
+		return compat.Edge
+	case EngineFirefox:
+		return compat.Firefox
+	case EngineHermes:
+		return compat.Hermes
+	case EngineIE:
+		return compat.IE
+	case EngineIOS:
+		return compat.IOS
+	case EngineNode:
+		return compat.Node
+	case EngineOpera:
+		return compat.Opera
+	case EngineRhino:
+		return compat.Rhino
+	case EngineSafari:
+		return compat.Safari
+	default:
+		panic("Invalid engine name")
+	}
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/favicon.go b/source/vendor/github.com/evanw/esbuild/pkg/api/favicon.go
new file mode 100644
index 0000000..c3ec13a
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/favicon.go
@@ -0,0 +1,31 @@
+package api
+
+// This is the "favicon.ico" file used by esbuild's built-in development server
+// The bytes are gzip-compressed (note the 0x1F 0x8B gzip magic number at the
+// start); they are served with "Content-Encoding: gzip" by the request
+// handler, so only clients that accept gzip receive this payload.
+var favicon_ico_gz = []byte{
+	0x1F, 0x8B, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x03, 0x63, 0x60, 0x60, 0x64, 0x60, 0x62,
+	0x10, 0x10, 0x60, 0x00, 0xD2, 0x0A, 0x0C, 0x19, 0x2C, 0x0C, 0x0C, 0x6A, 0x0C, 0x0C, 0x0C, 0x0A,
+	0x0A, 0x10, 0xBE, 0x86, 0x20, 0x03, 0x43, 0x1F, 0x50, 0x4C, 0x03, 0x28, 0x26, 0x00, 0x12, 0x67,
+	0x80, 0x88, 0x13, 0x04, 0xE7, 0xFF, 0xFF, 0x27, 0x09, 0xD3, 0x4A, 0xFF, 0xC9, 0xEF, 0xFF, 0x59,
+	0x97, 0x9F, 0x81, 0xAB, 0x63, 0x5B, 0x72, 0xF2, 0x3F, 0xC3, 0x99, 0xDF, 0x44, 0xEB, 0xE7, 0x29,
+	0xE9, 0xF9, 0x2F, 0xAE, 0xA8, 0x02, 0xD6, 0xC7, 0x74, 0xE0, 0xCD, 0x7F, 0x09, 0x45, 0xE5, 0xFF,
+	0x02, 0xA1, 0xA9, 0x98, 0x66, 0xE0, 0xB1, 0x5F, 0xC8, 0x27, 0x12, 0x6E, 0x06, 0xFB, 0xEC, 0x7D,
+	0xFF, 0x25, 0xE4, 0x14, 0x30, 0xCD, 0xC0, 0xE7, 0x7F, 0xA0, 0x19, 0xC2, 0x0E, 0xDE, 0x60, 0x33,
+	0x58, 0x36, 0x5C, 0xFF, 0xCF, 0x31, 0x79, 0xF3, 0x7F, 0x49, 0x49, 0xC9, 0xFF, 0xFC, 0xB1, 0xF9,
+	0x44, 0xE9, 0x47, 0xB6, 0x93, 0xF1, 0xD8, 0x17, 0x14, 0xF7, 0x10, 0xD2, 0x4F, 0x94, 0x5E, 0x02,
+	0xFA, 0x05, 0x42, 0x53, 0xC0, 0x7E, 0x85, 0xE9, 0xC7, 0xD0, 0x4B, 0xCB, 0xF8, 0xA7, 0x85, 0xFE,
+	0x9A, 0x99, 0x68, 0x78, 0x56, 0x3D, 0xF9, 0xFA, 0xB1, 0xE8, 0x25, 0x5A, 0x3F, 0x0E, 0xBD, 0x44,
+	0xE9, 0xC7, 0xA3, 0x97, 0xA0, 0x7E, 0x02, 0x7A, 0x29, 0x00, 0x1A, 0xD0, 0x32, 0xC6, 0x81, 0xD8,
+	0x72, 0x86, 0xDC, 0xF8, 0xA6, 0x34, 0x7D, 0x8C, 0xDA, 0x3F, 0x6A, 0x3F, 0x01, 0xFB, 0x99, 0x77,
+	0x3C, 0xFA, 0x2F, 0xEC, 0x1E, 0x0C, 0xA6, 0xD1, 0xE5, 0x58, 0xD7, 0x5C, 0x06, 0xCB, 0x31, 0x1D,
+	0xF9, 0x48, 0x13, 0xFB, 0x41, 0x76, 0x8A, 0x19, 0x98, 0x81, 0xEB, 0x09, 0x11, 0x1B, 0x57, 0x14,
+	0x7B, 0x40, 0x76, 0x4B, 0xA8, 0xA8, 0x63, 0x95, 0xA3, 0x85, 0xFD, 0xE8, 0xF6, 0xE0, 0x93, 0xA3,
+	0x76, 0xF8, 0x53, 0xCD, 0x0D, 0x64, 0xA6, 0x3F, 0xAA, 0xB9, 0x81, 0x82, 0xF4, 0x4F, 0x15, 0x37,
+	0x50, 0x98, 0xFF, 0xD8, 0x16, 0x1E, 0x85, 0xDB, 0x01, 0xC2, 0x02, 0x71, 0x05, 0x70, 0x39, 0x8E,
+	0x69, 0xDB, 0x71, 0xCA, 0x0D, 0x75, 0xFF, 0x0F, 0x64, 0xFC, 0x0F, 0x64, 0xFA, 0x1F, 0xE8, 0xFC,
+	0x3F, 0xD0, 0xE5, 0xDF, 0x40, 0x97, 0xFF, 0xA3, 0xF5, 0xEF, 0xA8, 0xFD, 0x44, 0x61, 0x8C, 0xBE,
+	0x0C, 0x36, 0x3C, 0xA7, 0x7E, 0xE0, 0xEC, 0x9F, 0x53, 0x4F, 0xD3, 0xF6, 0x3F, 0x35, 0xEC, 0xA6,
+	0x89, 0xFD, 0x73, 0x48, 0xEB, 0x63, 0x51, 0xD5, 0xFE, 0x39, 0xA4, 0xF7, 0xEF, 0xA8, 0x66, 0xFF,
+	0x1C, 0xF2, 0xFA, 0x96, 0x54, 0xB1, 0x7F, 0x0E, 0xF9, 0xFD, 0x5A, 0x8A, 0xED, 0x9F, 0x43, 0x59,
+	0x9F, 0x9A, 0x22, 0xFB, 0xE7, 0x50, 0xDE, 0x9F, 0x27, 0xDB, 0xFE, 0x39, 0x34, 0x1B, 0x4B, 0x18,
+	0xCE, 0x00, 0x00, 0xDA, 0xEB, 0x61, 0xFD, 0xB6, 0x15, 0x00, 0x00,
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/serve_other.go b/source/vendor/github.com/evanw/esbuild/pkg/api/serve_other.go
new file mode 100644
index 0000000..0f5f3aa
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/serve_other.go
@@ -0,0 +1,990 @@
+//go:build !js || !wasm
+// +build !js !wasm
+
+package api
+
+// This file implements the "Serve()" function in esbuild's public API. It
+// provides a basic web server that can serve a directory tree over HTTP. When
+// a directory is visited the "index.html" will be served if present, otherwise
+// esbuild will automatically generate a directory listing page with links for
+// each file in the directory. If there is a build configured that generates
+// output files, those output files are not written to disk but are instead
+// "overlayed" virtually on top of the real file system. The server responds to
+// HTTP requests for output files from the build with the latest in-memory
+// build results.
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/http"
+	"os"
+	"path"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"time"
+
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/helpers"
+	"github.com/evanw/esbuild/internal/logger"
+)
+
+////////////////////////////////////////////////////////////////////////////////
+// Serve API
+
+// apiHandler is the http.Handler behind the "Serve()" API. It overlays
+// in-memory build output on top of an optional on-disk "servedir" and pushes
+// rebuild notifications to connected clients over server-sent events.
+type apiHandler struct {
+	onRequest        func(ServeOnRequestArgs) // optional per-request callback (invoked on a goroutine)
+	rebuild          func() BuildResult       // runs a build and returns the latest result
+	stop             func()                   // provided by the server owner; not called in this file
+	fs               fs.FS
+	absOutputDir     string // absolute output directory of the build
+	outdirPathPrefix string // URL path prefix that maps to the output directory
+	publicPath       string // prefix prepended to URLs in change events
+	servedir         string // on-disk directory to serve, "" to disable
+	keyfileToLower   string // lowercased TLS key path, never served (case-insensitive check)
+	certfileToLower  string // lowercased TLS certificate path, never served
+	fallback         string // HTML file served when nothing else matches, "" to disable
+	serveWaitGroup   sync.WaitGroup
+	activeStreams    []chan serverSentEvent // open event-stream clients; guarded by mutex
+	currentHashes    map[string]string      // output path -> content hash from the last good build; guarded by mutex
+	mutex            sync.Mutex
+}
+
+// serverSentEvent is one message pushed over the "/esbuild" event stream,
+// serialized as "event: <event>\ndata: <data>\n\n".
+type serverSentEvent struct {
+	event string
+	data  string
+}
+
+// escapeForHTML escapes "&", "<", and ">" so text can be embedded in HTML
+// element content. It does NOT escape quotes; use escapeForAttribute for
+// attribute values.
+func escapeForHTML(text string) string {
+	text = strings.ReplaceAll(text, "&", "&amp;")
+	text = strings.ReplaceAll(text, "<", "&lt;")
+	text = strings.ReplaceAll(text, ">", "&gt;")
+	return text
+}
+
+// escapeForAttribute escapes text for use inside an HTML attribute value:
+// everything escapeForHTML covers, plus double and single quotes.
+func escapeForAttribute(text string) string {
+	text = escapeForHTML(text)
+	text = strings.ReplaceAll(text, "\"", "&quot;")
+	text = strings.ReplaceAll(text, "'", "&apos;")
+	return text
+}
+
+// notifyRequest forwards one completed request's metadata (remote address,
+// method, path, status, duration) to the user's OnRequest callback, if any.
+// Callers invoke this with "go" so a slow callback cannot stall serving.
+func (h *apiHandler) notifyRequest(duration time.Duration, req *http.Request, status int) {
+	if h.onRequest != nil {
+		h.onRequest(ServeOnRequestArgs{
+			RemoteAddress: req.RemoteAddr,
+			Method:        req.Method,
+			Path:          req.URL.Path,
+			Status:        status,
+			TimeInMS:      int(duration.Milliseconds()),
+		})
+	}
+}
+
+// errorsToString formats build errors as plain text for an HTTP error
+// response body. At most 5 errors are rendered in full; beyond that a
+// "%d out of %d errors shown" summary line is appended instead.
+func errorsToString(errors []Message) string {
+	stderrOptions := logger.OutputOptions{IncludeSource: true}
+	terminalOptions := logger.TerminalInfo{}
+	sb := strings.Builder{}
+	limit := 5
+	for i, msg := range convertMessagesToInternal(nil, logger.Error, errors) {
+		if i == limit {
+			sb.WriteString(fmt.Sprintf("%d out of %d errors shown\n", limit, len(errors)))
+			break
+		}
+		sb.WriteString(msg.String(stderrOptions, terminalOptions))
+	}
+	return sb.String()
+}
+
+// ServeHTTP dispatches one dev-server request. In order it handles: the
+// "/esbuild" server-sent-event stream; GET/HEAD requests for build output
+// (triggering a rebuild first) overlaid on files and directory listings from
+// "servedir", with optional index.html and fallback-page handling; a built-in
+// gzip favicon; and finally a plain-text 404. Request notifications are
+// dispatched asynchronously via notifyRequest.
+func (h *apiHandler) ServeHTTP(res http.ResponseWriter, req *http.Request) {
+	start := time.Now()
+
+	// Special-case the esbuild event stream
+	if req.Method == "GET" && req.URL.Path == "/esbuild" && req.Header.Get("Accept") == "text/event-stream" {
+		h.serveEventStream(start, req, res)
+		return
+	}
+
+	// HEAD requests omit the body
+	maybeWriteResponseBody := func(bytes []byte) { res.Write(bytes) }
+	isHEAD := req.Method == "HEAD"
+	if isHEAD {
+		// Still call Write so headers are flushed, but send no body bytes
+		maybeWriteResponseBody = func([]byte) { res.Write(nil) }
+	}
+
+	// Handle GET and HEAD requests
+	if (isHEAD || req.Method == "GET") && strings.HasPrefix(req.URL.Path, "/") {
+		res.Header().Set("Access-Control-Allow-Origin", "*")
+		queryPath := path.Clean(req.URL.Path)[1:]
+		// Every request serves the latest build output
+		result := h.rebuild()
+
+		// Requests fail if the build had errors
+		if len(result.Errors) > 0 {
+			res.Header().Set("Content-Type", "text/plain; charset=utf-8")
+			go h.notifyRequest(time.Since(start), req, http.StatusServiceUnavailable)
+			res.WriteHeader(http.StatusServiceUnavailable)
+			maybeWriteResponseBody([]byte(errorsToString(result.Errors)))
+			return
+		}
+
+		type fileToServe struct {
+			absPath  string
+			contents fs.OpenedFile
+		}
+
+		var kind fs.EntryKind
+		var file fileToServe
+		dirEntries := make(map[string]bool)
+		fileEntries := make(map[string]bool)
+
+		// Check for a match with the results if we're within the output directory
+		if outdirQueryPath, ok := stripDirPrefix(queryPath, h.outdirPathPrefix, "/"); ok {
+			resultKind, inMemoryBytes, absPath, isImplicitIndexHTML := h.matchQueryPathToResult(outdirQueryPath, &result, dirEntries, fileEntries)
+			kind = resultKind
+			file = fileToServe{
+				absPath:  absPath,
+				contents: &fs.InMemoryOpenedFile{Contents: inMemoryBytes},
+			}
+			if isImplicitIndexHTML {
+				queryPath = path.Join(queryPath, "index.html")
+			}
+		} else {
+			// Create a fake directory entry for the output path so that it appears to be a real directory
+			p := h.outdirPathPrefix
+			for p != "" {
+				var dir string
+				var base string
+				if slash := strings.IndexByte(p, '/'); slash == -1 {
+					base = p
+				} else {
+					dir = p[:slash]
+					base = p[slash+1:]
+				}
+				if dir == queryPath {
+					kind = fs.DirEntry
+					dirEntries[base] = true
+					break
+				}
+				p = dir
+			}
+		}
+
+		// Check for a file in the "servedir" directory
+		if h.servedir != "" && kind != fs.FileEntry {
+			absPath := h.fs.Join(h.servedir, queryPath)
+			if absDir := h.fs.Dir(absPath); absDir != absPath {
+				if entries, err, _ := h.fs.ReadDirectory(absDir); err == nil {
+					if entry, _ := entries.Get(h.fs.Base(absPath)); entry != nil && entry.Kind(h.fs) == fs.FileEntry {
+						if h.keyfileToLower != "" || h.certfileToLower != "" {
+							if toLower := strings.ToLower(absPath); toLower == h.keyfileToLower || toLower == h.certfileToLower {
+								// Don't serve the HTTPS key or certificate. This uses a case-
+								// insensitive check because some file systems are case-sensitive.
+								go h.notifyRequest(time.Since(start), req, http.StatusForbidden)
+								res.WriteHeader(http.StatusForbidden)
+								maybeWriteResponseBody([]byte("403 - Forbidden"))
+								return
+							}
+						}
+						if contents, err, _ := h.fs.OpenFile(absPath); err == nil {
+							defer contents.Close()
+							file = fileToServe{absPath: absPath, contents: contents}
+							kind = fs.FileEntry
+						} else if err != syscall.ENOENT {
+							go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+							res.WriteHeader(http.StatusInternalServerError)
+							maybeWriteResponseBody([]byte(fmt.Sprintf("500 - Internal server error: %s", err.Error())))
+							return
+						}
+					}
+				}
+			}
+		}
+
+		// Check for a directory in the "servedir" directory
+		var servedirIndexName string
+		if h.servedir != "" && kind != fs.FileEntry {
+			if entries, err, _ := h.fs.ReadDirectory(h.fs.Join(h.servedir, queryPath)); err == nil {
+				kind = fs.DirEntry
+				for _, name := range entries.SortedKeys() {
+					entry, _ := entries.Get(name)
+					switch entry.Kind(h.fs) {
+					case fs.DirEntry:
+						dirEntries[name] = true
+					case fs.FileEntry:
+						fileEntries[name] = true
+						if name == "index.html" {
+							servedirIndexName = name
+						}
+					}
+				}
+			} else if err != syscall.ENOENT {
+				go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+				res.WriteHeader(http.StatusInternalServerError)
+				maybeWriteResponseBody([]byte(fmt.Sprintf("500 - Internal server error: %s", err.Error())))
+				return
+			}
+		}
+
+		// Redirect to a trailing slash for directories
+		if kind == fs.DirEntry && !strings.HasSuffix(req.URL.Path, "/") {
+			res.Header().Set("Location", path.Clean(req.URL.Path)+"/")
+			go h.notifyRequest(time.Since(start), req, http.StatusFound)
+			res.WriteHeader(http.StatusFound)
+			maybeWriteResponseBody(nil)
+			return
+		}
+
+		// Serve an "index.html" file if present
+		if kind == fs.DirEntry && servedirIndexName != "" {
+			queryPath += "/" + servedirIndexName
+			absPath := h.fs.Join(h.servedir, queryPath)
+			if contents, err, _ := h.fs.OpenFile(absPath); err == nil {
+				defer contents.Close()
+				file = fileToServe{absPath: absPath, contents: contents}
+				kind = fs.FileEntry
+			} else if err != syscall.ENOENT {
+				go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+				res.WriteHeader(http.StatusInternalServerError)
+				maybeWriteResponseBody([]byte(fmt.Sprintf("500 - Internal server error: %s", err.Error())))
+				return
+			}
+		}
+
+		// Serve the fallback HTML page if one was provided
+		if kind != fs.FileEntry && h.fallback != "" {
+			if contents, err, _ := h.fs.OpenFile(h.fallback); err == nil {
+				defer contents.Close()
+				file = fileToServe{absPath: h.fallback, contents: contents}
+				kind = fs.FileEntry
+			} else if err != syscall.ENOENT {
+				go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+				res.WriteHeader(http.StatusInternalServerError)
+				maybeWriteResponseBody([]byte(fmt.Sprintf("500 - Internal server error: %s", err.Error())))
+				return
+			}
+		}
+
+		// Serve a file
+		if kind == fs.FileEntry {
+			// Default to serving the whole file
+			status := http.StatusOK
+			fileContentsLen := file.contents.Len()
+			begin := 0
+			end := fileContentsLen
+			isRange := false
+
+			// Handle range requests so that video playback works in Safari
+			if rangeBegin, rangeEnd, ok := parseRangeHeader(req.Header.Get("Range"), fileContentsLen); ok && rangeBegin < rangeEnd {
+				// Note: The content range is inclusive so subtract 1 from the end
+				isRange = true
+				begin = rangeBegin
+				end = rangeEnd
+				status = http.StatusPartialContent
+			}
+
+			// Try to read the range from the file, which may fail
+			fileBytes, err := file.contents.Read(begin, end)
+			if err != nil {
+				go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+				res.WriteHeader(http.StatusInternalServerError)
+				maybeWriteResponseBody([]byte(fmt.Sprintf("500 - Internal server error: %s", err.Error())))
+				return
+			}
+
+			// If we get here, the request was successful
+			if contentType := helpers.MimeTypeByExtension(h.fs.Ext(file.absPath)); contentType != "" {
+				res.Header().Set("Content-Type", contentType)
+			} else {
+				res.Header().Set("Content-Type", "application/octet-stream")
+			}
+			if isRange {
+				res.Header().Set("Content-Range", fmt.Sprintf("bytes %d-%d/%d", begin, end-1, fileContentsLen))
+			}
+			res.Header().Set("Content-Length", fmt.Sprintf("%d", len(fileBytes)))
+			go h.notifyRequest(time.Since(start), req, status)
+			res.WriteHeader(status)
+			maybeWriteResponseBody(fileBytes)
+			return
+		}
+
+		// Serve a directory listing
+		if kind == fs.DirEntry {
+			html := respondWithDirList(queryPath, dirEntries, fileEntries)
+			res.Header().Set("Content-Type", "text/html; charset=utf-8")
+			res.Header().Set("Content-Length", fmt.Sprintf("%d", len(html)))
+			go h.notifyRequest(time.Since(start), req, http.StatusOK)
+			// No explicit WriteHeader: the first body write sends an implicit 200 OK
+			maybeWriteResponseBody(html)
+			return
+		}
+	}
+
+	// Satisfy requests for "favicon.ico" to avoid errors in Firefox developer tools
+	if req.Method == "GET" && req.URL.Path == "/favicon.ico" {
+		// The embedded favicon is stored gzip-compressed, so only serve it to
+		// clients that advertise gzip support in "Accept-Encoding"
+		for _, encoding := range strings.Split(req.Header.Get("Accept-Encoding"), ",") {
+			if semi := strings.IndexByte(encoding, ';'); semi >= 0 {
+				encoding = encoding[:semi]
+			}
+			if strings.TrimSpace(encoding) == "gzip" {
+				res.Header().Set("Content-Encoding", "gzip")
+				res.Header().Set("Content-Type", "image/vnd.microsoft.icon")
+				go h.notifyRequest(time.Since(start), req, http.StatusOK)
+				maybeWriteResponseBody(favicon_ico_gz)
+				return
+			}
+		}
+	}
+
+	// Default to a 404
+	res.Header().Set("Content-Type", "text/plain; charset=utf-8")
+	go h.notifyRequest(time.Since(start), req, http.StatusNotFound)
+	res.WriteHeader(http.StatusNotFound)
+	maybeWriteResponseBody([]byte("404 - Not Found"))
+}
+
+// This exposes an event stream to clients using server-sent events:
+// https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events
+//
+// serveEventStream registers a new channel in h.activeStreams, relays any
+// message sent on it to the client (with a ":" keep-alive every 30 seconds),
+// and removes/closes the channel when either side disconnects. Falls back to
+// a 500 response if the ResponseWriter cannot flush or notify on close.
+func (h *apiHandler) serveEventStream(start time.Time, req *http.Request, res http.ResponseWriter) {
+	if flusher, ok := res.(http.Flusher); ok {
+		// NOTE(review): http.CloseNotifier is deprecated in net/http in favor
+		// of req.Context().Done(); kept as-is to match upstream esbuild.
+		if closer, ok := res.(http.CloseNotifier); ok {
+			// Add a new stream to the array of active streams
+			stream := make(chan serverSentEvent)
+			h.mutex.Lock()
+			h.activeStreams = append(h.activeStreams, stream)
+			h.mutex.Unlock()
+
+			// Start the event stream
+			res.Header().Set("Content-Type", "text/event-stream")
+			res.Header().Set("Connection", "keep-alive")
+			res.Header().Set("Cache-Control", "no-cache")
+			res.Header().Set("Access-Control-Allow-Origin", "*")
+			go h.notifyRequest(time.Since(start), req, http.StatusOK)
+			res.WriteHeader(http.StatusOK)
+			res.Write([]byte("retry: 500\n"))
+			flusher.Flush()
+
+			// Send incoming messages over the stream
+			streamWasClosed := make(chan struct{}, 1)
+			go func() {
+				for {
+					var msg []byte
+					select {
+					case next, ok := <-stream:
+						if !ok {
+							streamWasClosed <- struct{}{}
+							return
+						}
+						msg = []byte(fmt.Sprintf("event: %s\ndata: %s\n\n", next.event, next.data))
+					case <-time.After(30 * time.Second):
+						// Send an occasional keep-alive
+						msg = []byte(":\n\n")
+					}
+					if _, err := res.Write(msg); err != nil {
+						return
+					}
+					flusher.Flush()
+				}
+			}()
+
+			// When the stream is closed (either by them or by us), remove it
+			// from the array and end the response body to clean up resources
+			select {
+			case <-closer.CloseNotify():
+			case <-streamWasClosed:
+			}
+			h.mutex.Lock()
+			for i := range h.activeStreams {
+				if h.activeStreams[i] == stream {
+					// Swap-remove: order of active streams doesn't matter
+					end := len(h.activeStreams) - 1
+					h.activeStreams[i] = h.activeStreams[end]
+					h.activeStreams = h.activeStreams[:end]
+
+					// Only close the stream if it's present in the list of active
+					// streams. Stopping the server can also call close on this
+					// stream and Go only lets you close a channel once before
+					// panicking, so we don't want to close it twice.
+					close(stream)
+					break
+				}
+			}
+			h.mutex.Unlock()
+			return
+		}
+	}
+
+	// If we get here, then event streaming isn't possible
+	go h.notifyRequest(time.Since(start), req, http.StatusInternalServerError)
+	res.WriteHeader(http.StatusInternalServerError)
+	res.Write([]byte("500 - Event stream error"))
+}
+
+// broadcastBuildResult diffs the previous output-file hashes against
+// "newHashes" and, when anything was added, removed, or updated, sends a
+// JSON-encoded "change" event to every connected event-stream client. The
+// handler mutex is held for the entire diff-and-broadcast so the stream list
+// and hash map cannot change mid-broadcast.
+//
+// NOTE(review): the "stream <-" send below blocks while h.mutex is held; it
+// relies on each stream's reader goroutine staying alive — verify against
+// serveEventStream's cleanup path.
+func (h *apiHandler) broadcastBuildResult(result BuildResult, newHashes map[string]string) {
+	h.mutex.Lock()
+
+	var added []string
+	var removed []string
+	var updated []string
+
+	// Map an absolute output path to the URL a browser would use for it,
+	// honoring the configured public path prefix
+	urlForPath := func(absPath string) (string, bool) {
+		if relPath, ok := stripDirPrefix(absPath, h.absOutputDir, "\\/"); ok {
+			relPath = strings.ReplaceAll(relPath, "\\", "/")
+			relPath = path.Join(h.outdirPathPrefix, relPath)
+			publicPath := h.publicPath
+			slash := "/"
+			if publicPath != "" && strings.HasSuffix(h.publicPath, "/") {
+				slash = ""
+			}
+			return fmt.Sprintf("%s%s%s", publicPath, slash, relPath), true
+		}
+		return "", false
+	}
+
+	// Diff the old and new states, but only if the build succeeded. We shouldn't
+	// make it appear as if all files were removed when there is a build error.
+	if len(result.Errors) == 0 {
+		oldHashes := h.currentHashes
+		h.currentHashes = newHashes
+
+		for absPath, newHash := range newHashes {
+			if oldHash, ok := oldHashes[absPath]; !ok {
+				if url, ok := urlForPath(absPath); ok {
+					added = append(added, url)
+				}
+			} else if newHash != oldHash {
+				if url, ok := urlForPath(absPath); ok {
+					updated = append(updated, url)
+				}
+			}
+		}
+
+		for absPath := range oldHashes {
+			if _, ok := newHashes[absPath]; !ok {
+				if url, ok := urlForPath(absPath); ok {
+					removed = append(removed, url)
+				}
+			}
+		}
+	}
+
+	// Only notify listeners if there's a change that's worth sending. That way
+	// you can implement a simple "reload on any change" script without having
+	// to do this check in the script.
+	if len(added) > 0 || len(removed) > 0 || len(updated) > 0 {
+		// Sort so the event payload is deterministic
+		sort.Strings(added)
+		sort.Strings(removed)
+		sort.Strings(updated)
+
+		// Assemble the diff
+		var sb strings.Builder
+		sb.WriteString("{\"added\":[")
+		for i, path := range added {
+			if i > 0 {
+				sb.WriteRune(',')
+			}
+			sb.Write(helpers.QuoteForJSON(path, false))
+		}
+		sb.WriteString("],\"removed\":[")
+		for i, path := range removed {
+			if i > 0 {
+				sb.WriteRune(',')
+			}
+			sb.Write(helpers.QuoteForJSON(path, false))
+		}
+		sb.WriteString("],\"updated\":[")
+		for i, path := range updated {
+			if i > 0 {
+				sb.WriteRune(',')
+			}
+			sb.Write(helpers.QuoteForJSON(path, false))
+		}
+		sb.WriteString("]}")
+		json := sb.String()
+
+		// Broadcast the diff to all streams
+		for _, stream := range h.activeStreams {
+			stream <- serverSentEvent{event: "change", data: json}
+		}
+	}
+
+	h.mutex.Unlock()
+}
+
+// Handle enough of the range specification so that video playback works in Safari
+func parseRangeHeader(r string, contentLength int) (int, int, bool) {
+	if strings.HasPrefix(r, "bytes=") {
+		r = r[len("bytes="):]
+		if dash := strings.IndexByte(r, '-'); dash != -1 {
+			// Note: The range is inclusive so the limit is deliberately "length - 1"
+			if begin, ok := parseRangeInt(r[:dash], contentLength-1); ok {
+				if end, ok := parseRangeInt(r[dash+1:], contentLength-1); ok {
+					// Note: The range is inclusive so a range of "0-1" is two bytes long
+					return begin, end + 1, true
+				}
+			}
+		}
+	}
+	return 0, 0, false
+}
+
+// parseRangeInt parses a non-negative decimal integer, rejecting the empty
+// string, any non-digit character, and any value greater than "maxValue".
+func parseRangeInt(text string, maxValue int) (int, bool) {
+	if len(text) == 0 {
+		return 0, false
+	}
+	result := 0
+	for i := 0; i < len(text); i++ {
+		digit := text[i]
+		if digit < '0' || digit > '9' {
+			return 0, false
+		}
+		result = result*10 + int(digit-'0')
+		if result > maxValue {
+			// Checking inside the loop also guards against integer overflow
+			return 0, false
+		}
+	}
+	return result, true
+}
+
+// matchQueryPathToResult checks the in-memory build output files for a match
+// against the request path "queryPath". For a file match it returns the entry
+// kind, the file contents, and the file's absolute path; the final boolean is
+// true only when an "index.html" file was served implicitly for a directory
+// request. As a side effect, immediate children of "queryPath" found among
+// the output files are recorded in "dirEntries" and "fileEntries" so callers
+// can render a directory listing.
+func (h *apiHandler) matchQueryPathToResult(
+	queryPath string,
+	result *BuildResult,
+	dirEntries map[string]bool,
+	fileEntries map[string]bool,
+) (fs.EntryKind, []byte, string, bool) {
+	queryIsDir := false
+	queryDir := queryPath
+	if queryDir != "" {
+		queryDir += "/"
+	}
+
+	// Check the output files for a match
+	for _, file := range result.OutputFiles {
+		if relPath, ok := h.fs.Rel(h.absOutputDir, file.Path); ok {
+			relPath = strings.ReplaceAll(relPath, "\\", "/") // Fix paths on Windows
+
+			// An exact match
+			if relPath == queryPath {
+				return fs.FileEntry, file.Contents, file.Path, false
+			}
+
+			// Serve an "index.html" file if present
+			if dir, base := path.Split(relPath); base == "index.html" && queryDir == dir {
+				return fs.FileEntry, file.Contents, file.Path, true
+			}
+
+			// A match inside this directory
+			if strings.HasPrefix(relPath, queryDir) {
+				entry := relPath[len(queryDir):]
+				queryIsDir = true
+				// Record only the first path component below the query path
+				if slash := strings.IndexByte(entry, '/'); slash == -1 {
+					fileEntries[entry] = true
+				} else if dir := entry[:slash]; !dirEntries[dir] {
+					dirEntries[dir] = true
+				}
+			}
+		}
+	}
+
+	// Treat this as a directory if it's non-empty
+	if queryIsDir {
+		return fs.DirEntry, nil, "", false
+	}
+
+	// No match: the zero entry kind means "not found" here
+	return 0, nil, "", false
+}
+
+// respondWithDirList generates a simple HTML directory listing page for
+// "queryPath" containing links to the given child directories and files,
+// plus breadcrumb links for each parent path segment and a link to the
+// parent directory.
+func respondWithDirList(queryPath string, dirEntries map[string]bool, fileEntries map[string]bool) []byte {
+	queryPath = "/" + queryPath
+	queryDir := queryPath
+	if queryDir != "/" {
+		queryDir += "/"
+	}
+	html := strings.Builder{}
+	html.WriteString("<!doctype html>\n")
+	html.WriteString("<meta charset=\"utf8\">\n")
+	html.WriteString("<style>\n")
+	html.WriteString("body { margin: 30px; color: #222; background: #fff; font: 16px/22px sans-serif; }\n")
+	html.WriteString("a { color: inherit; text-decoration: none; }\n")
+	html.WriteString("a:hover { text-decoration: underline; }\n")
+	html.WriteString("a:visited { color: #777; }\n")
+	html.WriteString("@media (prefers-color-scheme: dark) {\n")
+	html.WriteString("  body { color: #fff; background: #222; }\n")
+	html.WriteString("  a:visited { color: #aaa; }\n")
+	html.WriteString("}\n")
+	html.WriteString("</style>\n")
+	html.WriteString("<title>Directory: ")
+	html.WriteString(escapeForHTML(queryDir))
+	html.WriteString("</title>\n")
+	html.WriteString("<h1>Directory: ")
+	// Render the heading as breadcrumbs: every segment except the last one
+	// links to the corresponding parent directory
+	var parts []string
+	if queryPath == "/" {
+		parts = []string{""}
+	} else {
+		parts = strings.Split(queryPath, "/")
+	}
+	for i, part := range parts {
+		if i+1 < len(parts) {
+			html.WriteString("<a href=\"")
+			html.WriteString(escapeForHTML(strings.Join(parts[:i+1], "/")))
+			html.WriteString("/\">")
+		}
+		html.WriteString(escapeForHTML(part))
+		html.WriteString("/")
+		if i+1 < len(parts) {
+			html.WriteString("</a>")
+		}
+	}
+	html.WriteString("</h1>\n")
+
+	// Link to the parent directory
+	if queryPath != "/" {
+		parentDir := path.Dir(queryPath)
+		if parentDir != "/" {
+			parentDir += "/"
+		}
+		html.WriteString(fmt.Sprintf("<div>📁 <a href=\"%s\">../</a></div>\n", escapeForAttribute(parentDir)))
+	}
+
+	// Link to child directories (note: previously this local was named
+	// "strings", which shadowed the "strings" package)
+	names := make([]string, 0, len(dirEntries)+len(fileEntries))
+	for entry := range dirEntries {
+		names = append(names, entry)
+	}
+	sort.Strings(names)
+	for _, entry := range names {
+		html.WriteString(fmt.Sprintf("<div>📁 <a href=\"%s/\">%s/</a></div>\n", escapeForAttribute(path.Join(queryPath, entry)), escapeForHTML(entry)))
+	}
+
+	// Link to files in the directory
+	names = names[:0]
+	for entry := range fileEntries {
+		names = append(names, entry)
+	}
+	sort.Strings(names)
+	for _, entry := range names {
+		html.WriteString(fmt.Sprintf("<div>📄 <a href=\"%s\">%s</a></div>\n", escapeForAttribute(path.Join(queryPath, entry)), escapeForHTML(entry)))
+	}
+
+	return []byte(html.String())
+}
+
+// This is used to make error messages platform-independent: the path is
+// converted to a slash-separated path relative to the current working
+// directory when possible, and returned unchanged otherwise.
+func prettyPrintPath(fs fs.FS, path string) string {
+	relPath, ok := fs.Rel(fs.Cwd(), path)
+	if !ok {
+		return path
+	}
+	return strings.ReplaceAll(relPath, "\\", "/")
+}
+
+// Serve validates the serve options, binds a TCP listener (trying ports
+// 8000-8009 when no port was given), starts an HTTP or HTTPS server backed
+// by this context's build, and kicks off the first build asynchronously.
+// It returns the host and port actually bound, or an error if the server
+// failed to start accepting connections.
+func (ctx *internalContext) Serve(serveOptions ServeOptions) (ServeResult, error) {
+	ctx.mutex.Lock()
+	defer ctx.mutex.Unlock()
+
+	// Ignore disposed contexts
+	if ctx.didDispose {
+		return ServeResult{}, errors.New("Cannot serve a disposed context")
+	}
+
+	// Don't allow starting serve mode multiple times
+	if ctx.handler != nil {
+		return ServeResult{}, errors.New("Serve mode has already been enabled")
+	}
+
+	// HTTPS requires both a key file and a certificate file
+	if (serveOptions.Keyfile != "") != (serveOptions.Certfile != "") {
+		return ServeResult{}, errors.New("Must specify both key and certificate for HTTPS")
+	}
+
+	// Validate the "servedir" path
+	if serveOptions.Servedir != "" {
+		if absPath, ok := ctx.realFS.Abs(serveOptions.Servedir); ok {
+			serveOptions.Servedir = absPath
+		} else {
+			return ServeResult{}, fmt.Errorf("Invalid serve path: %s", serveOptions.Servedir)
+		}
+	}
+
+	// Validate the "fallback" path
+	if serveOptions.Fallback != "" {
+		if absPath, ok := ctx.realFS.Abs(serveOptions.Fallback); ok {
+			serveOptions.Fallback = absPath
+		} else {
+			return ServeResult{}, fmt.Errorf("Invalid fallback path: %s", serveOptions.Fallback)
+		}
+	}
+
+	// Stuff related to the output directory only matters if there are entry points
+	outdirPathPrefix := ""
+	if len(ctx.args.entryPoints) > 0 {
+		// Don't allow serving when builds are written to stdout
+		if ctx.args.options.WriteToStdout {
+			what := "entry points"
+			if len(ctx.args.entryPoints) == 1 {
+				what = "an entry point"
+			}
+			return ServeResult{}, fmt.Errorf("Cannot serve %s without an output path", what)
+		}
+
+		// Compute the output path prefix
+		if serveOptions.Servedir != "" && ctx.args.options.AbsOutputDir != "" {
+			// Make sure the output directory is contained in the "servedir" directory
+			relPath, ok := ctx.realFS.Rel(serveOptions.Servedir, ctx.args.options.AbsOutputDir)
+			if !ok {
+				return ServeResult{}, fmt.Errorf(
+					"Cannot compute relative path from %q to %q\n", serveOptions.Servedir, ctx.args.options.AbsOutputDir)
+			}
+			relPath = strings.ReplaceAll(relPath, "\\", "/") // Fix paths on Windows
+			if relPath == ".." || strings.HasPrefix(relPath, "../") {
+				return ServeResult{}, fmt.Errorf(
+					"Output directory %q must be contained in serve directory %q",
+					prettyPrintPath(ctx.realFS, ctx.args.options.AbsOutputDir),
+					prettyPrintPath(ctx.realFS, serveOptions.Servedir),
+				)
+			}
+			if relPath != "." {
+				outdirPathPrefix = relPath
+			}
+		}
+	}
+
+	// Determine the host
+	var listener net.Listener
+	network := "tcp4"
+	host := "0.0.0.0"
+	if serveOptions.Host != "" {
+		host = serveOptions.Host
+
+		// Only use "tcp4" if this is an IPv4 address, otherwise use "tcp"
+		if ip := net.ParseIP(host); ip == nil || ip.To4() == nil {
+			network = "tcp"
+		}
+	}
+
+	// Pick the port
+	if serveOptions.Port == 0 {
+		// Default to picking a "800X" port
+		for port := 8000; port <= 8009; port++ {
+			if result, err := net.Listen(network, net.JoinHostPort(host, fmt.Sprintf("%d", port))); err == nil {
+				listener = result
+				break
+			}
+		}
+	}
+	if listener == nil {
+		// Otherwise pick the provided port. Note that a port of 0 here (all
+		// "800X" ports taken) makes the OS pick an arbitrary open port.
+		if result, err := net.Listen(network, net.JoinHostPort(host, fmt.Sprintf("%d", serveOptions.Port))); err != nil {
+			return ServeResult{}, err
+		} else {
+			listener = result
+		}
+	}
+
+	// The listener is bound at this point; grab its resolved address
+	addr := listener.Addr().String()
+
+	// Extract the real port in case we passed a port of "0"
+	var result ServeResult
+	if host, text, err := net.SplitHostPort(addr); err == nil {
+		if port, err := strconv.ParseInt(text, 10, 32); err == nil {
+			result.Port = uint16(port)
+			result.Host = host
+		}
+	}
+
+	// HTTPS-related files should be absolute paths
+	isHTTPS := serveOptions.Keyfile != "" && serveOptions.Certfile != ""
+	if isHTTPS {
+		serveOptions.Keyfile, _ = ctx.realFS.Abs(serveOptions.Keyfile)
+		serveOptions.Certfile, _ = ctx.realFS.Abs(serveOptions.Certfile)
+	}
+
+	var shouldStop int32
+
+	// The first build will just build normally
+	handler := &apiHandler{
+		onRequest:        serveOptions.OnRequest,
+		outdirPathPrefix: outdirPathPrefix,
+		absOutputDir:     ctx.args.options.AbsOutputDir,
+		publicPath:       ctx.args.options.PublicPath,
+		servedir:         serveOptions.Servedir,
+		keyfileToLower:   strings.ToLower(serveOptions.Keyfile),
+		certfileToLower:  strings.ToLower(serveOptions.Certfile),
+		fallback:         serveOptions.Fallback,
+		rebuild: func() BuildResult {
+			if atomic.LoadInt32(&shouldStop) != 0 {
+				// Don't start more rebuilds if we were told to stop
+				return BuildResult{}
+			} else {
+				return ctx.activeBuildOrRecentBuildOrRebuild()
+			}
+		},
+		fs: ctx.realFS,
+	}
+
+	// Create the server
+	server := &http.Server{Addr: addr, Handler: handler}
+
+	// When stop is called, block further rebuilds and then close the server
+	handler.stop = func() {
+		atomic.StoreInt32(&shouldStop, 1)
+
+		// Close the server and wait for it to close
+		server.Close()
+
+		// Close all open event streams
+		handler.mutex.Lock()
+		for _, stream := range handler.activeStreams {
+			close(stream)
+		}
+		handler.activeStreams = nil
+		handler.mutex.Unlock()
+
+		handler.serveWaitGroup.Wait()
+	}
+
+	// HACK: Go's HTTP API doesn't appear to provide a way to separate argument
+	// validation errors from eventual network errors. Specifically "ServeTLS"
+	// blocks for an arbitrarily long time before returning an error. So we
+	// intercept the first call to "Accept" on the listener and say that the
+	// serve call succeeded without an error if we get to that point.
+	hack := &hackListener{Listener: listener}
+	hack.waitGroup.Add(1)
+
+	// Start the server and signal on "serveWaitGroup" when it stops
+	handler.serveWaitGroup.Add(1)
+	go func() {
+		var err error
+		if isHTTPS {
+			err = server.ServeTLS(hack, serveOptions.Certfile, serveOptions.Keyfile)
+		} else {
+			err = server.Serve(hack)
+		}
+		if err != http.ErrServerClosed {
+			// The server failed before the first "Accept": record the error
+			// and release the wait below (unless "Accept" got there first)
+			hack.mutex.Lock()
+			if !hack.done {
+				hack.done = true
+				hack.err = err
+				hack.waitGroup.Done()
+			}
+			hack.mutex.Unlock()
+		}
+		handler.serveWaitGroup.Done()
+	}()
+
+	// Return an error if the server failed to start accepting connections
+	hack.waitGroup.Wait()
+	if hack.err != nil {
+		return ServeResult{}, hack.err
+	}
+
+	// There appears to be some issue with Linux (but not with macOS) where
+	// destroying and recreating a server with the same port as the previous
+	// server had sometimes causes subsequent connections to fail with
+	// ECONNRESET (shows up in node as "Error: socket hang up").
+	//
+	// I think the problem is sort of that Go sets SO_REUSEADDR to 1 for listener
+	// sockets (specifically in "setDefaultListenerSockopts"). In some ways this
+	// is good, because it's more convenient for the user if the port is the
+	// same. However, I believe this sends a TCP RST packet to kill any previous
+	// connections. That can then be received by clients attempting to connect
+	// to the new server.
+	//
+	// As a hack to work around this problem, we wait for an additional short
+	// amount of time before returning. I observed this problem even with a 5ms
+	// timeout but I did not observe this problem with a 10ms timeout. So I'm
+	// setting this timeout to 50ms to be extra safe.
+	time.Sleep(50 * time.Millisecond)
+
+	// Only set the context handler if the server started successfully
+	ctx.handler = handler
+
+	// Print the URL(s) that the server can be reached at
+	if ctx.args.logOptions.LogLevel <= logger.LevelInfo {
+		printURLs(result.Host, result.Port, isHTTPS, ctx.args.logOptions.Color)
+	}
+
+	// Start the first build shortly after this function returns (but not
+	// immediately so that stuff we print right after this will come first).
+	//
+	// This also helps the CLI not do two builds when serve and watch mode
+	// are enabled together. Watch mode is enabled after serve mode because
+	// we want the stderr output for watch to come after the stderr output for
+	// serve, but watch mode will do another build if the current build is
+	// not a watch mode build.
+	go func() {
+		time.Sleep(10 * time.Millisecond)
+		handler.rebuild()
+	}()
+	return result, nil
+}
+
+// hackListener wraps a net.Listener so that the first call to "Accept" can
+// be detected. This lets "Serve" distinguish start-up failures (which happen
+// before any "Accept") from ordinary network errors later on.
+type hackListener struct {
+	net.Listener
+	mutex     sync.Mutex
+	waitGroup sync.WaitGroup // released once start-up succeeds or fails
+	err       error          // the start-up error, if any
+	done      bool           // whether "waitGroup" has already been released
+}
+
+// Accept releases the start-up wait in "Serve" the first time it's called
+// (reaching "Accept" means the HTTP server got past argument validation),
+// then delegates to the wrapped listener.
+func (hack *hackListener) Accept() (net.Conn, error) {
+	hack.mutex.Lock()
+	if !hack.done {
+		hack.done = true
+		hack.waitGroup.Done()
+	}
+	hack.mutex.Unlock()
+	return hack.Listener.Accept()
+}
+
+// printURLs logs the URL(s) the development server can be reached at to
+// stderr. When the host is unspecified ("0.0.0.0" or "::") it lists every
+// matching non-link-local interface address, labeling loopback addresses
+// "Local" and everything else "Network".
+func printURLs(host string, port uint16, https bool, useColor logger.UseColor) {
+	logger.PrintTextWithColor(os.Stderr, useColor, func(colors logger.Colors) string {
+		var hosts []string
+		sb := strings.Builder{}
+		sb.WriteString(colors.Reset)
+
+		// If this is "0.0.0.0" or "::", list all relevant IP addresses
+		if ip := net.ParseIP(host); ip != nil && ip.IsUnspecified() {
+			if addrs, err := net.InterfaceAddrs(); err == nil {
+				for _, addr := range addrs {
+					// Only include addresses of the same family (IPv4 vs. IPv6)
+					if addr, ok := addr.(*net.IPNet); ok && (addr.IP.To4() != nil) == (ip.To4() != nil) && !addr.IP.IsLinkLocalUnicast() {
+						hosts = append(hosts, addr.IP.String())
+					}
+				}
+			}
+		}
+
+		// Otherwise, just list the one IP address
+		if len(hosts) == 0 {
+			hosts = append(hosts, host)
+		}
+
+		// Determine the host kinds
+		kinds := make([]string, len(hosts))
+		maxLen := 0
+		for i, host := range hosts {
+			kind := "Network"
+			if ip := net.ParseIP(host); ip != nil && ip.IsLoopback() {
+				kind = "Local"
+			}
+			kinds[i] = kind
+			if len(kind) > maxLen {
+				maxLen = len(kind)
+			}
+		}
+
+		// Pretty-print the host list (pad the labels so the URLs line up)
+		protocol := "http"
+		if https {
+			protocol = "https"
+		}
+		for i, kind := range kinds {
+			sb.WriteString(fmt.Sprintf("\n > %s:%s %s%s://%s/%s",
+				kind, strings.Repeat(" ", maxLen-len(kind)), colors.Underline, protocol,
+				net.JoinHostPort(hosts[i], fmt.Sprintf("%d", port)), colors.Reset))
+		}
+
+		sb.WriteString("\n\n")
+		return sb.String()
+	})
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/serve_wasm.go b/source/vendor/github.com/evanw/esbuild/pkg/api/serve_wasm.go
new file mode 100644
index 0000000..bdae038
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/serve_wasm.go
@@ -0,0 +1,21 @@
+//go:build js && wasm
+// +build js,wasm
+
+package api
+
+import "fmt"
+
+// Remove the serve API in the WebAssembly build. This removes 2.7mb of stuff.
+
+// Serve always fails in the WebAssembly build since the serve API is
+// compiled out to reduce binary size.
+func (*internalContext) Serve(ServeOptions) (ServeResult, error) {
+	return ServeResult{}, fmt.Errorf("The \"serve\" API is not supported when using WebAssembly")
+}
+
+// apiHandler is an empty placeholder in the WebAssembly build so code that
+// references it still compiles.
+type apiHandler struct {
+}
+
+// broadcastBuildResult is a no-op in the WebAssembly build.
+func (*apiHandler) broadcastBuildResult(BuildResult, map[string]string) {
+}
+
+// stop is a no-op in the WebAssembly build.
+func (*apiHandler) stop() {
+}
diff --git a/source/vendor/github.com/evanw/esbuild/pkg/api/watcher.go b/source/vendor/github.com/evanw/esbuild/pkg/api/watcher.go
new file mode 100644
index 0000000..efe6277
--- /dev/null
+++ b/source/vendor/github.com/evanw/esbuild/pkg/api/watcher.go
@@ -0,0 +1,187 @@
+package api
+
+// This file implements a polling file watcher for esbuild (i.e. it detects
+// when files are changed by repeatedly checking their contents). Polling is
+// used instead of more efficient platform-specific file system APIs because:
+//
+//   * Go's standard library doesn't have built-in APIs for file watching
+//   * Using platform-specific APIs means using cgo, which I want to avoid
+//   * Polling is cross-platform and esbuild needs to work on 20+ platforms
+//   * Platform-specific APIs might be unreliable and could introduce bugs
+//
+// That said, this polling system is designed to use relatively little CPU vs.
+// a more traditional polling system that scans the whole directory tree at
+// once. The file system is still scanned regularly but each scan only checks
+// a random subset of your files, which means a change to a file will be picked
+// up soon after the change is made but not necessarily instantly.
+//
+// With the current heuristics, large projects should be completely scanned
+// around every 2 seconds so in the worst case it could take up to 2 seconds
+// for a change to be noticed. However, after a change has been noticed the
+// change's path goes on a short list of recently changed paths which are
+// checked on every scan, so further changes to recently changed files should
+// be noticed almost instantly.
+
+import (
+	"fmt"
+	"math/rand"
+	"os"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/evanw/esbuild/internal/fs"
+	"github.com/evanw/esbuild/internal/logger"
+	"github.com/evanw/esbuild/internal/resolver"
+)
+
+// The time to wait between watch intervals
+const watchIntervalSleep = 100 * time.Millisecond
+
+// The maximum number of recently-edited items to check every interval
+const maxRecentItemCount = 16
+
+// The minimum number of non-recent items to check every interval
+const minItemCountPerIter = 64
+
+// The maximum number of intervals before a change is detected. Together
+// with the 100ms sleep above, this means a full scan cycle over all watched
+// files takes roughly 2 seconds at most.
+const maxIntervalsBeforeUpdate = 20
+
+// watcher polls a subset of the watched files on a fixed interval and
+// triggers a rebuild when a changed file is found. State shared with the
+// polling goroutine is guarded by "mutex" (except "shouldStop", which is
+// accessed atomically).
+type watcher struct {
+	data              fs.WatchData
+	fs                fs.FS
+	rebuild           func() fs.WatchData
+	recentItems       []string // recently-changed paths, re-checked every interval
+	itemsToScan       []string // remaining paths in the current scan cycle
+	mutex             sync.Mutex
+	itemsPerIteration int   // how many paths from "itemsToScan" to check per interval
+	shouldStop        int32 // accessed atomically
+	shouldLog         bool
+	useColor          logger.UseColor
+	stopWaitGroup     sync.WaitGroup
+}
+
+// setWatchData installs the watch data produced by the latest build. It
+// resets the scan queue and drops recently-changed paths that are no longer
+// part of the build. On the first build it also prints the initial
+// "watching for changes" message when logging is enabled.
+func (w *watcher) setWatchData(data fs.WatchData) {
+	// Note: "Lock" before "defer Unlock" (the original inverted order worked
+	// but read as if "Unlock" could run before "Lock")
+	w.mutex.Lock()
+	defer w.mutex.Unlock()
+
+	// Print something for the end of the first build
+	if w.shouldLog && w.data.Paths == nil {
+		logger.PrintTextWithColor(os.Stderr, w.useColor, func(colors logger.Colors) string {
+			return fmt.Sprintf("%s[watch] build finished, watching for changes...%s\n", colors.Dim, colors.Reset)
+		})
+	}
+
+	w.data = data
+	w.itemsToScan = w.itemsToScan[:0] // Reuse memory
+
+	// Remove any recent items that weren't a part of the latest build
+	end := 0
+	for _, path := range w.recentItems {
+		if data.Paths[path] != nil {
+			w.recentItems[end] = path
+			end++
+		}
+	}
+	w.recentItems = w.recentItems[:end]
+}
+
+// start launches the background polling goroutine. The goroutine sleeps for
+// the watch interval, looks for a changed file, and rebuilds (logging
+// progress when enabled) until stop() is called.
+func (w *watcher) start() {
+	w.stopWaitGroup.Add(1)
+
+	go func() {
+		// Note: Do not change these log messages without a breaking version change.
+		// People want to run regexes over esbuild's stderr stream to look for these
+		// messages instead of using esbuild's API.
+
+		for atomic.LoadInt32(&w.shouldStop) == 0 {
+			// Sleep for the watch interval
+			time.Sleep(watchIntervalSleep)
+
+			// Rebuild if we're dirty
+			if absPath := w.tryToFindDirtyPath(); absPath != "" {
+				if w.shouldLog {
+					logger.PrintTextWithColor(os.Stderr, w.useColor, func(colors logger.Colors) string {
+						prettyPath := resolver.PrettyPath(w.fs, logger.Path{Text: absPath, Namespace: "file"})
+						return fmt.Sprintf("%s[watch] build started (change: %q)%s\n", colors.Dim, prettyPath, colors.Reset)
+					})
+				}
+
+				// Run the build and install the new watch data it produces
+				w.setWatchData(w.rebuild())
+
+				if w.shouldLog {
+					logger.PrintTextWithColor(os.Stderr, w.useColor, func(colors logger.Colors) string {
+						return fmt.Sprintf("%s[watch] build finished%s\n", colors.Dim, colors.Reset)
+					})
+				}
+			}
+		}
+
+		w.stopWaitGroup.Done()
+	}()
+}
+
+// stop tells the polling goroutine to exit and blocks until it has done so.
+func (w *watcher) stop() {
+	atomic.StoreInt32(&w.shouldStop, 1)
+	w.stopWaitGroup.Wait()
+}
+
+// tryToFindDirtyPath returns the path of a watched file that has changed
+// since the last build, or "" if no change was detected this interval. All
+// recently-changed paths are checked every call, plus a fixed-size batch of
+// the remaining paths, so the whole file list is covered over several
+// intervals rather than scanned all at once.
+func (w *watcher) tryToFindDirtyPath() string {
+	// Note: "Lock" before "defer Unlock" (the original inverted order worked
+	// but read as if "Unlock" could run before "Lock")
+	w.mutex.Lock()
+	defer w.mutex.Unlock()
+
+	// If we ran out of items to scan, fill the items back up in a random order
+	if len(w.itemsToScan) == 0 {
+		items := w.itemsToScan[:0] // Reuse memory
+		for path := range w.data.Paths {
+			items = append(items, path)
+		}
+		// NOTE(review): "rand.Seed" is deprecated as of Go 1.20 and re-seeding
+		// on every refill is unnecessary; kept for older toolchain support
+		rand.Seed(time.Now().UnixNano())
+		for i := int32(len(items) - 1); i > 0; i-- { // Fisher-Yates shuffle
+			j := rand.Int31n(i + 1)
+			items[i], items[j] = items[j], items[i]
+		}
+		w.itemsToScan = items
+
+		// Determine how many items to check every iteration, rounded up
+		perIter := (len(items) + maxIntervalsBeforeUpdate - 1) / maxIntervalsBeforeUpdate
+		if perIter < minItemCountPerIter {
+			perIter = minItemCountPerIter
+		}
+		w.itemsPerIteration = perIter
+	}
+
+	// Always check all recent items every iteration
+	for i, path := range w.recentItems {
+		if dirtyPath := w.data.Paths[path](); dirtyPath != "" {
+			// Move this path to the back of the list (i.e. the "most recent" position)
+			copy(w.recentItems[i:], w.recentItems[i+1:])
+			w.recentItems[len(w.recentItems)-1] = path
+			return dirtyPath
+		}
+	}
+
+	// Check a constant number of items every iteration
+	remainingCount := len(w.itemsToScan) - w.itemsPerIteration
+	if remainingCount < 0 {
+		remainingCount = 0
+	}
+	toCheck, remaining := w.itemsToScan[remainingCount:], w.itemsToScan[:remainingCount]
+	w.itemsToScan = remaining
+
+	// Check if any of the entries in this iteration have been modified
+	for _, path := range toCheck {
+		if dirtyPath := w.data.Paths[path](); dirtyPath != "" {
+			// Mark this item as recent by adding it to the back of the list
+			w.recentItems = append(w.recentItems, path)
+			if len(w.recentItems) > maxRecentItemCount {
+				// Remove items from the front of the list when we hit the limit
+				copy(w.recentItems, w.recentItems[1:])
+				w.recentItems = w.recentItems[:maxRecentItemCount]
+			}
+			return dirtyPath
+		}
+	}
+	return ""
+}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.envrc b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.envrc
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.envrc
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.envrc
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitignore b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.gitignore
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitignore
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.gitignore
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitlab-ci.yml b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.gitlab-ci.yml
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/.gitlab-ci.yml
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/.gitlab-ci.yml
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CHANGELOG.md b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/CHANGELOG.md
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CHANGELOG.md
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/CHANGELOG.md
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CONTRIBUTING.md b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/CONTRIBUTING.md
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/CONTRIBUTING.md
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/CONTRIBUTING.md
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/LICENSE b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/LICENSE
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/LICENSE
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/LICENSE
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/README.md b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/README.md
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/README.md
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/README.md
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/api.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/api.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/api.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/api.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/command.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/command.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/command.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/command.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.lock b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.lock
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.lock
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.lock
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.nix b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.nix
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.nix
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.nix
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.yaml b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.yaml
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/devenv.yaml
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/devenv.yaml
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/doc.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/doc.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/doc.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/doc.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/error.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/error.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/error.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/error.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/execute.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/execute.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/execute.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/execute.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/flake.lock b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/flake.lock
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/flake.lock
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/flake.lock
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/flake.nix b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/flake.nix
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/flake.nix
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/flake.nix
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help-util.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/help-util.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help-util.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/help-util.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/help.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/help.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/help.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/hint.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/hint.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/hint.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/hint.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/mapping.go
similarity index 97%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/mapping.go
index f4f0438..b1efa64 100644
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/mapping.go
+++ b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/mapping.go
@@ -5,7 +5,7 @@ package xflags
 
 import (
 	"flag"
-	"gitlab.schukai.com/oss/libraries/go/utilities/pathfinder"
+	"gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git"
 	"reflect"
 	"strconv"
 	"strings"
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/parse.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/parse.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/parse.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/parse.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/release.json b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/release.json
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/release.json
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/release.json
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/setting.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/setting.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/setting.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/setting.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/tags.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/tags.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/tags.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/tags.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/type.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/type.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags/type.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/application/xflags.git/type.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/LICENSE b/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/LICENSE
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/LICENSE
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/LICENSE
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/engine.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/engine/engine.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/engine.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/engine/engine.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/error.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/engine/error.go
similarity index 100%
rename from source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html/engine/error.go
rename to source/vendor/gitlab.schukai.com/oss/libraries/go/markup/html.git/engine/error.go
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.envrc b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.envrc
deleted file mode 100644
index 0da5bcc..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.envrc
+++ /dev/null
@@ -1,2 +0,0 @@
-watch_file ./flake.nix ./nix/scripts/*.nix ./nix/config/*.nix ./nix/packages/*.nix 
-use flake
\ No newline at end of file
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore
deleted file mode 100644
index 13bb7b5..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitignore
+++ /dev/null
@@ -1,155 +0,0 @@
-# Created by https://www.toptal.com/developers/gitignore/api/intellij,go
-# Edit at https://www.toptal.com/developers/gitignore?templates=intellij,go
-
-### Go ###
-# If you prefer the allow list template instead of the deny list, see community template:
-# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
-#
-# Binaries for programs and plugins
-*.exe
-*.exe~
-*.dll
-*.so
-*.dylib
-
-# Test binary, built with `go test -c`
-*.test
-
-# Output of the go coverage tool, specifically when used with LiteIDE
-*.out
-
-# Dependency directories (remove the comment below to include it)
-# vendor/
-
-# Go workspace file
-go.work
-
-### Go Patch ###
-/vendor/
-/Godeps/
-
-### Intellij ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# AWS User-specific
-.idea/**/aws.xml
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn.  Uncomment if using
-# auto-import.
-# .idea/artifacts
-# .idea/compiler.xml
-# .idea/jarRepositories.xml
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# SonarLint plugin
-.idea/sonarlint/
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### Intellij Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-# https://plugins.jetbrains.com/plugin/7973-sonarlint
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator-enh.xml
-.idea/**/markdown-navigator/
-
-# Cache file creation bug
-# See https://youtrack.jetbrains.com/issue/JBR-2257
-.idea/$CACHE_FILE$
-
-# CodeStream plugin
-# https://plugins.jetbrains.com/plugin/12206-codestream
-.idea/codestream.xml
-
-# Azure Toolkit for IntelliJ plugin
-# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
-.idea/**/azureSettings.xml
-
-# End of https://www.toptal.com/developers/gitignore/api/intellij,go
-# Devenv
-.devenv*
-devenv.local.nix
-
-# direnv
-.direnv
-
-# pre-commit
-.pre-commit-config.yaml
-
-/Session.vim
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitlab-ci.yml b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitlab-ci.yml
deleted file mode 100644
index 78b9b3f..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/.gitlab-ci.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-variables:
-  NIXPKGS_ALLOW_UNFREE: "1"
-  NIXPKGS_ALLOW_INSECURE: "1"
-  GIT_DEPTH: 10
-  GIT_SUBMODULE_STRATEGY: normal
-  GIT_SUBMODULE_DEPTH: 1
-
-
-stages:
-  - test
-  - release
-
-before_script:
-  - git config --global user.email "${GITLAB_USER_EMAIL}"
-  - git config --global user.name "${GITLAB_USER_NAME:-"Gitlab CI"}"
-  - git config --global credential.helper '!f() { echo "username=gitlab-ci-token"; echo "password=${CI_JOB_TOKEN}"; }; f'
-  - git config --global pull.rebase true
-  - git config --global http.sslVerify "false"
-
-
-after_script:
-  - nix develop .#gitlab --command clean-up
-
-tests:
-  stage: test
-  tags:
-    - nixos-gen3
-  script:
-    - nix develop .#gitlab --command run-ci-tests
-  artifacts:
-    paths:
-      - last-phpunit-result.xml
-
-release:
-  stage: release
-  tags:
-    - nixos-gen3
-  script:
-    - nix develop .#gitlab --command release
-  when: on_success
-  rules:
-    - if: $CI_COMMIT_BRANCH == "master"
-  
\ No newline at end of file
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CONTRIBUTING.md b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CONTRIBUTING.md
deleted file mode 100644
index 2713a85..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/CONTRIBUTING.md
+++ /dev/null
@@ -1,58 +0,0 @@
-# Contributing to schukai GmbH Projects
-
-## Code of Conduct
-
-Be a human, not an asshole. Common sense and basic human decency apply.
-
-## Getting Started
-
-### Setting up the Project
-
-1. Fork the project on GitLab.
-2. Clone your fork locally. Replace `[your-username]` with your GitLab username and `[project-name]` with the actual project name:
-    ```bash
-    git clone $(git config --get remote.origin.url)
-    ```
-3. Add the upstream repository. Replace `[original-username]` and `[project-name]` with the original repository's username and project name:
-    ```bash
-    git remote add upstream https://gitlab.schukai.com/[original-username]/[project-name].git
-    ```
-
-### Making Changes
-
-1. Create a new branch:
-    ```bash
-    git checkout -b new-feature-branch
-    ```
-2. Make your changes.
-3. Commit your changes:
-    ```bash
-    git commit -m "Description of change"
-    ```
-
-### Submitting a Merge Request
-
-1. Push your changes to your fork:
-    ```bash
-    git push origin new-feature-branch
-    ```
-2. Navigate to the original project repository on `gitlab.schukai.com`.
-3. Open a Merge Request and provide a clear description of the changes.
-
-## Coding Guidelines
-
-- Follow the coding style used in the project.
-- Write unit tests for new features.
-- Ensure that all tests pass before submitting a Merge Request.
-
-## Reporting Issues
-
-If you find an issue, please create a new issue on `gitlab.schukai.com`.
-
-## Additional Resources
-
-- [GitLab Flow](https://docs.gitlab.com/ee/topics/gitlab_flow.html)
-- [GitLab Merge Request Guidelines](https://docs.gitlab.com/ee/user/project/merge_requests/)
-
-Thank you for your contribution! 
-    
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE
deleted file mode 100644
index 5694d30..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/LICENSE
+++ /dev/null
@@ -1,14 +0,0 @@
-Copyright (C) 2023 schukai GmbH 
-
-This program is free software: you can redistribute it and/or modify
-it under the terms of the GNU Affero General Public License as published
-by the Free Software Foundation, either version 3 of the License.
-
-This program is distributed in the hope that it will be useful,
-but WITHOUT ANY WARRANTY; without even the implied warranty of
-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-GNU Affero General Public License for more details.
-
-You should have received a copy of the GNU Affero General Public License
-along with this program.  If not, see <https://www.gnu.org/licenses/>.  
-    
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md
deleted file mode 100644
index 4d1522c..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/README.md
+++ /dev/null
@@ -1,69 +0,0 @@
-## Pathfinder
-
-## What does this library?
-
-This library provides a simple way to get and set values in a nested structure.
-
-It supports:
-
-* [X]  Set values in a nested structure
-* [X]  Get values from a nested structure
-
-## Installation
-
-```shell
-go get gitlab.schukai.com/oss/libraries/go/utilities/pathfinder
-```
-
-**Note:** This library uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
-
-## Usage
-
-### Set values
-
-```go
-s := &StructA{}
-err := GetValue[*StructA](s, "my.key")
-```
-
-### Get values
-
-```go
-s := &StructA{}
-err := SetValue[*StructA](s, "my.key", "value")
-```
-
-## Contributing
-
-Merge requests are welcome. For major changes, please open an issue first to discuss what
-you would like to change. **Please make sure to update tests as appropriate.**
-
-Versioning is done with [SemVer](https://semver.org/).
-Changelog is generated with [git-chglog](https://github.com/git-chglog/git-chglog#git-chglog)
-
-Commit messages should follow the [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/) specification.
-Messages are started with a type, which is one of the following:
-
-- **feat**: A new feature
-- **fix**: A bug fix
-- **doc**: Documentation only changes
-- **refactor**: A code change that neither fixes a bug nor adds a feature
-- **perf**: A code change that improves performance
-- **test**: Adding missing or correcting existing tests
-- **chore**: Other changes that don't modify src or test files
-
-The footer would be used for a reference to an issue or a breaking change.
-
-A commit that has a footer `BREAKING CHANGE:`, or appends a ! after the type/scope,
-introduces a breaking API change (correlating with MAJOR in semantic versioning).
-A BREAKING CHANGE can be part of commits of any type.
-
-the following is an example of a commit message:
-
-```text
-feat: add 'extras' field
-```
-
-## License
-
-[AGPL-3.0](https://choosealicense.com/licenses/agpl-3.0/)
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go
deleted file mode 100644
index 614b13e..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/error.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"errors"
-	"reflect"
-)
-
-type InvalidPathError error
-
-func newInvalidPathError(path string) InvalidPathError {
-	return InvalidPathError(errors.New("invalid path " + path))
-}
-
-type UnsupportedTypeAtTopOfPathError error
-
-func newUnsupportedTypeAtTopOfPathError(path string, t reflect.Type) UnsupportedTypeAtTopOfPathError {
-	return UnsupportedTypeAtTopOfPathError(errors.New("unsupported type " + t.String() + " at top of path " + path))
-}
-
-type UnsupportedTypePathError error
-
-func newUnsupportedTypePathError(path string, t reflect.Type) UnsupportedTypePathError {
-	return UnsupportedTypePathError(errors.New("unsupported type " + t.String() + " at path " + path))
-}
-
-type CannotSetError error
-
-func newCannotSetError(name string) CannotSetError {
-	return CannotSetError(errors.New("cannot set " + name))
-}
-
-type InvalidTypeForPathError error
-
-func newInvalidTypeForPathError(path string, pt string, nt string) InvalidTypeForPathError {
-	return InvalidTypeForPathError(errors.New("invalid type for path " + path + ": expected " + pt + ", got " + nt))
-}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/find.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/find.go
deleted file mode 100644
index 0495ecf..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/find.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2024 schukai GmbH
-// SPDX-License-Identifier: proprietary
-
-package pathfinder
-
-import (
-	"fmt"
-	"reflect"
-	"strings"
-)
-
-func FindPaths(v reflect.Value, targetType reflect.Type, path []string, paths *[]string) {
-
-	if v.Kind() == reflect.Invalid {
-		return
-	}
-
-	vType := v.Type()
-
-	switch v.Kind() {
-	case reflect.Ptr:
-		FindPaths(v.Elem(), targetType, path, paths)
-	case reflect.Struct:
-		for i := 0; i < v.NumField(); i++ {
-			newPath := append(path, vType.Field(i).Name)
-			FindPaths(v.Field(i), targetType, newPath, paths)
-		}
-	case reflect.Map:
-		for _, key := range v.MapKeys() {
-			newPath := append(path, fmt.Sprint(key))
-			FindPaths(v.MapIndex(key), targetType, newPath, paths)
-		}
-	case reflect.Slice, reflect.Array:
-		for i := 0; i < v.Len(); i++ {
-			newPath := append(path, fmt.Sprint(i))
-			FindPaths(v.Index(i), targetType, newPath, paths)
-		}
-
-	case reflect.String, reflect.Bool, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8, reflect.Int, reflect.Uint64, reflect.Uint32, reflect.Uint16, reflect.Uint8, reflect.Uint:
-		if vType != targetType {
-			return
-		}
-
-	default:
-		return
-	}
-
-	if vType == targetType {
-		*paths = append(*paths, strings.Join(path, "."))
-	}
-}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.lock b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.lock
deleted file mode 100644
index fd83cb2..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.lock
+++ /dev/null
@@ -1,181 +0,0 @@
-{
-  "nodes": {
-    "commonFlake": {
-      "inputs": {
-        "nixpkgs": "nixpkgs"
-      },
-      "locked": {
-        "dir": "common",
-        "lastModified": 1718788884,
-        "narHash": "sha256-PefMbkGNMK9TN1qcNL9OkFVTNdv6wo6XoaS8eTdsY04=",
-        "ref": "refs/heads/master",
-        "rev": "abda2dc723e13dfc835535593321c514666e679e",
-        "revCount": 39,
-        "type": "git",
-        "url": "https://gitlab.schukai.com/schukai/entwicklung/nix-flakes.git?dir=common"
-      },
-      "original": {
-        "dir": "common",
-        "type": "git",
-        "url": "https://gitlab.schukai.com/schukai/entwicklung/nix-flakes.git?dir=common"
-      }
-    },
-    "flake-utils": {
-      "inputs": {
-        "systems": "systems"
-      },
-      "locked": {
-        "lastModified": 1726560853,
-        "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
-        "type": "github"
-      },
-      "original": {
-        "id": "flake-utils",
-        "type": "indirect"
-      }
-    },
-    "flakeUtils": {
-      "inputs": {
-        "systems": "systems_2"
-      },
-      "locked": {
-        "lastModified": 1726560853,
-        "narHash": "sha256-X6rJYSESBVr3hBoH0WbKE5KvhPU5bloyZ2L4K60/fPQ=",
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "rev": "c1dfcf08411b08f6b8615f7d8971a2bfa81d5e8a",
-        "type": "github"
-      },
-      "original": {
-        "owner": "numtide",
-        "repo": "flake-utils",
-        "type": "github"
-      }
-    },
-    "nixpkgs": {
-      "locked": {
-        "lastModified": 1714971268,
-        "narHash": "sha256-IKwMSwHj9+ec660l+I4tki/1NRoeGpyA2GdtdYpAgEw=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "27c13997bf450a01219899f5a83bd6ffbfc70d3c",
-        "type": "github"
-      },
-      "original": {
-        "id": "nixpkgs",
-        "ref": "nixos-23.11",
-        "type": "indirect"
-      }
-    },
-    "nixpkgsUnstable": {
-      "locked": {
-        "lastModified": 1727348695,
-        "narHash": "sha256-J+PeFKSDV+pHL7ukkfpVzCOO7mBSrrpJ3svwBFABbhI=",
-        "owner": "nixos",
-        "repo": "nixpkgs",
-        "rev": "1925c603f17fc89f4c8f6bf6f631a802ad85d784",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nixos",
-        "ref": "nixos-unstable",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_2": {
-      "locked": {
-        "lastModified": 1727397532,
-        "narHash": "sha256-pojbL/qteElw/nIXlN8kmHn/w6PQbEHr7Iz+WOXs0EM=",
-        "owner": "nixos",
-        "repo": "nixpkgs",
-        "rev": "f65141456289e81ea0d5a05af8898333cab5c53d",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nixos",
-        "ref": "nixos-24.05",
-        "repo": "nixpkgs",
-        "type": "github"
-      }
-    },
-    "nixpkgs_3": {
-      "locked": {
-        "lastModified": 1704145853,
-        "narHash": "sha256-G/1AMt9ibpeMlcxvD1vNaC8imGaK+g7zZ99e29BLgWw=",
-        "owner": "NixOS",
-        "repo": "nixpkgs",
-        "rev": "2d2ea8eab9e400618748ab1a6a108255233b602c",
-        "type": "github"
-      },
-      "original": {
-        "id": "nixpkgs",
-        "ref": "nixos-23.11",
-        "type": "indirect"
-      }
-    },
-    "root": {
-      "inputs": {
-        "commonFlake": "commonFlake",
-        "flake-utils": "flake-utils",
-        "flakeUtils": "flakeUtils",
-        "nixpkgs": "nixpkgs_2",
-        "nixpkgsUnstable": "nixpkgsUnstable",
-        "versionFlake": "versionFlake"
-      }
-    },
-    "systems": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
-    },
-    "systems_2": {
-      "locked": {
-        "lastModified": 1681028828,
-        "narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
-        "owner": "nix-systems",
-        "repo": "default",
-        "rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
-        "type": "github"
-      },
-      "original": {
-        "owner": "nix-systems",
-        "repo": "default",
-        "type": "github"
-      }
-    },
-    "versionFlake": {
-      "inputs": {
-        "nixpkgs": "nixpkgs_3"
-      },
-      "locked": {
-        "lastModified": 1716914109,
-        "narHash": "sha256-JY0PLGWzYRDQ9daKLGOBWHHuYun9nSpH9J3aSk8iDmQ=",
-        "ref": "refs/heads/master",
-        "rev": "fe8dd932d6c414a93b4a69c470792b2db038e0fb",
-        "revCount": 129,
-        "type": "git",
-        "url": "https://gitlab.schukai.com/oss/utilities/version.git"
-      },
-      "original": {
-        "type": "git",
-        "url": "https://gitlab.schukai.com/oss/utilities/version.git"
-      }
-    }
-  },
-  "root": "root",
-  "version": 7
-}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.nix b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.nix
deleted file mode 100644
index 64f1699..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/flake.nix
+++ /dev/null
@@ -1,158 +0,0 @@
-{
-  description = "Configuration is a library";
-
-  inputs = {
-    nixpkgs = {url = "github:nixos/nixpkgs/nixos-24.05";};
-    nixpkgsUnstable = {url = "github:nixos/nixpkgs/nixos-unstable";};
-    flakeUtils = {url = "github:numtide/flake-utils";};
-
-    commonFlake = {
-      url = "git+https://gitlab.schukai.com/schukai/entwicklung/nix-flakes.git?dir=common";
-      flake = true;
-    };
-
-    versionFlake = {
-      url = "git+https://gitlab.schukai.com/oss/utilities/version.git";
-      flake = true;
-    };
-  };
-
-  outputs = {
-    self,
-    nixpkgs,
-    nixpkgsUnstable,
-    flake-utils,
-    versionFlake,
-    commonFlake,
-    ...
-  } @ inputs:
-    flake-utils.lib.eachDefaultSystem (system: let
-      inherit (nixpkgs.lib) optional;
-
-      commonPck = commonFlake.packages.${system}.common;
-      versionPck = versionFlake.packages.${system}.version;
-
-      pkgs' = import nixpkgs {
-        inherit system;
-        overlays = [
-          (final: prev: {
-            common = commonPck;
-          })
-
-          (final: prev: {
-            version = versionPck;
-          })
-
-          (final: prev: {
-            dolt =
-              (import nixpkgsUnstable {
-                inherit system;
-              })
-              .dolt;
-          })
-        ];
-      };
-    in {
-      packages = rec {
-        manual = pkgs'.callPackage ./nix/packages/manual.nix {inherit self pkgs';};
-      };
-
-      devShells = {
-        default = let
-          commonPck = commonFlake.packages.${system}.common;
-          commonScript = commonPck + "/bin/common";
-
-          versionPck = versionFlake.packages.${system}.version;
-          versionBin = versionPck + "/bin/version";
-
-          scriptGoTask = import ./nix/scripts/go-task.nix {inherit self pkgs' system;};
-
-          commonPackages = import ./nix/config/common-packages.nix {inherit pkgs';};
-
-          extendedPackages = [
-            scriptGoTask
-          ];
-
-          scriptPackages = [
-            versionPck
-          ];
-
-          shellPackages =
-            commonPackages
-            ++ extendedPackages
-            ++ scriptPackages;
-        in
-          pkgs'.mkShell {
-            nativeBuildInputs = shellPackages;
-
-            shellHook = ''
-              source ${commonScript}
-
-              if [ -n "$CI_JOB_TOKEN" ]; then
-                  echo_fail "You are in a CI environment, this shell is not intended for CI, but for local development"
-                  exit 1
-              fi
-
-              echo_header "Configuration Lib development shell"
-              readonly worktree=$(get_working_dir)
-              echo_hint "Working directory: ''${worktree}"
-              currentVersion=$(${versionBin} print -g)
-              if [ -z "''${currentVersion}" ]; then
-                  echo_fail "No version found, check your git tags"
-              else
-                  echo_hint "Current version: ''${currentVersion}"
-              fi
-
-              currentGitBranch=$(git rev-parse --abbrev-ref HEAD)
-              echo_hint "Current branch: ''${currentGitBranch}"
-              echo_hint "You can run the task command to see the available tasks"
-
-              echo_section "Happy hacking!"
-            '';
-          };
-
-        gitlab = let
-          commonPck = commonFlake.packages.${system}.common;
-          commonScript = commonPck + "/bin/common";
-
-          versionPck = versionFlake.packages.${system}.version;
-
-          scriptCleanUp = pkgs'.callPackage ./nix/scripts/clean-up.nix {inherit pkgs';};
-          scriptRunCITests = pkgs'.callPackage ./nix/scripts/run-ci-tests.nix {inherit pkgs';};
-          scriptRelease = pkgs'.callPackage ./nix/scripts/release.nix {inherit pkgs';};
-
-          commonPackages = import ./nix/config/common-packages.nix {inherit pkgs';};
-
-          extendedPackages = [
-            scriptCleanUp
-            scriptRunCITests
-            scriptRelease
-          ];
-
-          scriptPackages = [
-            versionPck
-          ];
-
-          shellPackages =
-            commonPackages
-            ++ extendedPackages
-            ++ scriptPackages;
-        in
-          pkgs'.mkShell {
-            nativeBuildInputs = shellPackages;
-
-            shellHook = ''
-              source ${commonScript}
-
-              if [ -z "$CI_JOB_TOKEN" ]; then
-                 echo_fail "You are not in a CI environment, this shell is intended for CI, but for local development"
-                 exit 1
-              fi
-
-              cd ''${CI_PROJECT_DIR} || exit 1
-
-            '';
-          };
-      };
-    });
-}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go
deleted file mode 100644
index 3d53650..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/get.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-// GetValue returns the value of a field in a struct, given a path to the field.
-// The path can contain dots to access nested fields.
-// The object must be a pointer to a struct, a struct, a map, a slice or an array,
-// otherwise an error is returned.
-func GetValue[D any](obj D, keyWithDots string) (any, error) {
-	keySlice := strings.Split(keyWithDots, ".")
-	v := reflect.ValueOf(obj)
-
-	for _, key := range keySlice[0:] {
-
-		if !v.IsValid() {
-			return nil, newInvalidPathError(keyWithDots)
-		}
-
-		switch v.Kind() {
-		case reflect.Ptr, reflect.Interface:
-			v = v.Elem()
-		}
-
-		switch v.Kind() {
-		case reflect.Map:
-			v = v.MapIndex(reflect.ValueOf(key))
-			if !v.IsValid() {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-
-		case reflect.Slice, reflect.Array:
-			index, err := strconv.Atoi(key)
-			if err != nil {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-			// check if index is in range
-			if index >= v.Len() {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-			v = v.Index(index)
-		case reflect.Struct:
-			v = v.FieldByName(key)
-			if !v.IsValid() {
-				return nil, newInvalidPathError(keyWithDots)
-			}
-		default:
-			return nil, newInvalidPathError(keyWithDots)
-		}
-
-	}
-
-	if v.Kind() == reflect.Invalid {
-		return nil, newInvalidPathError(keyWithDots)
-	}
-
-	//for v.Kind() == reflect.Ptr {
-	//	v = v.Elem()
-	//}
-
-	// check if v can interface
-	if !v.CanInterface() {
-		return nil, newInvalidPathError(keyWithDots)
-	}
-
-	return v.Interface(), nil
-
-}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/pathfinder.iml b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/pathfinder.iml
deleted file mode 100644
index 789c0e8..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/pathfinder.iml
+++ /dev/null
@@ -1,12 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<module type="JAVA_MODULE" version="4">
-  <component name="NewModuleRootManager" inherit-compiler-output="true">
-    <exclude-output />
-    <content url="file://$MODULE_DIR$">
-      <sourceFolder url="file://$MODULE_DIR$/.devenv/state/go/pkg/mod/github.com/google/addlicense@v1.1.1/testdata/expected" isTestSource="false" />
-      <sourceFolder url="file://$MODULE_DIR$/.devenv/state/go/pkg/mod/github.com/google/addlicense@v1.1.1/testdata/initial" isTestSource="false" />
-    </content>
-    <orderEntry type="inheritedJdk" />
-    <orderEntry type="sourceFolder" forTests="false" />
-  </component>
-</module>
\ No newline at end of file
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json
deleted file mode 100644
index ccd00c2..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/release.json
+++ /dev/null
@@ -1 +0,0 @@
-{"version":"0.5.2"}
diff --git a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go b/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go
deleted file mode 100644
index 7c37da1..0000000
--- a/source/vendor/gitlab.schukai.com/oss/libraries/go/utilities/pathfinder/set.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2022 schukai GmbH
-// SPDX-License-Identifier: AGPL-3.0
-
-package pathfinder
-
-import (
-	"bytes"
-	"encoding/gob"
-	"fmt"
-	"reflect"
-	"strconv"
-	"strings"
-)
-
-func deepCopy(src, dst interface{}) error {
-	var buf bytes.Buffer
-	enc := gob.NewEncoder(&buf)
-	dec := gob.NewDecoder(&buf)
-
-	if err := enc.Encode(src); err != nil {
-		return err
-	}
-
-	return dec.Decode(dst)
-}
-
-// SetValue sets the value of a field in a struct, given a path to the field.
-// The object must be a pointer to a struct, otherwise an error is returned.
-func SetValue[D any](obj D, keyWithDots string, newValue any) error {
-
-	keySlice := strings.Split(keyWithDots, ".")
-	reflectionOfObject := reflect.ValueOf(obj)
-
-	for keyIndex, key := range keySlice[0 : len(keySlice)-1] {
-
-		if reflectionOfObject.Kind() == reflect.Map {
-
-			if reflectionOfObject.IsNil() {
-				return newInvalidPathError(keyWithDots)
-			}
-
-			currentValue := reflectionOfObject.MapIndex(reflect.ValueOf(key)).Interface()
-			newValueCopy := reflect.New(reflect.TypeOf(currentValue)).Interface()
-			if err := deepCopy(currentValue, newValueCopy); err != nil {
-				return err
-			}
-
-			newValueCopyPtr := &newValueCopy
-			newValueCopyReflect := reflect.ValueOf(newValueCopyPtr).Elem()
-			if !newValueCopyReflect.CanAddr() {
-				return newCannotSetError("Wert ist nicht adressierbar")
-			}
-			newKey := strings.Join(keySlice[keyIndex+1:], ".")
-
-			err := SetValue(newValueCopyPtr, newKey, newValue)
-			if err != nil {
-				return err
-			}
-
-			reflectionOfObject.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(newValueCopy).Elem())
-			return nil
-
-		}
-
-		if reflectionOfObject.Kind() == reflect.Ptr && reflectionOfObject.Elem().Kind() == reflect.Interface {
-			reflectionOfObject = reflectionOfObject.Elem().Elem()
-		}
-
-		for reflectionOfObject.Kind() != reflect.Ptr {
-			if reflectionOfObject.Kind() == reflect.Invalid {
-				return newInvalidPathError(keyWithDots)
-			}
-
-			if reflectionOfObject.CanAddr() {
-				reflectionOfObject = reflectionOfObject.Addr()
-			} else {
-				return newCannotSetError(keyWithDots)
-			}
-
-		}
-
-		if reflectionOfObject.Kind() != reflect.Ptr {
-			return newUnsupportedTypePathError(keyWithDots, reflectionOfObject.Type())
-		}
-
-		switch reflectionOfObject.Elem().Kind() {
-		case reflect.Struct:
-			reflectionOfObject = reflectionOfObject.Elem().FieldByName(key)
-
-		case reflect.Slice:
-			// index is a number and get reflectionOfObject from slice with index
-			index, err := strconv.Atoi(key)
-			if err != nil {
-				return newInvalidPathError(keyWithDots)
-			}
-
-			if index >= reflectionOfObject.Elem().Len() {
-				return newInvalidPathError(keyWithDots)
-			}
-
-			reflectionOfObject = reflectionOfObject.Elem().Index(index)
-		default:
-			return newUnsupportedTypePathError(keyWithDots, reflectionOfObject.Type())
-		}
-
-	}
-
-	if reflectionOfObject.Kind() == reflect.Invalid {
-		return newInvalidPathError(keyWithDots)
-	}
-
-	for reflectionOfObject.Kind() == reflect.Ptr {
-		reflectionOfObject = reflectionOfObject.Elem()
-	}
-
-	// non-supporter type at the top of the path
-	switch reflectionOfObject.Kind() {
-	case reflect.Struct:
-
-		reflectionOfObject = reflectionOfObject.FieldByName(keySlice[len(keySlice)-1])
-		if !reflectionOfObject.IsValid() {
-			return newInvalidPathError(keyWithDots)
-		}
-
-		if !reflectionOfObject.CanSet() {
-			return newCannotSetError(keyWithDots)
-		}
-
-	case reflect.Map:
-
-		key := keySlice[len(keySlice)-1]
-		m := reflectionOfObject
-
-		keyVal := reflect.ValueOf(key)
-		newVal := reflect.ValueOf(newValue)
-
-		if !keyVal.Type().ConvertibleTo(m.Type().Key()) {
-			return fmt.Errorf("key type mismatch")
-		}
-
-		if !newVal.Type().ConvertibleTo(m.Type().Elem()) {
-			return fmt.Errorf("value type mismatch")
-		}
-
-		keyValConverted := keyVal.Convert(m.Type().Key())
-		newValConverted := newVal.Convert(m.Type().Elem())
-		m.SetMapIndex(keyValConverted, newValConverted)
-		return nil
-
-	case reflect.Slice:
-
-		index, err := strconv.Atoi(keySlice[len(keySlice)-1])
-		if err != nil {
-			return newInvalidPathError(keyWithDots)
-		}
-
-		if index >= reflectionOfObject.Len() {
-			return newInvalidPathError(keyWithDots)
-		}
-
-		reflectionOfObject = reflectionOfObject.Index(index)
-
-	case reflect.Array:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Ptr:
-		if newValue == nil {
-			reflectionOfObject.Set(reflect.Zero(reflectionOfObject.Type()))
-		} else {
-			reflectionOfObject.Set(reflect.ValueOf(&newValue))
-		}
-		return nil
-	case reflect.Interface:
-
-		// check if reflectionOfObject is an interface to an struct pointer
-		if reflectionOfObject.Elem().Kind() == reflect.Ptr && reflectionOfObject.Elem().Elem().Kind() == reflect.Struct {
-			return SetValue(reflectionOfObject.Elem().Interface(), keySlice[len(keySlice)-1], newValue)
-		}
-
-	case reflect.Chan:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Func:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.UnsafePointer:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Uintptr:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Complex64:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Complex128:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	case reflect.Invalid:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	default:
-		return newUnsupportedTypeAtTopOfPathError(keyWithDots, reflectionOfObject.Type())
-	}
-
-	newValueType := reflect.TypeOf(newValue)
-	if newValueType == nil {
-		return newUnsupportedTypePathError(keyWithDots, reflectionOfObject.Type())
-	}
-
-	newValueKind := reflect.TypeOf(newValue).Kind()
-
-	switch reflectionOfObject.Kind() {
-	case reflect.String:
-		if reflectionOfObject.Kind() == reflect.Ptr || reflectionOfObject.Kind() == reflect.Interface {
-			if reflectionOfObject.Elem().CanSet() && reflectionOfObject.Elem().Kind() == reflect.String {
-				if newValueKind == reflect.String {
-					reflectionOfObject.Elem().SetString(newValue.(string))
-				} else {
-					reflectionOfObject.Elem().SetString(fmt.Sprintf("%v", newValue))
-				}
-			}
-		} else if newValueKind == reflect.String {
-
-			if reflect.TypeOf(newValue).ConvertibleTo(reflect.TypeOf("")) {
-				newValueString := reflect.ValueOf(newValue).Convert(reflect.TypeOf("")).Interface().(string)
-				reflectionOfObject.SetString(newValueString)
-			} else {
-				return newUnsupportedTypePathError(keyWithDots, reflectionOfObject.Type())
-			}
-		} else {
-			reflectionOfObject.SetString(fmt.Sprintf("%v", newValue))
-		}
-
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
-
-		if newValueKind == reflect.Int {
-			reflectionOfObject.SetInt(int64(newValue.(int)))
-		} else {
-			s, err := strconv.ParseInt(fmt.Sprintf("%v", newValue), 10, 64)
-			if err != nil {
-				return err
-			}
-			reflectionOfObject.SetInt(s)
-		}
-
-	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
-
-		if newValueKind == reflect.Int {
-			reflectionOfObject.SetUint(uint64(newValue.(int)))
-		} else {
-			s, err := strconv.ParseInt(fmt.Sprintf("%v", newValue), 10, 64)
-			if err != nil {
-				return err
-			}
-			reflectionOfObject.SetUint(uint64(s))
-		}
-
-	case reflect.Bool:
-
-		if newValueKind == reflect.Bool {
-			reflectionOfObject.SetBool(newValue.(bool))
-		} else {
-			b, err := strconv.ParseBool(fmt.Sprintf("%v", newValue))
-			if err != nil {
-				return err
-			}
-
-			reflectionOfObject.SetBool(b)
-		}
-
-	case reflect.Float64, reflect.Float32:
-
-		if newValueKind == reflect.Float64 {
-			reflectionOfObject.SetFloat(newValue.(float64))
-		} else {
-			s, err := strconv.ParseFloat(fmt.Sprintf("%v", newValue), 64)
-			if err != nil {
-				return err
-			}
-
-			reflectionOfObject.SetFloat(s)
-		}
-
-	case reflect.Slice, reflect.Array:
-
-		if newValueKind == reflect.Ptr {
-			newValue = reflect.ValueOf(newValue).Elem().Interface()
-			reflectionOfObject.Set(reflect.ValueOf(newValue))
-		} else if newValueKind == reflect.Slice {
-			reflectionOfObject.Set(reflect.ValueOf(newValue))
-		} else {
-			return newUnsupportedTypePathError(keyWithDots, reflectionOfObject.Type())
-		}
-
-	default:
-		return newInvalidTypeForPathError(keyWithDots, reflectionOfObject.Type().String(), newValueKind.String())
-	}
-
-	return nil
-
-}
diff --git a/source/vendor/golang.org/x/net/html/doc.go b/source/vendor/golang.org/x/net/html/doc.go
index 3a7e5ab..885c4c5 100644
--- a/source/vendor/golang.org/x/net/html/doc.go
+++ b/source/vendor/golang.org/x/net/html/doc.go
@@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order:
 	if err != nil {
 		// ...
 	}
-	var f func(*html.Node)
-	f = func(n *html.Node) {
+	for n := range doc.Descendants() {
 		if n.Type == html.ElementNode && n.Data == "a" {
 			// Do something with n...
 		}
-		for c := n.FirstChild; c != nil; c = c.NextSibling {
-			f(c)
-		}
 	}
-	f(doc)
 
 The relevant specifications include:
 https://html.spec.whatwg.org/multipage/syntax.html and
diff --git a/source/vendor/golang.org/x/net/html/iter.go b/source/vendor/golang.org/x/net/html/iter.go
new file mode 100644
index 0000000..54be8fd
--- /dev/null
+++ b/source/vendor/golang.org/x/net/html/iter.go
@@ -0,0 +1,56 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package html
+
+import "iter"
+
+// Ancestors returns an iterator over the ancestors of n, starting with n.Parent.
+//
+// Mutating a Node or its parents while iterating may have unexpected results.
+func (n *Node) Ancestors() iter.Seq[*Node] {
+	_ = n.Parent // eager nil check
+
+	return func(yield func(*Node) bool) {
+		for p := n.Parent; p != nil && yield(p); p = p.Parent {
+		}
+	}
+}
+
+// ChildNodes returns an iterator over the immediate children of n,
+// starting with n.FirstChild.
+//
+// Mutating a Node or its children while iterating may have unexpected results.
+func (n *Node) ChildNodes() iter.Seq[*Node] {
+	_ = n.FirstChild // eager nil check
+
+	return func(yield func(*Node) bool) {
+		for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling {
+		}
+	}
+
+}
+
+// Descendants returns an iterator over all nodes recursively beneath
+// n, excluding n itself. Nodes are visited in depth-first preorder.
+//
+// Mutating a Node or its descendants while iterating may have unexpected results.
+func (n *Node) Descendants() iter.Seq[*Node] {
+	_ = n.FirstChild // eager nil check
+
+	return func(yield func(*Node) bool) {
+		n.descendants(yield)
+	}
+}
+
+func (n *Node) descendants(yield func(*Node) bool) bool {
+	for c := range n.ChildNodes() {
+		if !yield(c) || !c.descendants(yield) {
+			return false
+		}
+	}
+	return true
+}
diff --git a/source/vendor/golang.org/x/net/html/node.go b/source/vendor/golang.org/x/net/html/node.go
index 1350eef..77741a1 100644
--- a/source/vendor/golang.org/x/net/html/node.go
+++ b/source/vendor/golang.org/x/net/html/node.go
@@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode}
 // that it looks like "a<b" rather than "a&lt;b". For element nodes, DataAtom
 // is the atom for Data, or zero if Data is not a known tag name.
 //
+// Node trees may be navigated using the link fields (Parent,
+// FirstChild, and so on) or a range loop over iterators such as
+// [Node.Descendants].
+//
 // An empty Namespace implies a "http://www.w3.org/1999/xhtml" namespace.
 // Similarly, "math" is short for "http://www.w3.org/1998/Math/MathML", and
 // "svg" is short for "http://www.w3.org/2000/svg".
diff --git a/source/vendor/golang.org/x/sys/unix/ioctl_linux.go b/source/vendor/golang.org/x/sys/unix/ioctl_linux.go
index dbe680e..7ca4fa1 100644
--- a/source/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/source/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
 	return &value, err
 }
 
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+	return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return nil, err
+	}
+
+	value := HwTstampConfig{}
+	ifrd := ifr.withData(unsafe.Pointer(&value))
+
+	err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+	return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+	ifr, err := NewIfreq(ifname)
+	if err != nil {
+		return err
+	}
+	ifrd := ifr.withData(unsafe.Pointer(cfg))
+	return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+	var value PtpClockCaps
+	err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+	var value PtpSysOffsetPrecise
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+	value := PtpSysOffsetExtended{Samples: uint32(samples)}
+	err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+	value := PtpPinDesc{Index: uint32(index)}
+	err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+	return &value, err
+}
+
+// IoctlPtpPinSetfunc updates configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+	return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+	return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+	return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
 // IoctlGetWatchdogInfo fetches information about a watchdog device from the
 // Linux watchdog API. For more information, see:
 // https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
diff --git a/source/vendor/golang.org/x/sys/unix/mkerrors.sh b/source/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54eca..6ab02b6 100644
--- a/source/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/source/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
 #endif
 #define _GNU_SOURCE
 
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+	(defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+	(defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef   _TIME_BITS
+#  undef  _TIME_BITS
+# endif
+# define  _TIME_BITS 32
+#endif
+
 // <sys/ioctl.h> is broken on powerpc64, as it fails to include definitions of
 // these structures. We just include them copied from <bits/termios.h>.
 #if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
 #include <linux/nsfs.h>
 #include <linux/perf_event.h>
 #include <linux/pps.h>
+#include <linux/ptp_clock.h>
 #include <linux/ptrace.h>
 #include <linux/random.h>
 #include <linux/reboot.h>
@@ -527,6 +538,7 @@ ccflags="$@"
 		$2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|TCP|MCAST|EVFILT|NOTE|SHUT|PROT|MAP|MREMAP|MFD|T?PACKET|MSG|SCM|MCL|DT|MADV|PR|LOCAL|TCPOPT|UDP)_/ ||
 		$2 ~ /^NFC_(GENL|PROTO|COMM|RF|SE|DIRECTION|LLCP|SOCKPROTO)_/ ||
 		$2 ~ /^NFC_.*_(MAX)?SIZE$/ ||
+		$2 ~ /^PTP_/ ||
 		$2 ~ /^RAW_PAYLOAD_/ ||
 		$2 ~ /^[US]F_/ ||
 		$2 ~ /^TP_STATUS_/ ||
diff --git a/source/vendor/golang.org/x/sys/unix/syscall_linux.go b/source/vendor/golang.org/x/sys/unix/syscall_linux.go
index f08abd4..230a945 100644
--- a/source/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/source/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -1860,6 +1860,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys	ClockAdjtime(clockid int32, buf *Timex) (state int, err error)
 //sys	ClockGetres(clockid int32, res *Timespec) (err error)
 //sys	ClockGettime(clockid int32, time *Timespec) (err error)
+//sys	ClockSettime(clockid int32, time *Timespec) (err error)
 //sys	ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error)
 //sys	Close(fd int) (err error)
 //sys	CloseRange(first uint, last uint, flags uint) (err error)
diff --git a/source/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go b/source/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
index 312ae6a..7bf5c04 100644
--- a/source/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
+++ b/source/vendor/golang.org/x/sys/unix/syscall_zos_s390x.go
@@ -768,6 +768,15 @@ func Munmap(b []byte) (err error) {
 	return mapper.Munmap(b)
 }
 
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+	xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+	return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+	return mapper.munmap(uintptr(addr), length)
+}
+
 //sys   Gethostname(buf []byte) (err error) = SYS___GETHOSTNAME_A
 //sysnb	Getgid() (gid int)
 //sysnb	Getpid() (pid int)
@@ -816,10 +825,10 @@ func Lstat(path string, stat *Stat_t) (err error) {
 // for checking symlinks begins with $VERSION/ $SYSNAME/ $SYSSYMR/ $SYSSYMA/
 func isSpecialPath(path []byte) (v bool) {
 	var special = [4][8]byte{
-		[8]byte{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
-		[8]byte{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
-		[8]byte{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
+		{'V', 'E', 'R', 'S', 'I', 'O', 'N', '/'},
+		{'S', 'Y', 'S', 'N', 'A', 'M', 'E', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'R', '/'},
+		{'S', 'Y', 'S', 'S', 'Y', 'M', 'A', '/'}}
 
 	var i, j int
 	for i = 0; i < len(special); i++ {
@@ -3115,3 +3124,90 @@ func legacy_Mkfifoat(dirfd int, path string, mode uint32) (err error) {
 //sys	Posix_openpt(oflag int) (fd int, err error) = SYS_POSIX_OPENPT
 //sys	Grantpt(fildes int) (rc int, err error) = SYS_GRANTPT
 //sys	Unlockpt(fildes int) (rc int, err error) = SYS_UNLOCKPT
+
+func fcntlAsIs(fd uintptr, cmd int, arg uintptr) (val int, err error) {
+	runtime.EnterSyscall()
+	r0, e2, e1 := CallLeFuncWithErr(GetZosLibVec()+SYS_FCNTL<<4, uintptr(fd), uintptr(cmd), arg)
+	runtime.ExitSyscall()
+	val = int(r0)
+	if int64(r0) == -1 {
+		err = errnoErr2(e1, e2)
+	}
+	return
+}
+
+func Fcntl(fd uintptr, cmd int, op interface{}) (ret int, err error) {
+	switch op.(type) {
+	case *Flock_t:
+		err = FcntlFlock(fd, cmd, op.(*Flock_t))
+		if err != nil {
+			ret = -1
+		}
+		return
+	case int:
+		return FcntlInt(fd, cmd, op.(int))
+	case *F_cnvrt:
+		return fcntlAsIs(fd, cmd, uintptr(unsafe.Pointer(op.(*F_cnvrt))))
+	case unsafe.Pointer:
+		return fcntlAsIs(fd, cmd, uintptr(op.(unsafe.Pointer)))
+	default:
+		return -1, EINVAL
+	}
+	return
+}
+
+func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	if raceenabled {
+		raceReleaseMerge(unsafe.Pointer(&ioSync))
+	}
+	return sendfile(outfd, infd, offset, count)
+}
+
+func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
+	// TODO: use LE call instead if the call is implemented
+	originalOffset, err := Seek(infd, 0, SEEK_CUR)
+	if err != nil {
+		return -1, err
+	}
+	//start reading data from in_fd
+	if offset != nil {
+		_, err := Seek(infd, *offset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+
+	buf := make([]byte, count)
+	readBuf := make([]byte, 0)
+	var n int = 0
+	for i := 0; i < count; i += n {
+		n, err := Read(infd, buf)
+		if n == 0 {
+			if err != nil {
+				return -1, err
+			} else { // EOF
+				break
+			}
+		}
+		readBuf = append(readBuf, buf...)
+		buf = buf[0:0]
+	}
+
+	n2, err := Write(outfd, readBuf)
+	if err != nil {
+		return -1, err
+	}
+
+	//When sendfile() returns, this variable will be set to the
+	// offset of the byte following the last byte that was read.
+	if offset != nil {
+		*offset = *offset + int64(n)
+		// If offset is not NULL, then sendfile() does not modify the file
+		// offset of in_fd
+		_, err := Seek(infd, originalOffset, SEEK_SET)
+		if err != nil {
+			return -1, err
+		}
+	}
+	return n2, nil
+}
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux.go
index de3b462..ccba391 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -2625,6 +2625,28 @@ const (
 	PR_UNALIGN_NOPRINT                          = 0x1
 	PR_UNALIGN_SIGBUS                           = 0x2
 	PSTOREFS_MAGIC                              = 0x6165676c
+	PTP_CLK_MAGIC                               = '='
+	PTP_ENABLE_FEATURE                          = 0x1
+	PTP_EXTTS_EDGES                             = 0x6
+	PTP_EXTTS_EVENT_VALID                       = 0x1
+	PTP_EXTTS_V1_VALID_FLAGS                    = 0x7
+	PTP_EXTTS_VALID_FLAGS                       = 0x1f
+	PTP_EXT_OFFSET                              = 0x10
+	PTP_FALLING_EDGE                            = 0x4
+	PTP_MAX_SAMPLES                             = 0x19
+	PTP_PEROUT_DUTY_CYCLE                       = 0x2
+	PTP_PEROUT_ONE_SHOT                         = 0x1
+	PTP_PEROUT_PHASE                            = 0x4
+	PTP_PEROUT_V1_VALID_FLAGS                   = 0x0
+	PTP_PEROUT_VALID_FLAGS                      = 0x7
+	PTP_PIN_GETFUNC                             = 0xc0603d06
+	PTP_PIN_GETFUNC2                            = 0xc0603d0f
+	PTP_RISING_EDGE                             = 0x2
+	PTP_STRICT_FLAGS                            = 0x8
+	PTP_SYS_OFFSET_EXTENDED                     = 0xc4c03d09
+	PTP_SYS_OFFSET_EXTENDED2                    = 0xc4c03d12
+	PTP_SYS_OFFSET_PRECISE                      = 0xc0403d08
+	PTP_SYS_OFFSET_PRECISE2                     = 0xc0403d11
 	PTRACE_ATTACH                               = 0x10
 	PTRACE_CONT                                 = 0x7
 	PTRACE_DETACH                               = 0x11
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index 8aa6d77..0c00cb3 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPXREGS                = 0x12
 	PTRACE_GET_THREAD_AREA           = 0x19
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index da428f4..dfb3645 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_ARCH_PRCTL                = 0x1e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPXREGS                = 0x12
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index bf45bfe..d46dcf7 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETCRUNCHREGS             = 0x19
 	PTRACE_GETFDPIC                  = 0x1f
 	PTRACE_GETFDPIC_EXEC             = 0x0
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index 71c6716..3af3248 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -240,6 +240,20 @@ const (
 	PROT_BTI                         = 0x10
 	PROT_MTE                         = 0x20
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_PEEKMTETAGS               = 0x21
 	PTRACE_POKEMTETAGS               = 0x22
 	PTRACE_SYSEMU                    = 0x1f
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9476628..292bcf0 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -238,6 +238,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_SYSEMU                    = 0x1f
 	PTRACE_SYSEMU_SINGLESTEP         = 0x20
 	RLIMIT_AS                        = 0x9
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index b9e85f3..782b711 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index a48b68a..84973fd 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ea00e85..6d9cbc3 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 91c6468..5f9fedb 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GET_THREAD_AREA           = 0x19
 	PTRACE_GET_THREAD_AREA_3264      = 0xc4
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 8cbf38d..bb0026e 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index a2df734..46120db 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index 2479137..5c95163 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -237,6 +237,20 @@ const (
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PROT_SAO                         = 0x10
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETEVRREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETREGS64                 = 0x16
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index d265f14..11a84d5 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_GETFDPIC                  = 0x21
 	PTRACE_GETFDPIC_EXEC             = 0x0
 	PTRACE_GETFDPIC_INTERP           = 0x1
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 3f2d644..f78c461 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -234,6 +234,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x7434
 	PPPIOCXFERUNIT                   = 0x744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x80503d01
+	PTP_CLOCK_GETCAPS2               = 0x80503d0a
+	PTP_ENABLE_PPS                   = 0x40043d04
+	PTP_ENABLE_PPS2                  = 0x40043d0d
+	PTP_EXTTS_REQUEST                = 0x40103d02
+	PTP_EXTTS_REQUEST2               = 0x40103d0b
+	PTP_MASK_CLEAR_ALL               = 0x3d13
+	PTP_MASK_EN_SINGLE               = 0x40043d14
+	PTP_PEROUT_REQUEST               = 0x40383d03
+	PTP_PEROUT_REQUEST2              = 0x40383d0c
+	PTP_PIN_SETFUNC                  = 0x40603d07
+	PTP_PIN_SETFUNC2                 = 0x40603d10
+	PTP_SYS_OFFSET                   = 0x43403d05
+	PTP_SYS_OFFSET2                  = 0x43403d0e
 	PTRACE_DISABLE_TE                = 0x5010
 	PTRACE_ENABLE_TE                 = 0x5009
 	PTRACE_GET_LAST_BREAK            = 0x5006
diff --git a/source/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/source/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index 5d8b727..aeb777c 100644
--- a/source/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/source/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -239,6 +239,20 @@ const (
 	PPPIOCUNBRIDGECHAN               = 0x20007434
 	PPPIOCXFERUNIT                   = 0x2000744e
 	PR_SET_PTRACER_ANY               = 0xffffffffffffffff
+	PTP_CLOCK_GETCAPS                = 0x40503d01
+	PTP_CLOCK_GETCAPS2               = 0x40503d0a
+	PTP_ENABLE_PPS                   = 0x80043d04
+	PTP_ENABLE_PPS2                  = 0x80043d0d
+	PTP_EXTTS_REQUEST                = 0x80103d02
+	PTP_EXTTS_REQUEST2               = 0x80103d0b
+	PTP_MASK_CLEAR_ALL               = 0x20003d13
+	PTP_MASK_EN_SINGLE               = 0x80043d14
+	PTP_PEROUT_REQUEST               = 0x80383d03
+	PTP_PEROUT_REQUEST2              = 0x80383d0c
+	PTP_PIN_SETFUNC                  = 0x80603d07
+	PTP_PIN_SETFUNC2                 = 0x80603d10
+	PTP_SYS_OFFSET                   = 0x83403d05
+	PTP_SYS_OFFSET2                  = 0x83403d0e
 	PTRACE_GETFPAREGS                = 0x14
 	PTRACE_GETFPREGS                 = 0xe
 	PTRACE_GETFPREGS64               = 0x19
diff --git a/source/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/source/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index af30da5..5cc1e8e 100644
--- a/source/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/source/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -592,6 +592,16 @@ func ClockGettime(clockid int32, time *Timespec) (err error) {
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
+func ClockSettime(clockid int32, time *Timespec) (err error) {
+	_, _, e1 := Syscall(SYS_CLOCK_SETTIME, uintptr(clockid), uintptr(unsafe.Pointer(time)), 0)
+	if e1 != 0 {
+		err = errnoErr(e1)
+	}
+	return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
 func ClockNanosleep(clockid int32, flags int, request *Timespec, remain *Timespec) (err error) {
 	_, _, e1 := Syscall6(SYS_CLOCK_NANOSLEEP, uintptr(clockid), uintptr(flags), uintptr(unsafe.Pointer(request)), uintptr(unsafe.Pointer(remain)), 0, 0)
 	if e1 != 0 {
diff --git a/source/vendor/golang.org/x/sys/unix/ztypes_linux.go b/source/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 3a69e45..8daaf3f 100644
--- a/source/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/source/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -1752,12 +1752,6 @@ const (
 	IFLA_IPVLAN_UNSPEC                         = 0x0
 	IFLA_IPVLAN_MODE                           = 0x1
 	IFLA_IPVLAN_FLAGS                          = 0x2
-	NETKIT_NEXT                                = -0x1
-	NETKIT_PASS                                = 0x0
-	NETKIT_DROP                                = 0x2
-	NETKIT_REDIRECT                            = 0x7
-	NETKIT_L2                                  = 0x0
-	NETKIT_L3                                  = 0x1
 	IFLA_NETKIT_UNSPEC                         = 0x0
 	IFLA_NETKIT_PEER_INFO                      = 0x1
 	IFLA_NETKIT_PRIMARY                        = 0x2
@@ -1796,6 +1790,7 @@ const (
 	IFLA_VXLAN_DF                              = 0x1d
 	IFLA_VXLAN_VNIFILTER                       = 0x1e
 	IFLA_VXLAN_LOCALBYPASS                     = 0x1f
+	IFLA_VXLAN_LABEL_POLICY                    = 0x20
 	IFLA_GENEVE_UNSPEC                         = 0x0
 	IFLA_GENEVE_ID                             = 0x1
 	IFLA_GENEVE_REMOTE                         = 0x2
@@ -1825,6 +1820,8 @@ const (
 	IFLA_GTP_ROLE                              = 0x4
 	IFLA_GTP_CREATE_SOCKETS                    = 0x5
 	IFLA_GTP_RESTART_COUNT                     = 0x6
+	IFLA_GTP_LOCAL                             = 0x7
+	IFLA_GTP_LOCAL6                            = 0x8
 	IFLA_BOND_UNSPEC                           = 0x0
 	IFLA_BOND_MODE                             = 0x1
 	IFLA_BOND_ACTIVE_SLAVE                     = 0x2
@@ -1857,6 +1854,7 @@ const (
 	IFLA_BOND_AD_LACP_ACTIVE                   = 0x1d
 	IFLA_BOND_MISSED_MAX                       = 0x1e
 	IFLA_BOND_NS_IP6_TARGET                    = 0x1f
+	IFLA_BOND_COUPLED_CONTROL                  = 0x20
 	IFLA_BOND_AD_INFO_UNSPEC                   = 0x0
 	IFLA_BOND_AD_INFO_AGGREGATOR               = 0x1
 	IFLA_BOND_AD_INFO_NUM_PORTS                = 0x2
@@ -1925,6 +1923,7 @@ const (
 	IFLA_HSR_SEQ_NR                            = 0x5
 	IFLA_HSR_VERSION                           = 0x6
 	IFLA_HSR_PROTOCOL                          = 0x7
+	IFLA_HSR_INTERLINK                         = 0x8
 	IFLA_STATS_UNSPEC                          = 0x0
 	IFLA_STATS_LINK_64                         = 0x1
 	IFLA_STATS_LINK_XSTATS                     = 0x2
@@ -1977,6 +1976,15 @@ const (
 	IFLA_DSA_MASTER                            = 0x1
 )
 
+const (
+	NETKIT_NEXT     = -0x1
+	NETKIT_PASS     = 0x0
+	NETKIT_DROP     = 0x2
+	NETKIT_REDIRECT = 0x7
+	NETKIT_L2       = 0x0
+	NETKIT_L3       = 0x1
+)
+
 const (
 	NF_INET_PRE_ROUTING  = 0x0
 	NF_INET_LOCAL_IN     = 0x1
@@ -4110,6 +4118,106 @@ type EthtoolDrvinfo struct {
 	Regdump_len  uint32
 }
 
+type EthtoolTsInfo struct {
+	Cmd             uint32
+	So_timestamping uint32
+	Phc_index       int32
+	Tx_types        uint32
+	Tx_reserved     [3]uint32
+	Rx_filters      uint32
+	Rx_reserved     [3]uint32
+}
+
+type HwTstampConfig struct {
+	Flags     int32
+	Tx_type   int32
+	Rx_filter int32
+}
+
+const (
+	HWTSTAMP_FILTER_NONE            = 0x0
+	HWTSTAMP_FILTER_ALL             = 0x1
+	HWTSTAMP_FILTER_SOME            = 0x2
+	HWTSTAMP_FILTER_PTP_V1_L4_EVENT = 0x3
+	HWTSTAMP_FILTER_PTP_V2_L4_EVENT = 0x6
+	HWTSTAMP_FILTER_PTP_V2_L2_EVENT = 0x9
+	HWTSTAMP_FILTER_PTP_V2_EVENT    = 0xc
+)
+
+const (
+	HWTSTAMP_TX_OFF          = 0x0
+	HWTSTAMP_TX_ON           = 0x1
+	HWTSTAMP_TX_ONESTEP_SYNC = 0x2
+)
+
+type (
+	PtpClockCaps struct {
+		Max_adj            int32
+		N_alarm            int32
+		N_ext_ts           int32
+		N_per_out          int32
+		Pps                int32
+		N_pins             int32
+		Cross_timestamping int32
+		Adjust_phase       int32
+		Max_phase_adj      int32
+		Rsv                [11]int32
+	}
+	PtpClockTime struct {
+		Sec      int64
+		Nsec     uint32
+		Reserved uint32
+	}
+	PtpExttsEvent struct {
+		T     PtpClockTime
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpExttsRequest struct {
+		Index uint32
+		Flags uint32
+		Rsv   [2]uint32
+	}
+	PtpPeroutRequest struct {
+		StartOrPhase PtpClockTime
+		Period       PtpClockTime
+		Index        uint32
+		Flags        uint32
+		On           PtpClockTime
+	}
+	PtpPinDesc struct {
+		Name  [64]byte
+		Index uint32
+		Func  uint32
+		Chan  uint32
+		Rsv   [5]uint32
+	}
+	PtpSysOffset struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [51]PtpClockTime
+	}
+	PtpSysOffsetExtended struct {
+		Samples uint32
+		Rsv     [3]uint32
+		Ts      [25][3]PtpClockTime
+	}
+	PtpSysOffsetPrecise struct {
+		Device   PtpClockTime
+		Realtime PtpClockTime
+		Monoraw  PtpClockTime
+		Rsv      [4]uint32
+	}
+)
+
+const (
+	PTP_PF_NONE    = 0x0
+	PTP_PF_EXTTS   = 0x1
+	PTP_PF_PEROUT  = 0x2
+	PTP_PF_PHYSYNC = 0x3
+)
+
 type (
 	HIDRawReportDescriptor struct {
 		Size  uint32
diff --git a/source/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go b/source/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
index d9a13af..2e5d5a4 100644
--- a/source/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
+++ b/source/vendor/golang.org/x/sys/unix/ztypes_zos_s390x.go
@@ -377,6 +377,12 @@ type Flock_t struct {
 	Pid    int32
 }
 
+type F_cnvrt struct {
+	Cvtcmd int32
+	Pccsid int16
+	Fccsid int16
+}
+
 type Termios struct {
 	Cflag uint32
 	Iflag uint32
diff --git a/source/vendor/golang.org/x/sys/windows/syscall_windows.go b/source/vendor/golang.org/x/sys/windows/syscall_windows.go
index 5cee9a3..4510bfc 100644
--- a/source/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/source/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -725,20 +725,12 @@ func DurationSinceBoot() time.Duration {
 }
 
 func Ftruncate(fd Handle, length int64) (err error) {
-	curoffset, e := Seek(fd, 0, 1)
-	if e != nil {
-		return e
-	}
-	defer Seek(fd, curoffset, 0)
-	_, e = Seek(fd, length, 0)
-	if e != nil {
-		return e
+	type _FILE_END_OF_FILE_INFO struct {
+		EndOfFile int64
 	}
-	e = SetEndOfFile(fd)
-	if e != nil {
-		return e
-	}
-	return nil
+	var info _FILE_END_OF_FILE_INFO
+	info.EndOfFile = length
+	return SetFileInformationByHandle(fd, FileEndOfFileInfo, (*byte)(unsafe.Pointer(&info)), uint32(unsafe.Sizeof(info)))
 }
 
 func Gettimeofday(tv *Timeval) (err error) {
@@ -894,6 +886,11 @@ const socket_error = uintptr(^uint32(0))
 //sys	GetACP() (acp uint32) = kernel32.GetACP
 //sys	MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
 //sys	getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
+//sys   GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys   GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys   NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys   NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
+//sys   CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
 
 // For testing: clients can set this flag to force
 // creation of IPv6 sockets to return EAFNOSUPPORT.
@@ -1685,13 +1682,16 @@ func (s NTStatus) Error() string {
 // do not use NTUnicodeString, and instead UTF16PtrFromString should be used for
 // the more common *uint16 string type.
 func NewNTUnicodeString(s string) (*NTUnicodeString, error) {
-	var u NTUnicodeString
-	s16, err := UTF16PtrFromString(s)
+	s16, err := UTF16FromString(s)
 	if err != nil {
 		return nil, err
 	}
-	RtlInitUnicodeString(&u, s16)
-	return &u, nil
+	n := uint16(len(s16) * 2)
+	return &NTUnicodeString{
+		Length:        n - 2, // subtract 2 bytes for the NULL terminator
+		MaximumLength: n,
+		Buffer:        &s16[0],
+	}, nil
 }
 
 // Slice returns a uint16 slice that aliases the data in the NTUnicodeString.
diff --git a/source/vendor/golang.org/x/sys/windows/types_windows.go b/source/vendor/golang.org/x/sys/windows/types_windows.go
index 7b97a15..51311e2 100644
--- a/source/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/source/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2203,6 +2203,132 @@ const (
 	IfOperStatusLowerLayerDown = 7
 )
 
+const (
+	IF_MAX_PHYS_ADDRESS_LENGTH = 32
+	IF_MAX_STRING_SIZE         = 256
+)
+
+// MIB_IF_ENTRY_LEVEL enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/nf-netioapi-getifentry2ex.
+const (
+	MibIfEntryNormal                  = 0
+	MibIfEntryNormalWithoutStatistics = 2
+)
+
+// MIB_NOTIFICATION_TYPE enumeration from netioapi.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ne-netioapi-mib_notification_type.
+const (
+	MibParameterNotification = 0
+	MibAddInstance           = 1
+	MibDeleteInstance        = 2
+	MibInitialNotification   = 3
+)
+
+// MibIfRow2 stores information about a particular interface. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_if_row2.
+type MibIfRow2 struct {
+	InterfaceLuid               uint64
+	InterfaceIndex              uint32
+	InterfaceGuid               GUID
+	Alias                       [IF_MAX_STRING_SIZE + 1]uint16
+	Description                 [IF_MAX_STRING_SIZE + 1]uint16
+	PhysicalAddressLength       uint32
+	PhysicalAddress             [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	PermanentPhysicalAddress    [IF_MAX_PHYS_ADDRESS_LENGTH]uint8
+	Mtu                         uint32
+	Type                        uint32
+	TunnelType                  uint32
+	MediaType                   uint32
+	PhysicalMediumType          uint32
+	AccessType                  uint32
+	DirectionType               uint32
+	InterfaceAndOperStatusFlags uint8
+	OperStatus                  uint32
+	AdminStatus                 uint32
+	MediaConnectState           uint32
+	NetworkGuid                 GUID
+	ConnectionType              uint32
+	TransmitLinkSpeed           uint64
+	ReceiveLinkSpeed            uint64
+	InOctets                    uint64
+	InUcastPkts                 uint64
+	InNUcastPkts                uint64
+	InDiscards                  uint64
+	InErrors                    uint64
+	InUnknownProtos             uint64
+	InUcastOctets               uint64
+	InMulticastOctets           uint64
+	InBroadcastOctets           uint64
+	OutOctets                   uint64
+	OutUcastPkts                uint64
+	OutNUcastPkts               uint64
+	OutDiscards                 uint64
+	OutErrors                   uint64
+	OutUcastOctets              uint64
+	OutMulticastOctets          uint64
+	OutBroadcastOctets          uint64
+	OutQLen                     uint64
+}
+
+// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
+type MibUnicastIpAddressRow struct {
+	Address            RawSockaddrInet6 // SOCKADDR_INET union
+	InterfaceLuid      uint64
+	InterfaceIndex     uint32
+	PrefixOrigin       uint32
+	SuffixOrigin       uint32
+	ValidLifetime      uint32
+	PreferredLifetime  uint32
+	OnLinkPrefixLength uint8
+	SkipAsSource       uint8
+	DadState           uint32
+	ScopeId            uint32
+	CreationTimeStamp  Filetime
+}
+
+const ScopeLevelCount = 16
+
+// MIB_IPINTERFACE_ROW stores interface management information for a particular IP address family on a network interface.
+// See https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipinterface_row.
+type MibIpInterfaceRow struct {
+	Family                               uint16
+	InterfaceLuid                        uint64
+	InterfaceIndex                       uint32
+	MaxReassemblySize                    uint32
+	InterfaceIdentifier                  uint64
+	MinRouterAdvertisementInterval       uint32
+	MaxRouterAdvertisementInterval       uint32
+	AdvertisingEnabled                   uint8
+	ForwardingEnabled                    uint8
+	WeakHostSend                         uint8
+	WeakHostReceive                      uint8
+	UseAutomaticMetric                   uint8
+	UseNeighborUnreachabilityDetection   uint8
+	ManagedAddressConfigurationSupported uint8
+	OtherStatefulConfigurationSupported  uint8
+	AdvertiseDefaultRoute                uint8
+	RouterDiscoveryBehavior              uint32
+	DadTransmits                         uint32
+	BaseReachableTime                    uint32
+	RetransmitTime                       uint32
+	PathMtuDiscoveryTimeout              uint32
+	LinkLocalAddressBehavior             uint32
+	LinkLocalAddressTimeout              uint32
+	ZoneIndices                          [ScopeLevelCount]uint32
+	SitePrefixLength                     uint32
+	Metric                               uint32
+	NlMtu                                uint32
+	Connected                            uint8
+	SupportsWakeUpPatterns               uint8
+	SupportsNeighborDiscovery            uint8
+	SupportsRouterDiscovery              uint8
+	ReachableTime                        uint32
+	TransmitOffload                      uint32
+	ReceiveOffload                       uint32
+	DisableDefaultRoutes                 uint8
+}
+
 // Console related constants used for the mode parameter to SetConsoleMode. See
 // https://docs.microsoft.com/en-us/windows/console/setconsolemode for details.
 
diff --git a/source/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/source/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 4c2e1bd..6f52528 100644
--- a/source/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/source/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -181,10 +181,15 @@ var (
 	procDnsRecordListFree                                    = moddnsapi.NewProc("DnsRecordListFree")
 	procDwmGetWindowAttribute                                = moddwmapi.NewProc("DwmGetWindowAttribute")
 	procDwmSetWindowAttribute                                = moddwmapi.NewProc("DwmSetWindowAttribute")
+	procCancelMibChangeNotify2                               = modiphlpapi.NewProc("CancelMibChangeNotify2")
 	procGetAdaptersAddresses                                 = modiphlpapi.NewProc("GetAdaptersAddresses")
 	procGetAdaptersInfo                                      = modiphlpapi.NewProc("GetAdaptersInfo")
 	procGetBestInterfaceEx                                   = modiphlpapi.NewProc("GetBestInterfaceEx")
 	procGetIfEntry                                           = modiphlpapi.NewProc("GetIfEntry")
+	procGetIfEntry2Ex                                        = modiphlpapi.NewProc("GetIfEntry2Ex")
+	procGetUnicastIpAddressEntry                             = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
+	procNotifyIpInterfaceChange                              = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+	procNotifyUnicastIpAddressChange                         = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
 	procAddDllDirectory                                      = modkernel32.NewProc("AddDllDirectory")
 	procAssignProcessToJobObject                             = modkernel32.NewProc("AssignProcessToJobObject")
 	procCancelIo                                             = modkernel32.NewProc("CancelIo")
@@ -1606,6 +1611,14 @@ func DwmSetWindowAttribute(hwnd HWND, attribute uint32, value unsafe.Pointer, si
 	return
 }
 
+func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procCancelMibChangeNotify2.Addr(), uintptr(notificationHandle))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
 	r0, _, _ := syscall.Syscall6(procGetAdaptersAddresses.Addr(), 5, uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)), 0)
 	if r0 != 0 {
@@ -1638,6 +1651,46 @@ func GetIfEntry(pIfRow *MibIfRow) (errcode error) {
 	return
 }
 
+func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetIfEntry2Ex.Addr(), uintptr(level), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
+	r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyIpInterfaceChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
+func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+	var _p0 uint32
+	if initialNotification {
+		_p0 = 1
+	}
+	r0, _, _ := syscall.SyscallN(procNotifyUnicastIpAddressChange.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+	if r0 != 0 {
+		errcode = syscall.Errno(r0)
+	}
+	return
+}
+
 func AddDllDirectory(path *uint16) (cookie uintptr, err error) {
 	r0, _, e1 := syscall.Syscall(procAddDllDirectory.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0)
 	cookie = uintptr(r0)
diff --git a/source/vendor/modules.txt b/source/vendor/modules.txt
index f3fa8cb..219eadc 100644
--- a/source/vendor/modules.txt
+++ b/source/vendor/modules.txt
@@ -14,6 +14,33 @@ github.com/charmbracelet/log
 ## explicit; go 1.18
 github.com/charmbracelet/x/ansi
 github.com/charmbracelet/x/ansi/parser
+# github.com/evanw/esbuild v0.24.0
+## explicit; go 1.13
+github.com/evanw/esbuild/internal/api_helpers
+github.com/evanw/esbuild/internal/ast
+github.com/evanw/esbuild/internal/bundler
+github.com/evanw/esbuild/internal/cache
+github.com/evanw/esbuild/internal/compat
+github.com/evanw/esbuild/internal/config
+github.com/evanw/esbuild/internal/css_ast
+github.com/evanw/esbuild/internal/css_lexer
+github.com/evanw/esbuild/internal/css_parser
+github.com/evanw/esbuild/internal/css_printer
+github.com/evanw/esbuild/internal/fs
+github.com/evanw/esbuild/internal/graph
+github.com/evanw/esbuild/internal/helpers
+github.com/evanw/esbuild/internal/js_ast
+github.com/evanw/esbuild/internal/js_lexer
+github.com/evanw/esbuild/internal/js_parser
+github.com/evanw/esbuild/internal/js_printer
+github.com/evanw/esbuild/internal/linker
+github.com/evanw/esbuild/internal/logger
+github.com/evanw/esbuild/internal/renamer
+github.com/evanw/esbuild/internal/resolver
+github.com/evanw/esbuild/internal/runtime
+github.com/evanw/esbuild/internal/sourcemap
+github.com/evanw/esbuild/internal/xxhash
+github.com/evanw/esbuild/pkg/api
 # github.com/go-logfmt/logfmt v0.6.0
 ## explicit; go 1.17
 github.com/go-logfmt/logfmt
@@ -42,37 +69,46 @@ github.com/tdewolff/parse/v2/css
 # github.com/volker-schukai/tokenizer v1.0.0
 ## explicit; go 1.13
 github.com/volker-schukai/tokenizer
+# gitlab.schukai.com/oss/libraries/go/application/configuration.git v1.22.9
+## explicit; go 1.22.0
 # gitlab.schukai.com/oss/libraries/go/application/xflags v1.16.3
 ## explicit; go 1.22
-gitlab.schukai.com/oss/libraries/go/application/xflags
+# gitlab.schukai.com/oss/libraries/go/application/xflags.git v1.16.5
+## explicit; go 1.22
+gitlab.schukai.com/oss/libraries/go/application/xflags.git
 # gitlab.schukai.com/oss/libraries/go/markup/html v0.4.6
 ## explicit; go 1.22
-gitlab.schukai.com/oss/libraries/go/markup/html/engine
+# gitlab.schukai.com/oss/libraries/go/markup/html.git v0.4.7
+## explicit; go 1.22
+gitlab.schukai.com/oss/libraries/go/markup/html.git/engine
+# gitlab.schukai.com/oss/libraries/go/services/job-queues.git v1.20.2
+## explicit; go 1.22
 # gitlab.schukai.com/oss/libraries/go/utilities/data.git v0.2.2
 ## explicit; go 1.22
 gitlab.schukai.com/oss/libraries/go/utilities/data.git
 # gitlab.schukai.com/oss/libraries/go/utilities/pathfinder v0.9.4
 ## explicit; go 1.21
-gitlab.schukai.com/oss/libraries/go/utilities/pathfinder
 # gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git v0.9.5
 ## explicit; go 1.21
 gitlab.schukai.com/oss/libraries/go/utilities/pathfinder.git
-# golang.org/x/crypto v0.28.0
+# gitlab.schukai.com/oss/libraries/go/utilities/watch.git v0.4.2
+## explicit; go 1.22
+# golang.org/x/crypto v0.29.0
 ## explicit; go 1.20
 golang.org/x/crypto/bcrypt
 golang.org/x/crypto/blowfish
-# golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c
+# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
 ## explicit; go 1.22.0
 golang.org/x/exp/constraints
 golang.org/x/exp/slices
 golang.org/x/exp/slog
 golang.org/x/exp/slog/internal
 golang.org/x/exp/slog/internal/buffer
-# golang.org/x/net v0.30.0
+# golang.org/x/net v0.31.0
 ## explicit; go 1.18
 golang.org/x/net/html
 golang.org/x/net/html/atom
-# golang.org/x/sys v0.26.0
+# golang.org/x/sys v0.27.0
 ## explicit; go 1.18
 golang.org/x/sys/unix
 golang.org/x/sys/windows
-- 
GitLab