diff --git a/.gitignore b/.gitignore index b791b8bc3a047f..e8a8c92992968e 100644 --- a/.gitignore +++ b/.gitignore @@ -16,6 +16,7 @@ .vscode/clang* .vscode/cpp* .zig-cache +.bake-debug *.a *.bc *.big diff --git a/build.zig b/build.zig index 054fc4dd944395..ce8624f7e5306c 100644 --- a/build.zig +++ b/build.zig @@ -561,6 +561,7 @@ fn addInternalPackages(b: *Build, obj: *Compile, opts: *BunBuildOptions) void { .{ .file = "bun-error/index.js", .enable = opts.shouldEmbedCode() }, .{ .file = "bun-error/bun-error.css", .enable = opts.shouldEmbedCode() }, .{ .file = "fallback-decoder.js", .enable = opts.shouldEmbedCode() }, + .{ .file = "node-fallbacks/react-refresh.js", .enable = opts.shouldEmbedCode() }, .{ .file = "node-fallbacks/assert.js", .enable = opts.shouldEmbedCode() }, .{ .file = "node-fallbacks/buffer.js", .enable = opts.shouldEmbedCode() }, .{ .file = "node-fallbacks/console.js", .enable = opts.shouldEmbedCode() }, diff --git a/bun.lock b/bun.lock index 4729944dcf9be9..81b4fa1253a77d 100644 --- a/bun.lock +++ b/bun.lock @@ -10,8 +10,8 @@ "@typescript-eslint/eslint-plugin": "^7.11.0", "@typescript-eslint/parser": "^7.11.0", "@vscode/debugadapter": "^1.65.0", - "autoprefixer": "^10.4.20", - "caniuse-lite": "^1.0.30001660", + "autoprefixer": "^10.4.19", + "caniuse-lite": "^1.0.30001620", "esbuild": "^0.21.4", "eslint": "^9.4.0", "eslint-config-prettier": "^9.1.0", @@ -45,21 +45,21 @@ "packages": { "@biomejs/biome": ["@biomejs/biome@1.8.3", "", { "optionalDependencies": { "@biomejs/cli-darwin-arm64": "1.8.3", "@biomejs/cli-darwin-x64": "1.8.3", "@biomejs/cli-linux-arm64": "1.8.3", "@biomejs/cli-linux-arm64-musl": "1.8.3", "@biomejs/cli-linux-x64": "1.8.3", "@biomejs/cli-linux-x64-musl": "1.8.3", "@biomejs/cli-win32-arm64": "1.8.3", "@biomejs/cli-win32-x64": "1.8.3" }, "bin": { "biome": "bin/biome" } }, "sha512-/uUV3MV+vyAczO+vKrPdOW0Iaet7UnJMU4bNMinggGJTAnBPjCoLEYcyYtYHNnUNYlv4xZMH6hVIQCAozq8d5w=="], - "@biomejs/cli-darwin-arm64": 
["@biomejs/cli-darwin-arm64@1.8.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-9DYOjclFpKrH/m1Oz75SSExR8VKvNSSsLnVIqdnKexj6NwmiMlKk94Wa1kZEdv6MCOHGHgyyoV57Cw8WzL5n3A=="], + "@biomejs/cli-darwin-arm64": ["@biomejs/cli-darwin-arm64@1.8.3", "", { "os":"darwin", "cpu":"arm64" }, "sha512-9DYOjclFpKrH/m1Oz75SSExR8VKvNSSsLnVIqdnKexj6NwmiMlKk94Wa1kZEdv6MCOHGHgyyoV57Cw8WzL5n3A=="], - "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@1.8.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-UeW44L/AtbmOF7KXLCoM+9PSgPo0IDcyEUfIoOXYeANaNXXf9mLUwV1GeF2OWjyic5zj6CnAJ9uzk2LT3v/wAw=="], + "@biomejs/cli-darwin-x64": ["@biomejs/cli-darwin-x64@1.8.3", "", { "os":"darwin", "cpu":"x64" }, "sha512-UeW44L/AtbmOF7KXLCoM+9PSgPo0IDcyEUfIoOXYeANaNXXf9mLUwV1GeF2OWjyic5zj6CnAJ9uzk2LT3v/wAw=="], - "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@1.8.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-fed2ji8s+I/m8upWpTJGanqiJ0rnlHOK3DdxsyVLZQ8ClY6qLuPc9uehCREBifRJLl/iJyQpHIRufLDeotsPtw=="], + "@biomejs/cli-linux-arm64": ["@biomejs/cli-linux-arm64@1.8.3", "", { "os":"linux", "cpu":"arm64" }, "sha512-fed2ji8s+I/m8upWpTJGanqiJ0rnlHOK3DdxsyVLZQ8ClY6qLuPc9uehCREBifRJLl/iJyQpHIRufLDeotsPtw=="], - "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@1.8.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-9yjUfOFN7wrYsXt/T/gEWfvVxKlnh3yBpnScw98IF+oOeCYb5/b/+K7YNqKROV2i1DlMjg9g/EcN9wvj+NkMuQ=="], + "@biomejs/cli-linux-arm64-musl": ["@biomejs/cli-linux-arm64-musl@1.8.3", "", { "os":"linux", "cpu":"arm64" }, "sha512-9yjUfOFN7wrYsXt/T/gEWfvVxKlnh3yBpnScw98IF+oOeCYb5/b/+K7YNqKROV2i1DlMjg9g/EcN9wvj+NkMuQ=="], - "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@1.8.3", "", { "os": "linux", "cpu": "x64" }, "sha512-I8G2QmuE1teISyT8ie1HXsjFRz9L1m5n83U1O6m30Kw+kPMPSKjag6QGUn+sXT8V+XWIZxFFBoTDEDZW2KPDDw=="], + "@biomejs/cli-linux-x64": ["@biomejs/cli-linux-x64@1.8.3", "", { "os":"linux", "cpu":"x64" }, 
"sha512-I8G2QmuE1teISyT8ie1HXsjFRz9L1m5n83U1O6m30Kw+kPMPSKjag6QGUn+sXT8V+XWIZxFFBoTDEDZW2KPDDw=="], - "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@1.8.3", "", { "os": "linux", "cpu": "x64" }, "sha512-UHrGJX7PrKMKzPGoEsooKC9jXJMa28TUSMjcIlbDnIO4EAavCoVmNQaIuUSH0Ls2mpGMwUIf+aZJv657zfWWjA=="], + "@biomejs/cli-linux-x64-musl": ["@biomejs/cli-linux-x64-musl@1.8.3", "", { "os":"linux", "cpu":"x64" }, "sha512-UHrGJX7PrKMKzPGoEsooKC9jXJMa28TUSMjcIlbDnIO4EAavCoVmNQaIuUSH0Ls2mpGMwUIf+aZJv657zfWWjA=="], - "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@1.8.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-J+Hu9WvrBevfy06eU1Na0lpc7uR9tibm9maHynLIoAjLZpQU3IW+OKHUtyL8p6/3pT2Ju5t5emReeIS2SAxhkQ=="], + "@biomejs/cli-win32-arm64": ["@biomejs/cli-win32-arm64@1.8.3", "", { "os":"win32", "cpu":"arm64" }, "sha512-J+Hu9WvrBevfy06eU1Na0lpc7uR9tibm9maHynLIoAjLZpQU3IW+OKHUtyL8p6/3pT2Ju5t5emReeIS2SAxhkQ=="], - "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@1.8.3", "", { "os": "win32", "cpu": "x64" }, "sha512-/PJ59vA1pnQeKahemaQf4Nyj7IKUvGQSc3Ze1uIGi+Wvr1xF7rGobSrAAG01T/gUDG21vkDsZYM03NAmPiVkqg=="], + "@biomejs/cli-win32-x64": ["@biomejs/cli-win32-x64@1.8.3", "", { "os":"win32", "cpu":"x64" }, "sha512-/PJ59vA1pnQeKahemaQf4Nyj7IKUvGQSc3Ze1uIGi+Wvr1xF7rGobSrAAG01T/gUDG21vkDsZYM03NAmPiVkqg=="], "@definitelytyped/dts-critic": ["@definitelytyped/dts-critic@0.0.191", "", { "dependencies": { "@definitelytyped/header-parser": "0.0.190", "command-exists": "^1.2.9", "semver": "^7.5.4", "tmp": "^0.2.1", "typescript": "^5.2.2", "yargs": "^17.7.2" } }, "sha512-j5HK3pQYiQwSXRLJzyhXJ6KxdzLl4gXXhz3ysCtLnRQkj+zsEfloDkEZ3x2bZMWS0OsKLXmR91JeQ2/c9DFEjg=="], @@ -75,51 +75,51 @@ "@es-joy/jsdoccomment": ["@es-joy/jsdoccomment@0.39.4", "", { "dependencies": { "comment-parser": "1.3.1", "esquery": "^1.5.0", "jsdoc-type-pratt-parser": "~4.0.0" } }, "sha512-Jvw915fjqQct445+yron7Dufix9A+m9j1fCJYlCo1FWlRvTxa3pjJelxdSTdaLWcTwRU6vbL+NYjO4YuNIS5Qg=="], - "@esbuild/aix-ppc64": 
["@esbuild/aix-ppc64@0.21.5", "", { "os": "aix", "cpu": "ppc64" }, "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ=="], + "@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.21.5", "", { "os":"aix", "cpu":"ppc64" }, "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ=="], - "@esbuild/android-arm": ["@esbuild/android-arm@0.21.5", "", { "os": "android", "cpu": "arm" }, "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg=="], + "@esbuild/android-arm": ["@esbuild/android-arm@0.21.5", "", { "os":"android", "cpu":"arm" }, "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg=="], - "@esbuild/android-arm64": ["@esbuild/android-arm64@0.21.5", "", { "os": "android", "cpu": "arm64" }, "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A=="], + "@esbuild/android-arm64": ["@esbuild/android-arm64@0.21.5", "", { "os":"android", "cpu":"arm64" }, "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A=="], - "@esbuild/android-x64": ["@esbuild/android-x64@0.21.5", "", { "os": "android", "cpu": "x64" }, "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA=="], + "@esbuild/android-x64": ["@esbuild/android-x64@0.21.5", "", { "os":"android", "cpu":"x64" }, "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA=="], - "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.21.5", "", { "os": "darwin", "cpu": "arm64" }, "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ=="], + "@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.21.5", "", { "os":"darwin", "cpu":"arm64" }, "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ=="], - "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.21.5", 
"", { "os": "darwin", "cpu": "x64" }, "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw=="], + "@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.21.5", "", { "os":"darwin", "cpu":"x64" }, "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw=="], - "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.21.5", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g=="], + "@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.21.5", "", { "os":"freebsd", "cpu":"arm64" }, "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g=="], - "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.21.5", "", { "os": "freebsd", "cpu": "x64" }, "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ=="], + "@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.21.5", "", { "os":"freebsd", "cpu":"x64" }, "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ=="], - "@esbuild/linux-arm": ["@esbuild/linux-arm@0.21.5", "", { "os": "linux", "cpu": "arm" }, "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA=="], + "@esbuild/linux-arm": ["@esbuild/linux-arm@0.21.5", "", { "os":"linux", "cpu":"arm" }, "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA=="], - "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.21.5", "", { "os": "linux", "cpu": "arm64" }, "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q=="], + "@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.21.5", "", { "os":"linux", "cpu":"arm64" }, "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q=="], - "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.21.5", "", { "os": "linux", "cpu": "ia32" }, 
"sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg=="], + "@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.21.5", "", { "os":"linux", "cpu":"ia32" }, "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg=="], - "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.21.5", "", { "os": "linux", "cpu": "none" }, "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg=="], + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.21.5", "", { "os":"linux", "cpu":"none" }, "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg=="], - "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.21.5", "", { "os": "linux", "cpu": "none" }, "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg=="], + "@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.21.5", "", { "os":"linux", "cpu":"none" }, "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg=="], - "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.21.5", "", { "os": "linux", "cpu": "ppc64" }, "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w=="], + "@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.21.5", "", { "os":"linux", "cpu":"ppc64" }, "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w=="], - "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.21.5", "", { "os": "linux", "cpu": "none" }, "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA=="], + "@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.21.5", "", { "os":"linux", "cpu":"none" }, "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA=="], - "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.21.5", "", { "os": "linux", "cpu": "s390x" }, 
"sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A=="], + "@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.21.5", "", { "os":"linux", "cpu":"s390x" }, "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A=="], - "@esbuild/linux-x64": ["@esbuild/linux-x64@0.21.5", "", { "os": "linux", "cpu": "x64" }, "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ=="], + "@esbuild/linux-x64": ["@esbuild/linux-x64@0.21.5", "", { "os":"linux", "cpu":"x64" }, "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ=="], - "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.21.5", "", { "os": "none", "cpu": "x64" }, "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg=="], + "@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.21.5", "", { "os":"none", "cpu":"x64" }, "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg=="], - "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.21.5", "", { "os": "openbsd", "cpu": "x64" }, "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow=="], + "@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.21.5", "", { "os":"openbsd", "cpu":"x64" }, "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow=="], - "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.21.5", "", { "os": "sunos", "cpu": "x64" }, "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg=="], + "@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.21.5", "", { "os":"sunos", "cpu":"x64" }, "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg=="], - "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.21.5", "", { "os": "win32", "cpu": "arm64" }, 
"sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A=="], + "@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.21.5", "", { "os":"win32", "cpu":"arm64" }, "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A=="], - "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.21.5", "", { "os": "win32", "cpu": "ia32" }, "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA=="], + "@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.21.5", "", { "os":"win32", "cpu":"ia32" }, "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA=="], - "@esbuild/win32-x64": ["@esbuild/win32-x64@0.21.5", "", { "os": "win32", "cpu": "x64" }, "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw=="], + "@esbuild/win32-x64": ["@esbuild/win32-x64@0.21.5", "", { "os":"win32", "cpu":"x64" }, "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw=="], "@eslint-community/eslint-utils": ["@eslint-community/eslint-utils@4.4.0", "", { "dependencies": { "eslint-visitor-keys": "^3.3.0" }, "peerDependencies": { "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" } }, "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA=="], diff --git a/cmake/targets/BuildBun.cmake b/cmake/targets/BuildBun.cmake index 3eaf1b566d2fdd..2e7df7b43aff08 100644 --- a/cmake/targets/BuildBun.cmake +++ b/cmake/targets/BuildBun.cmake @@ -179,6 +179,32 @@ register_command( ${BUN_NODE_FALLBACKS_OUTPUTS} ) +# An embedded copy of react-refresh is used when the user forgets to install it. +# The library is not versioned alongside React. 
+set(BUN_REACT_REFRESH_OUTPUT ${BUN_NODE_FALLBACKS_OUTPUT}/react-refresh.js) +register_command( + TARGET + bun-node-fallbacks-react-refresh + COMMENT + "Building node-fallbacks/react-refresh.js" + CWD + ${BUN_NODE_FALLBACKS_SOURCE} + COMMAND + ${BUN_EXECUTABLE} build + ${BUN_NODE_FALLBACKS_SOURCE}/node_modules/react-refresh/cjs/react-refresh-runtime.development.js + --outfile=${BUN_REACT_REFRESH_OUTPUT} + --target=browser + --format=cjs + --minify + --define:process.env.NODE_ENV=\"'development'\" + SOURCES + ${BUN_NODE_FALLBACKS_SOURCE}/package.json + ${BUN_NODE_FALLBACKS_SOURCE}/bun.lock + ${BUN_NODE_FALLBACKS_NODE_MODULES} + OUTPUTS + ${BUN_REACT_REFRESH_OUTPUT} +) + set(BUN_ERROR_CODE_SCRIPT ${CWD}/src/codegen/generate-node-errors.ts) set(BUN_ERROR_CODE_SOURCES @@ -510,6 +536,7 @@ set(BUN_ZIG_GENERATED_SOURCES ${BUN_FALLBACK_DECODER_OUTPUT} ${BUN_RUNTIME_JS_OUTPUT} ${BUN_NODE_FALLBACKS_OUTPUTS} + ${BUN_REACT_REFRESH_OUTPUT} ${BUN_ERROR_CODE_OUTPUTS} ${BUN_ZIG_GENERATED_CLASSES_OUTPUTS} ${BUN_JAVASCRIPT_OUTPUTS} @@ -567,7 +594,6 @@ register_command( -Dcanary=${CANARY_REVISION} -Dcodegen_path=${CODEGEN_PATH} -Dcodegen_embed=$,true,false> - -Denable_asan=$,true,false> --prominent-compile-errors ${ZIG_FLAGS_BUN} ARTIFACTS diff --git a/cmake/tools/SetupBun.cmake b/cmake/tools/SetupBun.cmake index 5377eb1cff1a19..3cb77ff4be9e3b 100644 --- a/cmake/tools/SetupBun.cmake +++ b/cmake/tools/SetupBun.cmake @@ -9,6 +9,14 @@ find_command( >=1.1.26 ) +if (NOT CI) + # If node.js is not installed, it is extremely easy to make this path point to + # a tempdir such as /private/tmp/bun-node-ce532901c/bun, which may cause this + # CMake configuration break after tempdir is cleaned up (ex. after reboot). + get_filename_component(BUN_EXECUTABLE ${BUN_EXECUTABLE} REALPATH) + set(BUN_EXECUTABLE ${BUN_EXECUTABLE} CACHE FILEPATH "Bun executable" FORCE) +endif() + # If this is not set, some advanced features are not checked. 
# https://github.com/oven-sh/bun/blob/cd7f6a1589db7f1e39dc4e3f4a17234afbe7826c/src/bun.js/javascript.zig#L1069-L1072 setenv(BUN_GARBAGE_COLLECTOR_LEVEL 1) diff --git a/package.json b/package.json index 45c9ed7b50a362..e064be915a6d10 100644 --- a/package.json +++ b/package.json @@ -6,11 +6,14 @@ "./packages/bun-types" ], "devDependencies": { + "@mdn/browser-compat-data": "~5.5.28", "@types/bun": "*", "@types/react": "^18.3.3", "@typescript-eslint/eslint-plugin": "^7.11.0", "@typescript-eslint/parser": "^7.11.0", "@vscode/debugadapter": "^1.65.0", + "autoprefixer": "^10.4.19", + "caniuse-lite": "^1.0.30001620", "esbuild": "^0.21.4", "eslint": "^9.4.0", "eslint-config-prettier": "^9.1.0", @@ -21,10 +24,7 @@ "react": "^18.3.1", "react-dom": "^18.3.1", "source-map-js": "^1.2.0", - "typescript": "^5.7.2", - "caniuse-lite": "^1.0.30001660", - "autoprefixer": "^10.4.20", - "@mdn/browser-compat-data": "~5.5.28" + "typescript": "^5.7.2" }, "resolutions": { "bun-types": "workspace:packages/bun-types" diff --git a/packages/bun-types/bun.d.ts b/packages/bun-types/bun.d.ts index 8d11ef1a943015..7195f0252412ce 100644 --- a/packages/bun-types/bun.d.ts +++ b/packages/bun-types/bun.d.ts @@ -5325,7 +5325,10 @@ declare module "bun" { interface PluginBuilder { /** - * Register a callback which will be invoked when bundling starts. + * Register a callback which will be invoked when bundling starts. When + * using hot module reloading, this is called at the start of each + * incremental rebuild. + * * @example * ```ts * Bun.plugin({ @@ -5422,9 +5425,9 @@ declare module "bun" { * - `browser`: The plugin will be applied to browser builds * - `node`: The plugin will be applied to Node.js builds * - * If in Bun's runtime, the default target is `bun`. + * If unspecified, it is assumed that the plugin is compatible with all targets. * - * If unspecified, it is assumed that the plugin is compatible with the default target. 
+ * This field is not read by Bun.plugin */ target?: Target; /** diff --git a/src/HTMLScanner.zig b/src/HTMLScanner.zig index b029e3dd46f6c6..4a69da3574af6c 100644 --- a/src/HTMLScanner.zig +++ b/src/HTMLScanner.zig @@ -56,9 +56,8 @@ fn createImportRecord(this: *HTMLScanner, input_path: []const u8, kind: ImportKi const debug = bun.Output.scoped(.HTMLScanner, true); -pub fn onWriteHTML(this: *HTMLScanner, bytes: []const u8) void { - _ = this; // autofix - _ = bytes; // autofix +pub fn onWriteHTML(_: *HTMLScanner, bytes: []const u8) void { + _ = bytes; // bytes are not written in scan phase } pub fn onHTMLParseError(this: *HTMLScanner, message: []const u8) void { @@ -70,7 +69,7 @@ pub fn onHTMLParseError(this: *HTMLScanner, message: []const u8) void { } pub fn onTag(this: *HTMLScanner, _: *lol.Element, path: []const u8, url_attribute: []const u8, kind: ImportKind) void { - _ = url_attribute; // autofix + _ = url_attribute; this.createImportRecord(path, kind) catch {}; } @@ -80,7 +79,7 @@ pub fn scan(this: *HTMLScanner, input: []const u8) !void { try processor.run(this, input); } -pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type { +pub fn HTMLProcessor(comptime T: type, comptime visit_head_and_body: bool) type { return struct { const TagHandler = struct { /// CSS selector to match elements @@ -95,7 +94,7 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type is_head_or_html: bool = false, }; - const tag_handlers_ = [_]TagHandler{ + const tag_handlers = [_]TagHandler{ // Module scripts with src .{ .selector = "script[src]", @@ -208,16 +207,6 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type // }, }; - const html_head_tag_handler: TagHandler = .{ - .selector = "head", - .has_content = false, - .url_attribute = "", - .kind = .stmt, - .is_head_or_html = true, - }; - - const tag_handlers = if (add_head_or_html_tag) tag_handlers_ ++ [_]TagHandler{html_head_tag_handler} else 
tag_handlers_; - fn generateHandlerForTag(comptime tag_info: TagHandler) fn (*T, *lol.Element) bool { const Handler = struct { pub fn handle(this: *T, element: *lol.Element) bool { @@ -232,13 +221,6 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type } } } - - if (comptime add_head_or_html_tag) { - if (tag_info.is_head_or_html) { - T.onHEADTag(this, element); - } - } - return false; } }; @@ -248,18 +230,16 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type pub fn run(this: *T, input: []const u8) !void { var builder = lol.HTMLRewriter.Builder.init(); defer builder.deinit(); - var selectors = try std.ArrayList(*lol.HTMLSelector).initCapacity(this.allocator, tag_handlers.len); - defer { - for (selectors.items) |selector| { - selector.deinit(); - } - selectors.deinit(); - } + + var selectors: std.BoundedArray(*lol.HTMLSelector, tag_handlers.len + if (visit_head_and_body) 2 else 0) = .{}; + defer for (selectors.slice()) |selector| { + selector.deinit(); + }; + // Add handlers for each tag type inline for (tag_handlers) |tag_info| { const selector = try lol.HTMLSelector.parse(tag_info.selector); - try selectors.append(selector); - + selectors.appendAssumeCapacity(selector); try builder.addElementContentHandlers( selector, T, @@ -274,6 +254,38 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type ); } + if (visit_head_and_body) { + const head_selector = try lol.HTMLSelector.parse("head"); + selectors.appendAssumeCapacity(head_selector); + try builder.addElementContentHandlers( + head_selector, + T, + T.onHeadTag, + this, + void, + null, + null, + void, + null, + null, + ); + + const body_selector = try lol.HTMLSelector.parse("body"); + selectors.appendAssumeCapacity(body_selector); + try builder.addElementContentHandlers( + body_selector, + T, + T.onBodyTag, + this, + void, + null, + null, + void, + null, + null, + ); + } + const memory_settings = lol.MemorySettings{ 
.preallocated_parsing_buffer_size = @max(input.len / 4, 1024), .max_allowed_memory_usage = 1024 * 1024 * 10, @@ -294,11 +306,7 @@ pub fn HTMLProcessor(comptime T: type, comptime add_head_or_html_tag: bool) type false, T, this, - struct { - fn write(self: *T, bytes: []const u8) void { - self.onWriteHTML(bytes); - } - }.write, + T.onWriteHTML, struct { fn done(_: *T) void {} }.done, diff --git a/src/Watcher.zig b/src/Watcher.zig index 3f710c64ceb354..ae0be0426810be 100644 --- a/src/Watcher.zig +++ b/src/Watcher.zig @@ -1,17 +1,11 @@ //! Bun's cross-platform filesystem watcher. Runs on its own thread. const Watcher = @This(); -pub const max_count = 128; - -pub const Event = WatchEvent; -pub const Item = WatchItem; -pub const ItemList = WatchList; -pub const WatchList = std.MultiArrayList(WatchItem); -pub const HashType = u32; -const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); +const DebugLogScope = bun.Output.Scoped(.watcher, false); +const log = DebugLogScope.log; // Consumer-facing -watch_events: [128]WatchEvent, -changed_filepaths: [128]?[:0]u8, +watch_events: [max_count]WatchEvent, +changed_filepaths: [max_count]?[:0]u8, /// The platform-specific implementation of the watcher platform: Platform, @@ -37,6 +31,15 @@ onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, +pub const max_count = 128; + +pub const Event = WatchEvent; +pub const Item = WatchItem; +pub const ItemList = WatchList; +pub const WatchList = std.MultiArrayList(WatchItem); +pub const HashType = u32; +const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); + /// Initializes a watcher. Each watcher is tied to some context type, which /// recieves watch callbacks on the watcher thread. This function does not /// actually start the watcher thread. 
@@ -68,7 +71,7 @@ pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.me const watcher = try allocator.create(Watcher); errdefer allocator.destroy(watcher); - watcher.* = Watcher{ + watcher.* = .{ .fs = fs, .allocator = allocator, .watched_count = 0, @@ -80,7 +83,7 @@ pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.me .onError = &wrapped.onErrorWrapped, .platform = .{}, .watch_events = undefined, - .changed_filepaths = [_]?[:0]u8{null} ** 128, + .changed_filepaths = [_]?[:0]u8{null} ** max_count, }; try Platform.init(&watcher.platform, fs.top_level_dir); @@ -118,9 +121,6 @@ pub fn getHash(filepath: string) HashType { pub const WatchItemIndex = u16; pub const max_eviction_count = 8096; - -const log = bun.Output.scoped(.watcher, false); - const WindowsWatcher = @import("./watcher/WindowsWatcher.zig"); // TODO: some platform-specific behavior is implemented in // this file instead of the platform-specific file. @@ -214,7 +214,7 @@ fn threadMain(this: *Watcher) !void { Output.Source.configureNamedThread("File Watcher"); defer Output.flush(); - if (FeatureFlags.verbose_watcher) Output.prettyln("Watcher started", .{}); + log("Watcher started", .{}); switch (this.watchLoop()) { .err => |err| { @@ -534,12 +534,14 @@ pub fn appendFileMaybeLock( .result => {}, } - if (comptime FeatureFlags.verbose_watcher) { - if (strings.indexOf(file_path, this.cwd)) |i| { - Output.prettyln("Added ./{s} to watch list.", .{file_path[i + this.cwd.len ..]}); - } else { - Output.prettyln("Added {s} to watch list.", .{file_path}); - } + if (DebugLogScope.isVisible()) { + const cwd_len_with_slash = if (this.cwd[this.cwd.len - 1] == '/') this.cwd.len else this.cwd.len + 1; + log("Added {s} to watch list.", .{ + if (file_path.len > cwd_len_with_slash and bun.strings.startsWith(file_path, this.cwd)) + file_path[cwd_len_with_slash..] 
+ else + file_path, + }); } return .{ .result = {} }; diff --git a/src/api/schema.zig b/src/api/schema.zig index 163a1a76131aeb..f7a36f2df42f2e 100644 --- a/src/api/schema.zig +++ b/src/api/schema.zig @@ -1678,7 +1678,7 @@ pub const Api = struct { no_summary: ?bool = null, /// disable_hmr - disable_hmr: ?bool = null, + disable_hmr: bool = false, /// port port: ?u16 = null, diff --git a/src/ast/base.zig b/src/ast/base.zig index de1f8a5a3f3d20..b1cc26244d139b 100644 --- a/src/ast/base.zig +++ b/src/ast/base.zig @@ -188,7 +188,7 @@ pub const Ref = packed struct(u64) { return this.tag == .source_contents_slice; } - pub fn init(inner_index: Int, source_index: usize, is_source_contents_slice: bool) Ref { + pub fn init(inner_index: Int, source_index: u32, is_source_contents_slice: bool) Ref { return .{ .inner_index = inner_index, .source_index = @intCast(source_index), diff --git a/src/bake/BakeGlobalObject.cpp b/src/bake/BakeGlobalObject.cpp index efa663385904d3..abcaf83f6e03a0 100644 --- a/src/bake/BakeGlobalObject.cpp +++ b/src/bake/BakeGlobalObject.cpp @@ -58,17 +58,18 @@ JSC::Identifier bakeModuleLoaderResolve(JSC::JSGlobalObject* jsGlobal, auto& vm = JSC::getVM(global); auto scope = DECLARE_THROW_SCOPE(vm); - ASSERT(referrer.isString()); - WTF::String refererString = jsCast(referrer)->getString(global); + if (auto string = jsDynamicCast(referrer)) { + WTF::String refererString = string->getString(global); - WTF::String keyString = key.toWTFString(global); - RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier); - - if (refererString.startsWith("bake:/"_s) || (refererString == "."_s && keyString.startsWith("bake:/"_s))) { - BunString result = BakeProdResolve(global, Bun::toString(referrer.getString(global)), Bun::toString(keyString)); + WTF::String keyString = key.toWTFString(global); RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier); - return JSC::Identifier::fromString(vm, result.toWTFString(BunString::ZeroCopy)); + if 
(refererString.startsWith("bake:/"_s) || (refererString == "."_s && keyString.startsWith("bake:/"_s))) { + BunString result = BakeProdResolve(global, Bun::toString(referrer.getString(global)), Bun::toString(keyString)); + RETURN_IF_EXCEPTION(scope, vm.propertyNames->emptyIdentifier); + + return JSC::Identifier::fromString(vm, result.toWTFString(BunString::ZeroCopy)); + } } // Use Zig::GlobalObject's function diff --git a/src/bake/DevServer.zig b/src/bake/DevServer.zig index 6b7f957926d542..25475232e96945 100644 --- a/src/bake/DevServer.zig +++ b/src/bake/DevServer.zig @@ -6,10 +6,15 @@ //! adjusting imports) must always rebundle only that one file. //! //! All work is held in-memory, using manually managed data-oriented design. +//! For questions about DevServer, please consult the delusional @paperclover pub const DevServer = @This(); -pub const debug = bun.Output.Scoped(.Bake, false); +pub const debug = bun.Output.Scoped(.DevServer, false); +pub const memoryLog = bun.Output.Scoped(.DevServerMemory, true); pub const igLog = bun.Output.scoped(.IncrementalGraph, false); +/// --no-hmr sets this to false +pub var enabled = true; + pub const Options = struct { /// Arena must live until DevServer.deinit() arena: Allocator, @@ -17,20 +22,23 @@ pub const Options = struct { vm: *VirtualMachine, framework: bake.Framework, bundler_options: bake.SplitBundlerOptions, + /// When set, nothing is ever bundled for the server-side, + /// and DevSever acts purely as a frontend bundler. + frontend_only: bool = false, // Debugging features dump_sources: ?[]const u8 = if (Environment.isDebug) ".bake-debug" else null, dump_state_on_crash: ?bool = null, - verbose_watcher: bool = false, }; -// The fields `client_graph`, `server_graph`, and `directory_watchers` all -// use `@fieldParentPointer` to access DevServer's state. This pattern has -// made it easier to group related fields together, but one must remember -// those structures still depend on the DevServer pointer. 
+// The fields `client_graph`, `server_graph`, `directory_watchers`, and `assets` +// all use `@fieldParentPointer` to access DevServer's state. This pattern has +// made it easier to group related fields together, but one must remember those +// structures still depend on the DevServer pointer. /// Used for all server-wide allocations. In debug, this shows up in /// a separate named heap. Thread-safe. +// TODO: make this an "AllocationScope" (debug memory tool i've yet to write) allocator: Allocator, /// Absolute path to project root directory. For the HMR /// runtime, its module IDs are strings relative to this. @@ -41,7 +49,9 @@ root: []const u8, configuration_hash_key: [16]u8, /// The virtual machine (global object) to execute code in. vm: *VirtualMachine, -/// May be `null` if not attached to an HTTP server yet. +/// May be `null` if not attached to an HTTP server yet. When no server is +/// available, functions taking in requests and responses are unavailable. +/// However, a lot of testing in this mode is missing, so it may hit assertions. server: ?bun.JSC.API.AnyServer, /// Contains the tree of routes. This structure contains FileIndex router: FrameworkRouter, @@ -55,18 +65,16 @@ client_graph: IncrementalGraph(.client), server_graph: IncrementalGraph(.server), /// State populated during bundling and hot updates. Often cleared incremental_result: IncrementalResult, -/// Quickly retrieve a route's index from its entry point file. These are -/// populated as the routes are discovered. The route may not be bundled OR +/// Quickly retrieve a framework route's index from its entry point file. These +/// are populated as the routes are discovered. The route may not be bundled OR /// navigatable, such as the case where a layout's index is looked up. 
route_lookup: AutoArrayHashMapUnmanaged(IncrementalGraph(.server).FileIndex, RouteIndexAndRecurseFlag), -/// CSS files are accessible via `/_bun/css/.css` -/// Value is bundled code owned by `dev.allocator` -css_files: AutoArrayHashMapUnmanaged(u64, []const u8), -/// JS files are accessible via `/_bun/client/route..js` -/// These are randomly generated to avoid possible browser caching of old assets. -route_js_payloads: AutoArrayHashMapUnmanaged(u64, Route.Index.Optional), -// /// Assets are accessible via `/_bun/asset/` -// assets: bun.StringArrayHashMapUnmanaged(u64, Asset), +/// This acts as a duplicate of the lookup table in uws, but only for HTML routes +/// Used to identify what route a connected WebSocket is on, so that only +/// the active pages are notified of a hot updates. +html_router: HTMLRouter, +/// Assets are accessible via `/_bun/asset/` +assets: Assets, /// All bundling failures are stored until a file is saved and rebuilt. /// They are stored in the wire format the HMR runtime expects so that /// serialization only happens once. @@ -76,6 +84,11 @@ bundling_failures: std.ArrayHashMapUnmanaged( SerializedFailure.ArrayHashContextViaOwner, false, ) = .{}, +frontend_only: bool, +/// The Plugin API is missing a way to attach filesystem watchers (addWatchFile) +/// This special case makes `bun-plugin-tailwind` work, which is a requirement +/// to ship initial incremental bundling support for HTML files. +has_tailwind_plugin_hack: ?bun.StringArrayHashMapUnmanaged(void) = null, // These values are handles to the functions in `hmr-runtime-server.ts`. // For type definitions, see `./bake.private.d.ts` @@ -96,32 +109,46 @@ bundles_since_last_error: usize = 0, framework: bake.Framework, bundler_options: bake.SplitBundlerOptions, // Each logical graph gets its own bundler configuration -server_bundler: Transpiler, -client_bundler: Transpiler, -ssr_bundler: Transpiler, -/// The log used by all `server_bundler`, `client_bundler` and `ssr_bundler`. 
+server_transpiler: Transpiler, +client_transpiler: Transpiler, +ssr_transpiler: Transpiler, +/// The log used by all `server_transpiler`, `client_transpiler` and `ssr_transpiler`. /// Note that it is rarely correct to write messages into it. Instead, associate /// messages with the IncrementalGraph file or Route using `SerializedFailure` log: Log, +plugin_state: enum { + /// Should ask server for plugins. Once plugins are loaded, the plugin + /// pointer is written into `server_transpiler.options.plugin` + unknown, + // These two states mean that `server.getOrLoadPlugins()` was called. + pending, + loaded, + /// Currently, this represents a degraded state where no bundle can + /// be correctly executed because the plugins did not load successfully. + err, +}, /// There is only ever one bundle executing at the same time, since all bundles /// inevitably share state. This bundle is asynchronous, storing its state here /// while in-flight. All allocations held by `.bv2.graph.heap`'s arena current_bundle: ?struct { bv2: *BundleV2, /// Information BundleV2 needs to finalize the bundle - start_data: bun.bundle_v2.BakeBundleStart, + start_data: bun.bundle_v2.DevServerInput, /// Started when the bundle was queued timer: std.time.Timer, /// If any files in this bundle were due to hot-reloading, some extra work /// must be done to inform clients to reload routes. When this is false, /// all entry points do not have bundles yet. had_reload_event: bool, + /// After a bundle finishes, these requests will be continued, either + /// calling their handler on success or sending the error page on failure. + /// Owned by `deferred_request_pool` in DevServer. + requests: DeferredRequest.List, + /// Resolution failures are grouped by incremental graph file index. + /// Unlike parse failures (`handleParseTaskFailure`), the resolution + /// failures can be created asynchronously, and out of order. 
+ resolution_failure_entries: AutoArrayHashMapUnmanaged(SerializedFailure.Owner.Packed, bun.logger.Log), }, -/// This is not stored in `current_bundle` so that its memory can be reused when -/// there is no active bundle. After the bundle finishes, these requests will -/// be continued, either calling their handler on success or sending the error -/// page on failure. -current_bundle_requests: ArrayListUnmanaged(DeferredRequest), /// When `current_bundle` is non-null and new requests to bundle come in, /// those are temporaried here. When the current bundle is finished, it /// will immediately enqueue this. @@ -132,8 +159,9 @@ next_bundle: struct { /// for this watch event is in one of the `watch_events` reload_event: ?*HotReloadEvent, /// The list of requests that are blocked on this bundle. - requests: ArrayListUnmanaged(DeferredRequest), + requests: DeferredRequest.List, }, +deferred_request_pool: bun.HiveArray(DeferredRequest.Node, DeferredRequest.max_preallocated).Fallback, // Debugging @@ -143,50 +171,90 @@ emit_visualizer_events: u32, has_pre_crash_handler: bool, pub const internal_prefix = "/_bun"; -pub const client_prefix = internal_prefix ++ "/client"; +/// Assets which are routed to the `Assets` storage. pub const asset_prefix = internal_prefix ++ "/asset"; -pub const css_prefix = internal_prefix ++ "/css"; +/// Client scripts are available at `/_bun/client/{name}-{rbi}{generation}.js` +/// where: +/// - `name` is the display name of the route, such as "index" or +/// "about". It is ignored when routing. +/// - `rbi` is the route bundle index, in padded hex (e.g. `00000001`) +/// - `generation` which is initialized to a random value. This value is +/// re-randomized whenever `client_bundle` is invalidated. 
+/// +/// Example: `/_bun/client/index-00000000f209a20e.js` +pub const client_prefix = internal_prefix ++ "/client"; pub const RouteBundle = struct { pub const Index = bun.GenericIndex(u30, RouteBundle); - route: Route.Index, - server_state: State, - - /// Used to communicate over WebSocket the pattern. The HMR client contains code - /// to match this against the URL bar to determine if a reloaded route applies. - full_pattern: []const u8, - /// Generated lazily when the client JS is requested (HTTP GET /_bun/client/*.js), - /// which is only needed when a hard-reload is performed. - /// - /// Freed when a client module updates. - client_bundle: ?[]const u8, - /// Contain the list of serialized failures. Hashmap allows for - /// efficient lookup and removal of failing files. - /// When state == .evaluation_failure, this is popualted with that error. - evaluate_failure: ?SerializedFailure, - - // TODO: micro-opt: use a singular strong - - /// Cached to avoid re-creating the array every request. - /// Invalidated when a layout is added or removed from this route. - cached_module_list: JSC.Strong, - /// Cached to avoid re-creating the string every request. - /// Invalidated when any client file associated with the route is updated. - cached_client_bundle_url: JSC.Strong, - /// Cached to avoid re-creating the array every request. - /// Invalidated when the list of CSS files changes. - cached_css_file_array: JSC.Strong, + /// There are two distinct types of route bundles. + data: union(enum) { + /// FrameworkRouter provided route + framework: Framework, + /// HTMLBundle provided route + html: HTML, + }, + /// Generated lazily when the client JS is requested. + /// Invalidated when a downstream client module updates. + client_bundle: ?*StaticRoute, + + /// If the client tries to load a script with the wrong generation, it will + /// receive a bundle that instantly reloads the page, implying a bundle + /// change has occurred while fetching the script. 
+ client_script_generation: u32, /// Reference count of how many HmrSockets say they are on this route. This /// allows hot-reloading events to reduce the amount of times it traces the /// graph. - active_viewers: usize, + active_viewers: u32, + + pub const Framework = struct { + route_index: Route.Index, + + // TODO: micro-opt: use a singular strong + + /// Cached to avoid re-creating the array every request. + /// TODO: Invalidated when a layout is added or removed from this route. + cached_module_list: JSC.Strong, + /// Cached to avoid re-creating the string every request. + /// TODO: Invalidated when any client file associated with the route is updated. + cached_client_bundle_url: JSC.Strong, + /// Cached to avoid re-creating the array every request. + /// Invalidated when the list of CSS files changes. + cached_css_file_array: JSC.Strong, + + /// Contain the list of serialized failures. Hashmap allows for + /// efficient lookup and removal of failing files. + /// When state == .evaluation_failure, this is populated with that error. + evaluate_failure: ?SerializedFailure, + }; + + pub const HTML = struct { + /// DevServer increments the ref count of this bundle + html_bundle: *HTMLBundle.HTMLBundleRoute, + bundled_file: IncrementalGraph(.client).FileIndex, + /// Invalidated when the HTML file is modified, but not it's imports. + /// The style tag is injected here. + head_end_tag_index: ByteOffset.Optional, + /// Invalidated when the HTML file is modified, but not it's imports. + /// The script tag is injected here. + body_end_tag_index: ByteOffset.Optional, + /// The HTML file bundled, from the bundler. + bundled_html_text: ?[]const u8, + /// Derived from `bundled_html_text` + `client_script_generation` + /// and css information. Invalidated when: + /// - The HTML file itself modified. + /// - The list of CSS files changes. + /// - TODO: Any downstream file is rebundled. 
+ cached_response: ?*StaticRoute, + + const ByteOffset = bun.GenericIndex(u32, u8); + }; /// A union is not used so that `bundler_failure_logs` can re-use memory, as /// this state frequently changes between `loaded` and the failure variants. - const State = enum { + pub const State = enum { /// In development mode, routes are lazily built. This state implies a /// build of this route has never been run. It is possible to bundle the /// route entry point and still have an unqueued route if another route @@ -207,6 +275,45 @@ pub const RouteBundle = struct { /// at fault of bundling, nor would re-bundling change anything. loaded, }; + + pub const UnresolvedIndex = union(enum) { + /// FrameworkRouter provides a fullstack server-side route + framework: FrameworkRouter.Route.Index, + /// HTMLBundle provides a frontend-only route, SPA-style + html: *HTMLBundle.HTMLBundleRoute, + }; + + pub fn invalidateClientBundle(self: *RouteBundle) void { + if (self.client_bundle) |bundle| { + bundle.deref(); + self.client_bundle = null; + } + self.client_script_generation = std.crypto.random.int(u32); + switch (self.data) { + .framework => |*fw| fw.cached_client_bundle_url.clear(), + .html => |*html| if (html.cached_response) |cached_response| { + cached_response.deref(); + html.cached_response = null; + }, + } + } + + /// Does NOT count @sizeOf(RouteBundle) + pub fn memoryCost(self: *const RouteBundle) usize { + var cost: usize = 0; + if (self.client_bundle) |bundle| cost += bundle.memoryCost(); + switch (self.data) { + .framework => { + // the JSC.Strong children do not support memoryCost. likely not needed + // .evaluate_failure is not owned + }, + .html => |*html| { + if (html.bundled_html_text) |text| cost += text.len; + if (html.cached_response) |cached_response| cost += cached_response.memoryCost(); + }, + } + return cost; + } }; /// DevServer is stored on the heap, storing its allocator. 
@@ -233,11 +340,11 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .root = options.root, .vm = options.vm, .server = null, - .directory_watchers = DirectoryWatchStore.empty, + .directory_watchers = .empty, .server_fetch_function_callback = .{}, .server_register_update_callback = .{}, .generation = 0, - .graph_safety_lock = bun.DebugThreadLock.unlocked, + .graph_safety_lock = .unlocked, .dump_dir = dump_dir, .framework = options.framework, .bundler_options = options.bundler_options, @@ -245,28 +352,32 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { .has_pre_crash_handler = bun.FeatureFlags.bake_debugging_features and options.dump_state_on_crash orelse bun.getRuntimeFeatureFlag("BUN_DUMP_STATE_ON_CRASH"), - .css_files = .{}, - .route_js_payloads = .{}, - // .assets = .{}, - - .client_graph = IncrementalGraph(.client).empty, - .server_graph = IncrementalGraph(.server).empty, - .incremental_result = IncrementalResult.empty, - .route_lookup = .{}, - .route_bundles = .{}, + .frontend_only = options.frontend_only, + .client_graph = .empty, + .server_graph = .empty, + .incremental_result = .empty, + .route_lookup = .empty, + .route_bundles = .empty, + .html_router = .empty, .current_bundle = null, - .current_bundle_requests = .{}, .next_bundle = .{ - .route_queue = .{}, + .route_queue = .empty, .reload_event = null, .requests = .{}, }, - - .log = bun.logger.Log.init(allocator), - - .server_bundler = undefined, - .client_bundler = undefined, - .ssr_bundler = undefined, + .assets = .{ + .path_map = .empty, + .files = .empty, + .refs = .empty, + }, + .log = .init(allocator), + .plugin_state = .unknown, + .bundling_failures = .{}, + .deferred_request_pool = .init(allocator), + + .server_transpiler = undefined, + .client_transpiler = undefined, + .ssr_transpiler = undefined, .bun_watcher = undefined, .configuration_hash_key = undefined, .router = undefined, @@ -293,25 +404,25 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { dev.bun_watcher.start() catch 
|err| return global.throwError(err, "while initializing file watcher thread for development server"); - dev.server_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); - dev.client_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); - dev.ssr_bundler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); + dev.server_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); + dev.client_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); + dev.ssr_transpiler.resolver.watcher = dev.bun_watcher.getResolveWatcher(); dev.watcher_atomics = WatcherAtomics.init(dev); - dev.framework.initBundler(allocator, &dev.log, .development, .server, &dev.server_bundler) catch |err| + dev.framework.initBundler(allocator, &dev.log, .development, .server, &dev.server_transpiler) catch |err| return global.throwError(err, generic_action); - dev.client_bundler.options.dev_server = dev; - dev.framework.initBundler(allocator, &dev.log, .development, .client, &dev.client_bundler) catch |err| + dev.server_transpiler.options.dev_server = dev; + dev.framework.initBundler(allocator, &dev.log, .development, .client, &dev.client_transpiler) catch |err| return global.throwError(err, generic_action); - dev.server_bundler.options.dev_server = dev; + dev.client_transpiler.options.dev_server = dev; if (separate_ssr_graph) { - dev.framework.initBundler(allocator, &dev.log, .development, .ssr, &dev.ssr_bundler) catch |err| + dev.framework.initBundler(allocator, &dev.log, .development, .ssr, &dev.ssr_transpiler) catch |err| return global.throwError(err, generic_action); - dev.ssr_bundler.options.dev_server = dev; + dev.ssr_transpiler.options.dev_server = dev; } - dev.framework = dev.framework.resolve(&dev.server_bundler.resolver, &dev.client_bundler.resolver, options.arena) catch { + dev.framework = dev.framework.resolve(&dev.server_transpiler.resolver, &dev.client_transpiler.resolver, options.arena) catch { if (dev.framework.is_built_in_react) try 
bake.Framework.addReactInstallCommandNote(&dev.log); return global.throwValue(dev.log.toJSAggregateError(global, bun.String.static("Framework is missing required files!"))); @@ -329,8 +440,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { Output.panic("unhandled {}", .{e})).unwrap() catch |e| Output.panic("unhandled {}", .{e}); bun.writeAnyToHasher(&hash, stat.mtime()); - hash.update(bake.getHmrRuntime(.client)); - hash.update(bake.getHmrRuntime(.server)); + hash.update(bake.getHmrRuntime(.client).code); + hash.update(bake.getHmrRuntime(.server).code); } else { hash.update(bun.Environment.git_sha_short); } @@ -391,6 +502,8 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { } hash.update(&.{0}); + bun.writeAnyToHasher(&hash, options.frontend_only); + break :hash_key std.fmt.bytesToHex(std.mem.asBytes(&hash.final()), .lower); }; @@ -400,7 +513,9 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { assert(try dev.client_graph.insertStale(rfr.import_source, false) == IncrementalGraph(.client).react_refresh_index); } - dev.initServerRuntime(); + if (!options.frontend_only) { + dev.initServerRuntime(); + } // Initialize FrameworkRouter dev.router = router: { @@ -409,7 +524,7 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { for (options.framework.file_system_router_types, 0..) |fsr, i| { const joined_root = bun.path.joinAbs(dev.root, .auto, fsr.root); - const entry = dev.server_bundler.resolver.readDirInfoIgnoreError(joined_root) orelse + const entry = dev.server_transpiler.resolver.readDirInfoIgnoreError(joined_root) orelse continue; const server_file = try dev.server_graph.insertStaleExtra(fsr.entry_server, false, true); @@ -439,10 +554,12 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { break :router try FrameworkRouter.initEmpty(dev.root, types.items, allocator); }; - // TODO: move scanning to be one tick after server startup. this way the - // line saying the server is ready shows quicker, and route errors show up - // after that line. 
- try dev.scanInitialRoutes(); + if (options.frontend_only) { + // TODO: move scanning to be one tick after server startup. this way the + // line saying the server is ready shows quicker, and route errors show up + // after that line. + try dev.scanInitialRoutes(); + } if (bun.FeatureFlags.bake_debugging_features and dev.has_pre_crash_handler) try bun.crash_handler.appendPreCrashHandler(DevServer, dev, dumpStateDueToCrash); @@ -450,8 +567,242 @@ pub fn init(options: Options) bun.JSOOM!*DevServer { return dev; } +pub fn deinit(dev: *DevServer) void { + // TODO: Currently deinit is not implemented, as it was assumed to be alive for + // the remainder of this process' lifespan. This isn't always true. + const allocator = dev.allocator; + + // _ = VoidFieldTypes(DevServer){ + // // has no action taken + // .allocator = {}, + // .configuration_hash_key = {}, + // .graph_safety_lock = {}, + // .bun_watcher = {}, + // .watcher_atomics = {}, + // .plugin_state = {}, + // .generation = {}, + // .bundles_since_last_error = {}, + // .emit_visualizer_events = {}, + // .dump_dir = {}, + // .frontend_only = {}, + // .server_fetch_function_callback = {}, + // .server_register_update_callback = {}, + // .deferred_request_pool = {}, + + // .has_pre_crash_handler = if (dev.has_pre_crash_handler) + // bun.crash_handler.removePreCrashHandler(dev), + + // // pointers that are not considered a part of DevServer + // .vm = {}, + // .server = {}, + // .server_transpiler = {}, + // .client_transpiler = {}, + // .ssr_transpiler = {}, + // .log = {}, + // .framework = {}, // TODO: maybe + // .bundler_options = {}, // TODO: maybe + + // // to be counted. 
+ // .root = { + // cost += dev.root.len; + // }, + // .router = { + // cost += dev.router.memoryCost(); + // }, + // .route_bundles = for (dev.route_bundles.items) |*bundle| { + // cost += bundle.memoryCost(); + // }, + // .server_graph = { + // cost += dev.server_graph.memoryCost(); + // }, + // .client_graph = { + // cost += dev.client_graph.memoryCost(); + // }, + // .assets = { + // cost += dev.assets.memoryCost(); + // }, + // .incremental_result = { + // cost += memoryCostArrayList(dev.incremental_result.client_components_added); + // cost += memoryCostArrayList(dev.incremental_result.html_routes_affected); + // cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected); + // cost += memoryCostArrayList(dev.incremental_result.client_components_removed); + // cost += memoryCostArrayList(dev.incremental_result.failures_removed); + // cost += memoryCostArrayList(dev.incremental_result.client_components_affected); + // cost += memoryCostArrayList(dev.incremental_result.failures_added); + // }, + // .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| { + // cost += memoryCostArrayHashMap(hack); + // }, + // .directory_watchers = { + // cost += memoryCostArrayList(dev.directory_watchers.dependencies); + // cost += memoryCostArrayList(dev.directory_watchers.dependencies_free_list); + // cost += memoryCostArrayHashMap(dev.directory_watchers.watches); + // for (dev.directory_watchers.dependencies.items) |dep| { + // cost += dep.specifier.len; + // } + // }, + // .html_router = { + // // std does not provide a way to measure exact allocation size of HashMapUnmanaged + // cost += dev.html_router.map.capacity() * (@sizeOf(*HTMLBundle.HTMLBundleRoute) + @sizeOf([]const u8)); + // // DevServer does not count the referenced HTMLBundle.HTMLBundleRoutes + // }, + // .bundling_failures = { + // cost += memoryCostSlice(dev.bundling_failures.keys()); + // for (dev.bundling_failures.keys()) |failure| { + // cost += failure.data.len; + // } + 
// }, + // .current_bundle = { + // // All entries are owned by the bundler arena, not DevServer, except for `requests` + // if (dev.current_bundle) |bundle| { + // var r = bundle.requests.first; + // while (r) |request| : (r = request.next) { + // cost += @sizeOf(DeferredRequest.Node); + // } + // } + // }, + // .next_bundle = { + // var r = dev.next_bundle.requests.first; + // while (r) |request| : (r = request.next) { + // cost += @sizeOf(DeferredRequest.Node); + // } + // cost += memoryCostArrayHashMap(dev.next_bundle.route_queue); + // }, + // .route_lookup = { + // cost += memoryCostArrayHashMap(dev.route_lookup); + // }, + // }; + + allocator.destroy(dev); + // if (bun.Environment.isDebug) + // bun.todoPanic(@src(), "bake.DevServer.deinit()", .{}); +} + +/// Returns an estimation for how many bytes DevServer is explicitly aware of. +/// If this number stays constant but RSS grows, then there is a memory leak. If +/// this number grows out of control, then incremental garbage collection is not +/// good enough. +/// +/// Memory measurements are important as DevServer has a long lifetime, but +/// unlike the HTTP server, it controls a lot of objects that are frequently +/// being added, removed, and changed (as the developer edits source files). It +/// is exponentially easy to mess up memory management. 
+pub fn memoryCost(dev: *DevServer) usize { + var cost: usize = @sizeOf(DevServer); + // See https://github.com/ziglang/zig/issues/21879 + _ = VoidFieldTypes(DevServer){ + // does not contain pointers + .allocator = {}, + .configuration_hash_key = {}, + .graph_safety_lock = {}, + .bun_watcher = {}, + .watcher_atomics = {}, + .plugin_state = {}, + .generation = {}, + .bundles_since_last_error = {}, + .emit_visualizer_events = {}, + .dump_dir = {}, + .has_pre_crash_handler = {}, + .frontend_only = {}, + .server_fetch_function_callback = {}, + .server_register_update_callback = {}, + .deferred_request_pool = {}, + + // pointers that are not considered a part of DevServer + .vm = {}, + .server = {}, + .server_transpiler = {}, + .client_transpiler = {}, + .ssr_transpiler = {}, + .log = {}, + .framework = {}, // TODO: maybe + .bundler_options = {}, // TODO: maybe + + // to be counted. + .root = { + cost += dev.root.len; + }, + .router = { + cost += dev.router.memoryCost(); + }, + .route_bundles = for (dev.route_bundles.items) |*bundle| { + cost += bundle.memoryCost(); + }, + .server_graph = { + cost += dev.server_graph.memoryCost(); + }, + .client_graph = { + cost += dev.client_graph.memoryCost(); + }, + .assets = { + cost += dev.assets.memoryCost(); + }, + .incremental_result = { + cost += memoryCostArrayList(dev.incremental_result.client_components_added); + cost += memoryCostArrayList(dev.incremental_result.html_routes_affected); + cost += memoryCostArrayList(dev.incremental_result.framework_routes_affected); + cost += memoryCostArrayList(dev.incremental_result.client_components_removed); + cost += memoryCostArrayList(dev.incremental_result.failures_removed); + cost += memoryCostArrayList(dev.incremental_result.client_components_affected); + cost += memoryCostArrayList(dev.incremental_result.failures_added); + }, + .has_tailwind_plugin_hack = if (dev.has_tailwind_plugin_hack) |hack| { + cost += memoryCostArrayHashMap(hack); + }, + .directory_watchers = { + cost += 
memoryCostArrayList(dev.directory_watchers.dependencies); + cost += memoryCostArrayList(dev.directory_watchers.dependencies_free_list); + cost += memoryCostArrayHashMap(dev.directory_watchers.watches); + for (dev.directory_watchers.dependencies.items) |dep| { + cost += dep.specifier.len; + } + }, + .html_router = { + // std does not provide a way to measure exact allocation size of HashMapUnmanaged + cost += dev.html_router.map.capacity() * (@sizeOf(*HTMLBundle.HTMLBundleRoute) + @sizeOf([]const u8)); + // DevServer does not count the referenced HTMLBundle.HTMLBundleRoutes + }, + .bundling_failures = { + cost += memoryCostSlice(dev.bundling_failures.keys()); + for (dev.bundling_failures.keys()) |failure| { + cost += failure.data.len; + } + }, + .current_bundle = { + // All entries are owned by the bundler arena, not DevServer, except for `requests` + if (dev.current_bundle) |bundle| { + var r = bundle.requests.first; + while (r) |request| : (r = request.next) { + cost += @sizeOf(DeferredRequest.Node); + } + } + }, + .next_bundle = { + var r = dev.next_bundle.requests.first; + while (r) |request| : (r = request.next) { + cost += @sizeOf(DeferredRequest.Node); + } + cost += memoryCostArrayHashMap(dev.next_bundle.route_queue); + }, + .route_lookup = { + cost += memoryCostArrayHashMap(dev.route_lookup); + }, + }; + return cost; +} + +fn memoryCostArrayList(slice: anytype) usize { + return slice.capacity * @sizeOf(@typeInfo(@TypeOf(slice.items)).pointer.child); +} +fn memoryCostSlice(slice: anytype) usize { + return slice.len * @sizeOf(@typeInfo(@TypeOf(slice)).pointer.child); +} +fn memoryCostArrayHashMap(map: anytype) usize { + return @TypeOf(map.entries).capacityInBytes(map.entries.capacity); +} + fn initServerRuntime(dev: *DevServer) void { - const runtime = bun.String.static(bun.bake.getHmrRuntime(.server)); + const runtime = bun.String.static(bun.bake.getHmrRuntime(.server).code); const interface = c.BakeLoadInitialServerCode( @ptrCast(dev.vm.global), @@ -479,7 
+830,7 @@ fn initServerRuntime(dev: *DevServer) void { fn scanInitialRoutes(dev: *DevServer) !void { try dev.router.scanAll( dev.allocator, - &dev.server_bundler.resolver, + &dev.server_transpiler.resolver, FrameworkRouter.InsertionContext.wrap(DevServer, dev), ); @@ -487,94 +838,86 @@ fn scanInitialRoutes(dev: *DevServer) !void { try dev.client_graph.ensureStaleBitCapacity(true); } -pub fn attachRoutes(dev: *DevServer, server: anytype) !void { +/// Returns true if a catch-all handler was attached. +pub fn attachRoutes(dev: *DevServer, server: anytype) !bool { dev.server = bun.JSC.API.AnyServer.from(server); const app = server.app.?; + const is_ssl = @typeInfo(@TypeOf(app)).pointer.child.is_ssl; - // For this to work, the route handlers need to be augmented to use the comptime - // SSL parameter. It's worth considering removing the SSL boolean. - if (@TypeOf(app) == *uws.NewApp(true)) { - bun.todoPanic(@src(), "DevServer does not support SSL yet", .{}); - } - - app.get(client_prefix ++ "/:route", *DevServer, dev, onJsRequest); - app.get(asset_prefix ++ "/:asset", *DevServer, dev, onAssetRequest); - app.get(css_prefix ++ "/:asset", *DevServer, dev, onCssRequest); - app.get(internal_prefix ++ "/src/*", *DevServer, dev, onSrcRequest); + app.get(client_prefix ++ "/:route", *DevServer, dev, wrapGenericRequestHandler(onJsRequest, is_ssl)); + app.get(asset_prefix ++ "/:asset", *DevServer, dev, wrapGenericRequestHandler(onAssetRequest, is_ssl)); + app.get(internal_prefix ++ "/src/*", *DevServer, dev, wrapGenericRequestHandler(onSrcRequest, is_ssl)); app.ws( internal_prefix ++ "/hmr", dev, 0, - uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, false).apply(.{}), + uws.WebSocketBehavior.Wrap(DevServer, HmrSocket, is_ssl).apply(.{}), ); - if (bun.FeatureFlags.bake_debugging_features) - app.get(internal_prefix ++ "/incremental_visualizer", *DevServer, dev, onIncrementalVisualizer); + if (bun.FeatureFlags.bake_debugging_features) { + app.get( + internal_prefix ++ 
"/incremental_visualizer", + *DevServer, + dev, + wrapGenericRequestHandler(onIncrementalVisualizer, is_ssl), + ); + } - app.any("/*", *DevServer, dev, onRequest); + // Only attach a catch-all handler if the framework has filesystem router + // types. Otherwise, this can just be Bun.serve's default handler. + if (dev.framework.file_system_router_types.len > 0) { + app.any("/*", *DevServer, dev, wrapGenericRequestHandler(onRequest, is_ssl)); + return true; + } else { + return false; + } } -pub fn deinit(dev: *DevServer) void { - // TODO: Currently deinit is not implemented, as it was assumed to be alive for - // the remainder of this process' lifespan. This isn't always true. - const allocator = dev.allocator; - if (dev.has_pre_crash_handler) - bun.crash_handler.removePreCrashHandler(dev); - allocator.destroy(dev); - // if (bun.Environment.isDebug) - // bun.todoPanic(@src(), "bake.DevServer.deinit()", .{}); -} +fn onJsRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void { + const route_id = req.parameter(0); + if (!bun.strings.hasSuffixComptime(route_id, ".js")) + return req.setYield(true); + const min_len = "-00000000FFFFFFFF.js".len; + if (route_id.len < min_len) + return req.setYield(true); + const hex = route_id[route_id.len - min_len + 1 ..][0 .. @sizeOf(u64) * 2]; + if (hex.len != @sizeOf(u64) * 2) + return req.setYield(true); + const id = parseHexToInt(u64, hex) orelse + return req.setYield(true); -fn onJsRequest(dev: *DevServer, req: *Request, resp: *Response) void { - const maybe_route = route: { - const route_id = req.parameter(0); - if (!bun.strings.hasSuffixComptime(route_id, ".js")) - return req.setYield(true); - if (!bun.strings.hasPrefixComptime(route_id, "route.")) - return req.setYield(true); - const i = parseHexToInt(u64, route_id["route.".len .. 
route_id.len - ".js".len]) orelse - return req.setYield(true); - break :route dev.route_js_payloads.get(i) orelse - return req.setYield(true); - }; + const route_bundle_index: RouteBundle.Index = .init(@intCast(id & 0xFFFFFFFF)); + const generation: u32 = @intCast(id >> 32); - if (maybe_route.unwrap()) |route| { - dev.ensureRouteIsBundled(route, .js_payload, req, resp) catch bun.outOfMemory(); - } else { - @panic("TODO: generate client bundle with no source files"); - } -} + if (route_bundle_index.get() >= dev.route_bundles.items.len) + return req.setYield(true); -fn onAssetRequest(dev: *DevServer, req: *Request, resp: *Response) void { - _ = dev; - _ = req; - _ = resp; - bun.todoPanic(@src(), "serve asset file", .{}); - // const route_id = req.parameter(0); - // const asset = dev.assets.get(route_id) orelse - // return req.setYield(true); - // _ = asset; // autofix + const route_bundle = dev.route_bundles.items[route_bundle_index.get()]; + if (route_bundle.client_script_generation != generation or + route_bundle.server_state != .loaded) + { + bun.Output.debugWarn("TODO: Outdated JS Payload", .{}); + return req.setYield(true); + } + dev.onJsRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST); } -fn onCssRequest(dev: *DevServer, req: *Request, resp: *Response) void { +fn onAssetRequest(dev: *DevServer, req: *Request, resp: AnyResponse) void { const param = req.parameter(0); - if (!bun.strings.hasSuffixComptime(param, ".css")) - return req.setYield(true); - const hex = param[0 .. param.len - ".css".len]; - if (hex.len != @sizeOf(u64) * 2) + if (param.len < @sizeOf(u64) * 2) return req.setYield(true); - + const hex = param[0 .. 
@sizeOf(u64) * 2]; var out: [@sizeOf(u64)]u8 = undefined; assert((std.fmt.hexToBytes(&out, hex) catch return req.setYield(true)).len == @sizeOf(u64)); const hash: u64 = @bitCast(out); - - const css = dev.css_files.get(hash) orelse + debug.log("onAssetRequest {} {s}", .{ hash, param }); + const asset = dev.assets.get(hash) orelse return req.setYield(true); - - sendTextFile(css, MimeType.css.value, resp); + req.setYield(false); + asset.on(resp); } fn parseHexToInt(comptime T: type, slice: []const u8) ?T { @@ -583,11 +926,29 @@ fn parseHexToInt(comptime T: type, slice: []const u8) ?T { return @bitCast(out); } -fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: *Response) void { +inline fn wrapGenericRequestHandler( + comptime handler: anytype, + comptime is_ssl: bool, +) fn ( + dev: *DevServer, + req: *Request, + resp: *uws.NewApp(is_ssl).Response, +) void { + const fn_info = @typeInfo(@TypeOf(handler)).@"fn"; + assert(fn_info.params.len == 3); + const uses_any_response = if (fn_info.params[2].type) |t| t == AnyResponse else false; + return struct { + fn handle(dev: *DevServer, req: *Request, resp: *uws.NewApp(is_ssl).Response) void { + handler(dev, req, if (uses_any_response) AnyResponse.init(resp) else resp); + } + }.handle; +} + +fn onIncrementalVisualizer(_: *DevServer, _: *Request, resp: anytype) void { resp.corked(onIncrementalVisualizerCorked, .{resp}); } -fn onIncrementalVisualizerCorked(resp: *Response) void { +fn onIncrementalVisualizerCorked(resp: anytype) void { const code = if (Environment.codegen_embed) @embedFile("incremental_visualizer.html") else @@ -598,159 +959,210 @@ fn onIncrementalVisualizerCorked(resp: *Response) void { fn ensureRouteIsBundled( dev: *DevServer, - route_index: Route.Index, - kind: DeferredRequest.Data.Tag, + route_bundle_index: RouteBundle.Index, + kind: DeferredRequest.Handler.Kind, req: *Request, - resp: *Response, + resp: AnyResponse, ) bun.OOM!void { - const route_bundle_index = try 
dev.getOrPutRouteBundle(route_index); - - // TODO: Zig 0.14 gets labelled continue: - // - Remove the `while` - // - Move the code after this switch into `.loaded =>` - // - Replace `break` with `continue :sw .loaded` - // - Replace `continue` with `continue :sw ` - while (true) { - switch (dev.routeBundlePtr(route_bundle_index).server_state) { - .unqueued => { - try dev.next_bundle.requests.ensureUnusedCapacity(dev.allocator, 1); - if (dev.current_bundle != null) { - try dev.next_bundle.route_queue.ensureUnusedCapacity(dev.allocator, 1); - } - - const deferred: DeferredRequest = .{ - .route_bundle_index = route_bundle_index, - .data = switch (kind) { - .js_payload => .{ .js_payload = resp }, - .server_handler => .{ - .server_handler = (dev.server.?.DebugHTTPServer.prepareJsRequestContext(req, resp, null) orelse return) - .save(dev.vm.global, req, resp), - }, + assert(dev.server != null); + sw: switch (dev.routeBundlePtr(route_bundle_index).server_state) { + .unqueued => { + if (dev.current_bundle != null) { + try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {}); + dev.routeBundlePtr(route_bundle_index).server_state = .bundling; + try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp); + } else { + // If plugins are not yet loaded, prepare them. + // In the case plugins are set to &.{}, this will not hit `.pending`. 
+ plugin: switch (dev.plugin_state) { + .unknown => if (dev.bundler_options.plugin != null) { + // Framework-provided plugin is likely going to be phased out later + dev.plugin_state = .loaded; + } else { + // TODO: implement a proper solution here + dev.has_tailwind_plugin_hack = if (dev.vm.transpiler.options.serve_plugins) |serve_plugins| + for (serve_plugins) |plugin| { + if (bun.strings.includes(plugin, "tailwind")) break .empty; + } else null + else + null; + + switch (dev.server.?.getOrLoadPlugins(.{ .dev_server = dev })) { + .pending => { + dev.plugin_state = .pending; + continue :plugin .pending; + }, + .err => { + dev.plugin_state = .err; + continue :plugin .err; + }, + .ready => |ready| { + dev.plugin_state = .loaded; + dev.bundler_options.plugin = ready; + }, + } }, - }; - errdefer @compileError("cannot error since the request is already stored"); - - dev.next_bundle.requests.appendAssumeCapacity(deferred); - if (dev.current_bundle != null) { - dev.next_bundle.route_queue.putAssumeCapacity(route_bundle_index, {}); - } else { - var sfa = std.heap.stackFallback(4096, dev.allocator); - const temp_alloc = sfa.get(); - - var entry_points: EntryPointList = EntryPointList.empty; - defer entry_points.deinit(temp_alloc); - - dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_index) catch bun.outOfMemory(); + .pending => { + try dev.next_bundle.route_queue.put(dev.allocator, route_bundle_index, {}); + dev.routeBundlePtr(route_bundle_index).server_state = .bundling; + try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp); + return; + }, + .err => { + // TODO: render plugin error page + resp.endWithoutBody(true); + return; + }, + .loaded => {}, + } - if (entry_points.set.count() == 0) { - if (dev.bundling_failures.count() > 0) { - dev.routeBundlePtr(route_bundle_index).server_state = .possible_bundling_failures; - } else { - dev.routeBundlePtr(route_bundle_index).server_state = .loaded; - } - continue; + // Prepare a 
bundle with just this route. + var sfa = std.heap.stackFallback(4096, dev.allocator); + const temp_alloc = sfa.get(); + + var entry_points: EntryPointList = .empty; + defer entry_points.deinit(temp_alloc); + try dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index); + + // If all files were already bundled (possible with layouts), + // then no entry points will be queued up here. That does + // not mean the route is ready for presentation. + if (entry_points.set.count() == 0) { + if (dev.bundling_failures.count() > 0) { + dev.routeBundlePtr(route_bundle_index).server_state = .possible_bundling_failures; + continue :sw .possible_bundling_failures; + } else { + dev.routeBundlePtr(route_bundle_index).server_state = .loaded; + continue :sw .loaded; } + } - dev.startAsyncBundle( - entry_points, - false, - std.time.Timer.start() catch @panic("timers unsupported"), - ) catch |err| { - if (dev.log.hasAny()) { - dev.log.print(Output.errorWriterBuffered()) catch {}; - Output.flush(); - } - Output.panic("Fatal error while initializing bundle job: {}", .{err}); - }; + try dev.deferRequest(&dev.next_bundle.requests, route_bundle_index, kind, req, resp); - dev.routeBundlePtr(route_bundle_index).server_state = .bundling; - } - return; - }, - .bundling => { - bun.assert(dev.current_bundle != null); - try dev.current_bundle_requests.ensureUnusedCapacity(dev.allocator, 1); - - const deferred: DeferredRequest = .{ - .route_bundle_index = route_bundle_index, - .data = switch (kind) { - .js_payload => .{ .js_payload = resp }, - .server_handler => .{ - .server_handler = (dev.server.?.DebugHTTPServer.prepareJsRequestContext(req, resp, null) orelse return) - .save(dev.vm.global, req, resp), - }, - }, - }; + dev.startAsyncBundle( + entry_points, + false, + std.time.Timer.start() catch @panic("timers unsupported"), + ) catch bun.outOfMemory(); + } - dev.current_bundle_requests.appendAssumeCapacity(deferred); - return; - }, - .possible_bundling_failures => { - 
// TODO: perform a graph trace to find just the errors that are needed - if (dev.bundling_failures.count() > 0) { - resp.corked(sendSerializedFailures, .{ - dev, - resp, - dev.bundling_failures.keys(), - .bundler, - }); - return; - } else { - dev.routeBundlePtr(route_bundle_index).server_state = .loaded; - break; + dev.routeBundlePtr(route_bundle_index).server_state = .bundling; + }, + .bundling => { + bun.assert(dev.current_bundle != null); + try dev.deferRequest(&dev.current_bundle.?.requests, route_bundle_index, kind, req, resp); + }, + .possible_bundling_failures => { + if (dev.bundling_failures.count() > 0) { + // Trace the graph to see if there are any failures that are + // reachable by this route. + switch (try checkRouteFailures(dev, route_bundle_index, resp)) { + .stop => return, + .ok => {}, // Errors were cleared or not in the way. } - }, - .evaluation_failure => { - resp.corked(sendSerializedFailures, .{ - dev, - resp, - (&(dev.routeBundlePtr(route_bundle_index).evaluate_failure orelse @panic("missing error")))[0..1], - .evaluation, - }); - return; - }, - .loaded => break, - } + } - // this error is here to make sure there are no accidental loop exits - @compileError("all branches above should `return`, `break` or `continue`"); + dev.routeBundlePtr(route_bundle_index).server_state = .loaded; + continue :sw .loaded; + }, + .evaluation_failure => { + resp.corked(sendSerializedFailures, .{ + dev, + resp, + (&(dev.routeBundlePtr(route_bundle_index).data.framework.evaluate_failure.?))[0..1], + .evaluation, + }); + }, + .loaded => switch (kind) { + .server_handler => dev.onFrameworkRequestWithBundle(route_bundle_index, .{ .stack = req }, resp), + .bundled_html_page => dev.onHtmlRequestWithBundle(route_bundle_index, resp, bun.http.Method.which(req.method()) orelse .POST), + }, } +} + +fn deferRequest( + dev: *DevServer, + requests_array: *DeferredRequest.List, + route_bundle_index: RouteBundle.Index, + kind: DeferredRequest.Handler.Kind, + req: *Request, + 
resp: AnyResponse, +) !void { + const deferred = dev.deferred_request_pool.get(); + deferred.data = .{ + .route_bundle_index = route_bundle_index, + .handler = switch (kind) { + .bundled_html_page => .{ .bundled_html_page = .{ .response = resp, .method = bun.http.Method.which(req.method()) orelse .POST } }, + .server_handler => .{ + .server_handler = dev.server.?.prepareAndSaveJsRequestContext(req, resp, dev.vm.global) orelse return, + }, + }, + }; + resp.onAborted(*DeferredRequest, DeferredRequest.onAbort, &deferred.data); + requests_array.prepend(deferred); +} - switch (kind) { - .server_handler => dev.onRequestWithBundle(route_bundle_index, .{ .stack = req }, resp), - .js_payload => dev.onJsRequestWithBundle(route_bundle_index, resp), +fn checkRouteFailures(dev: *DevServer, route_bundle_index: RouteBundle.Index, resp: anytype) !enum { stop, ok } { + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + const sfa = sfa_state.get(); + var gts = try dev.initGraphTraceState(sfa); + defer gts.deinit(sfa); + defer dev.incremental_result.failures_added.clearRetainingCapacity(); + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + try dev.traceAllRouteImports(dev.routeBundlePtr(route_bundle_index), >s, .find_errors); + if (dev.incremental_result.failures_added.items.len > 0) { + resp.corked(sendSerializedFailures, .{ + dev, + resp, + dev.incremental_result.failures_added.items, + .bundler, + }); + return .stop; + } else { + // Failures are unreachable by this route, so it is OK to load. 
+ return .ok; } } -fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointList, alloc: Allocator, route_index: Route.Index) bun.OOM!void { +fn appendRouteEntryPointsIfNotStale(dev: *DevServer, entry_points: *EntryPointList, alloc: Allocator, rbi: RouteBundle.Index) bun.OOM!void { const server_file_names = dev.server_graph.bundled_files.keys(); const client_file_names = dev.client_graph.bundled_files.keys(); // Build a list of all files that have not yet been bundled. - var route = dev.router.routePtr(route_index); - const router_type = dev.router.typePtr(route.type); - try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, router_type.server_file); - try dev.appendOpaqueEntryPoint(client_file_names, entry_points, alloc, .client, router_type.client_file); - try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_page); - try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout); - while (route.parent.unwrap()) |parent_index| { - route = dev.router.routePtr(parent_index); - try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout); + switch (dev.routeBundlePtr(rbi).data) { + .framework => |*bundle| { + var route = dev.router.routePtr(bundle.route_index); + const router_type = dev.router.typePtr(route.type); + try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, router_type.server_file); + try dev.appendOpaqueEntryPoint(client_file_names, entry_points, alloc, .client, router_type.client_file); + try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_page); + try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, route.file_layout); + while (route.parent.unwrap()) |parent_index| { + route = dev.router.routePtr(parent_index); + try dev.appendOpaqueEntryPoint(server_file_names, entry_points, alloc, .server, 
route.file_layout); + } + }, + .html => |*html| { + try entry_points.append(alloc, html.html_bundle.html_bundle.path, .{ .client = true }); + }, } } -fn onRequestWithBundle( +fn onFrameworkRequestWithBundle( dev: *DevServer, route_bundle_index: RouteBundle.Index, req: bun.JSC.API.SavedRequest.Union, - resp: *Response, + resp: AnyResponse, ) void { - const server_request_callback = dev.server_fetch_function_callback.get() orelse - unreachable; // did not bundle - const route_bundle = dev.routeBundlePtr(route_bundle_index); + assert(route_bundle.data == .framework); + const bundle = &route_bundle.data.framework; + + const server_request_callback = dev.server_fetch_function_callback.get() orelse + unreachable; // did not initialize server code - const router_type = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type); + const router_type = dev.router.typePtr(dev.router.routePtr(bundle.route_index).type); dev.server.?.onRequestFromSaved( req, @@ -766,17 +1178,17 @@ fn onRequestWithBundle( break :str str; }, // routeModules - route_bundle.cached_module_list.get() orelse arr: { + bundle.cached_module_list.get() orelse arr: { const global = dev.vm.global; const keys = dev.server_graph.bundled_files.keys(); var n: usize = 1; - var route = dev.router.routePtr(route_bundle.route); + var route = dev.router.routePtr(bundle.route_index); while (true) { if (route.file_layout != .none) n += 1; route = dev.router.routePtr(route.parent.unwrap() orelse break); } const arr = JSValue.createEmptyArray(global, n); - route = dev.router.routePtr(route_bundle.route); + route = dev.router.routePtr(bundle.route_index); var route_name = bun.String.createUTF8(dev.relativePath(keys[fromOpaqueFileId(.server, route.file_page.unwrap().?).get()])); arr.putIndex(global, 0, route_name.transferToJS(global)); n = 1; @@ -788,85 +1200,276 @@ fn onRequestWithBundle( } route = dev.router.routePtr(route.parent.unwrap() orelse break); } - route_bundle.cached_module_list = JSC.Strong.create(arr, 
global); + bundle.cached_module_list = JSC.Strong.create(arr, global); break :arr arr; }, // clientId - route_bundle.cached_client_bundle_url.get() orelse str: { - const id, const route_index: Route.Index.Optional = if (router_type.client_file != .none) - .{ std.crypto.random.int(u64), route_bundle.route.toOptional() } - else - // When there is no framework-provided client code, generate - // a JS file so that the hot-reloading code can reload the - // page on server-side changes and show errors in-browser. - .{ 0, .none }; - dev.route_js_payloads.put(dev.allocator, id, route_index) catch bun.outOfMemory(); - const str = bun.String.createFormat(client_prefix ++ "/route.{}.js", .{std.fmt.fmtSliceHexLower(std.mem.asBytes(&id))}) catch bun.outOfMemory(); + bundle.cached_client_bundle_url.get() orelse str: { + const bundle_index: u32 = route_bundle_index.get(); + const generation: u32 = route_bundle.client_script_generation; + const str = bun.String.createFormat(client_prefix ++ "/route-{}{}.js", .{ + std.fmt.fmtSliceHexLower(std.mem.asBytes(&bundle_index)), + std.fmt.fmtSliceHexLower(std.mem.asBytes(&generation)), + }) catch bun.outOfMemory(); defer str.deref(); const js = str.toJS(dev.vm.global); - route_bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global); + bundle.cached_client_bundle_url = JSC.Strong.create(js, dev.vm.global); break :str js; }, // styles - route_bundle.cached_css_file_array.get() orelse arr: { + bundle.cached_css_file_array.get() orelse arr: { const js = dev.generateCssJSArray(route_bundle) catch bun.outOfMemory(); - route_bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global); + bundle.cached_css_file_array = JSC.Strong.create(js, dev.vm.global); break :arr js; }, }, ); } -pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: *Response) void { +fn onHtmlRequestWithBundle(dev: *DevServer, route_bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void { + const 
route_bundle = dev.routeBundlePtr(route_bundle_index); + assert(route_bundle.data == .html); + const html = &route_bundle.data.html; + const blob = html.cached_response orelse generate: { + const payload = generateHTMLPayload(dev, route_bundle_index, route_bundle, html) catch bun.outOfMemory(); + errdefer dev.allocator.free(payload); + html.cached_response = StaticRoute.initFromAnyBlob( + .fromOwnedSlice(dev.allocator, payload), + .{ + .mime_type = .html, + .server = dev.server orelse unreachable, + }, + ); + break :generate html.cached_response.?; + }; + blob.onWithMethod(method, resp); +} + +fn generateHTMLPayload(dev: *DevServer, route_bundle_index: RouteBundle.Index, route_bundle: *RouteBundle, html: *RouteBundle.HTML) bun.OOM![]u8 { + assert(route_bundle.server_state == .loaded); // if not loaded, following values wont be initialized + assert(html.html_bundle.dev_server_id.unwrap() == route_bundle_index); + assert(html.cached_response == null); + const head_end_tag_index = (html.head_end_tag_index.unwrap() orelse unreachable).get(); + const body_end_tag_index = (html.body_end_tag_index.unwrap() orelse unreachable).get(); + const bundled_html = html.bundled_html_text orelse unreachable; + + // The bundler records two offsets in development mode, splitting the HTML + // file into three chunks. DevServer is able to insert style/script tags + // using the information available in IncrementalGraph. This approach + // allows downstream files to update without re-bundling the HTML file. + // + // + // + // + // Single Page Web App + // {head_end_tag_index} + // + //
+ // {body_end_tag_index} + // + const before_head_end = bundled_html[0..head_end_tag_index]; + const before_body_end = bundled_html[head_end_tag_index..body_end_tag_index]; + const after_body_end = bundled_html[body_end_tag_index..]; + + var display_name = bun.strings.withoutSuffixComptime(bun.path.basename(html.html_bundle.html_bundle.path), ".html"); + // TODO: function for URL safe chars + if (!bun.strings.isAllASCII(display_name)) display_name = "page"; + + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + + // Prepare bitsets for tracing + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + const sfa = sfa_state.get(); + var gts = try dev.initGraphTraceState(sfa); + defer gts.deinit(sfa); + // Run tracing + dev.client_graph.reset(); + try dev.traceAllRouteImports(route_bundle, >s, .find_css); + + const css_ids = dev.client_graph.current_css_files.items; + + const payload_size = bundled_html.len + + ("").len * css_ids.len + + "".len + + client_prefix.len + "/".len + + display_name.len + + "-0000000000000000.js".len; + + var array: std.ArrayListUnmanaged(u8) = try std.ArrayListUnmanaged(u8).initCapacity(dev.allocator, payload_size); + errdefer array.deinit(dev.allocator); + array.appendSliceAssumeCapacity(before_head_end); + // Insert all link tags before "" + for (css_ids) |name| { + array.appendSliceAssumeCapacity(""); + } + array.appendSliceAssumeCapacity(before_body_end); + // Insert the client script tag before "" + array.appendSliceAssumeCapacity(""); + array.appendSliceAssumeCapacity(after_body_end); + assert(array.items.len == array.capacity); // incorrect memory allocation size + return array.items; +} + +fn getJavaScriptCodeForHTMLFile( + dev: *DevServer, + index: bun.JSAst.Index, + import_records: []bun.BabyList(bun.ImportRecord), + input_file_sources: []bun.logger.Source, + loaders: []bun.options.Loader, +) bun.OOM![]const u8 { + var sfa_state = std.heap.stackFallback(65536, dev.allocator); + const sfa = 
sfa_state.get(); + var array: std.ArrayListUnmanaged(u8) = std.ArrayListUnmanaged(u8).initCapacity(sfa, 65536) catch bun.outOfMemory(); + defer array.deinit(sfa); + const w = array.writer(sfa); + + try w.writeAll(" "); + try bun.js_printer.writeJSONString(input_file_sources[index.get()].path.pretty, @TypeOf(w), w, .utf8); + try w.writeAll("(m) {\n "); + for (import_records[index.get()].slice()) |import| { + if (import.source_index.isValid() and loaders[import.source_index.get()] == .css) continue; + try w.writeAll(" m.dynamicImport("); + try bun.js_printer.writeJSONString(import.path.pretty, @TypeOf(w), w, .utf8); + try w.writeAll(");\n "); + } + try w.writeAll("},\n"); + + // Avoid-recloning if it is was moved to the hap + return if (array.items.ptr == &sfa_state.buffer) + try bun.default_allocator.dupe(u8, array.items) + else + array.items; +} + +pub fn onJsRequestWithBundle(dev: *DevServer, bundle_index: RouteBundle.Index, resp: AnyResponse, method: bun.http.Method) void { const route_bundle = dev.routeBundlePtr(bundle_index); - const code = route_bundle.client_bundle orelse code: { - const code = dev.generateClientBundle(route_bundle) catch bun.outOfMemory(); - route_bundle.client_bundle = code; - break :code code; + const blob = route_bundle.client_bundle orelse generate: { + const payload = dev.generateClientBundle(route_bundle) catch bun.outOfMemory(); + errdefer dev.allocator.free(payload); + route_bundle.client_bundle = StaticRoute.initFromAnyBlob( + .fromOwnedSlice(dev.allocator, payload), + .{ + .mime_type = .javascript, + .server = dev.server orelse unreachable, + }, + ); + break :generate route_bundle.client_bundle.?; }; - sendTextFile(code, MimeType.javascript.value, resp); + blob.onWithMethod(method, resp); } -pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: *App.Response) void { +pub fn onSrcRequest(dev: *DevServer, req: *uws.Request, resp: anytype) void { if (req.header("open-in-editor") == null) { resp.writeStatus("501 Not 
Implemented"); resp.end("Viewing source without opening in editor is not implemented yet!", false); return; } - const ctx = &dev.vm.rareData().editor_context; - ctx.autoDetectEditor(JSC.VirtualMachine.get().transpiler.env); - const line: ?[]const u8 = req.header("editor-line"); - const column: ?[]const u8 = req.header("editor-column"); + // TODO: better editor detection. on chloe's dev env, this opens apple terminal + vim + resp.writeStatus("501 Not Implemented"); + resp.end("TODO", false); + _ = dev; - if (ctx.editor) |editor| { - var url = req.url()[internal_prefix.len + "/src/".len ..]; - if (bun.strings.indexOfChar(url, ':')) |colon| { - url = url[0..colon]; - } - editor.open(ctx.path, url, line, column, dev.allocator) catch { - resp.writeStatus("202 No Content"); - resp.end("", false); - return; - }; - resp.writeStatus("202 No Content"); - resp.end("", false); - } else { - resp.writeStatus("500 Internal Server Error"); - resp.end("Please set your editor in bunfig.toml", false); - } + // const ctx = &dev.vm.rareData().editor_context; + // ctx.autoDetectEditor(JSC.VirtualMachine.get().transpiler.env); + // const line: ?[]const u8 = req.header("editor-line"); + // const column: ?[]const u8 = req.header("editor-column"); + + // if (ctx.editor) |editor| { + // var url = req.url()[internal_prefix.len + "/src/".len ..]; + // if (bun.strings.indexOfChar(url, ':')) |colon| { + // url = url[0..colon]; + // } + // editor.open(ctx.path, url, line, column, dev.allocator) catch { + // resp.writeStatus("202 No Content"); + // resp.end("", false); + // return; + // }; + // resp.writeStatus("202 No Content"); + // resp.end("", false); + // } else { + // resp.writeStatus("500 Internal Server Error"); + // resp.end("Please set your editor in bunfig.toml", false); + // } } +/// When requests are waiting on a bundle, the relevant request information is +/// prepared and stored in a linked list. 
const DeferredRequest = struct { + /// A small maximum is set because development servers are unlikely to + /// acquire much load, so allocating a ton at the start for no reason + /// is very silly. This contributes to ~6kb of the initial DevServer allocation. + const max_preallocated = 16; + + pub const List = std.SinglyLinkedList(DeferredRequest); + pub const Node = List.Node; + route_bundle_index: RouteBundle.Index, - data: Data, + handler: Handler, - const Data = union(enum) { + const Handler = union(enum) { + /// For a .framework route. This says to call and render the page. server_handler: bun.JSC.API.SavedRequest, - js_payload: *Response, - - const Tag = @typeInfo(Data).@"union".tag_type.?; + /// For a .html route. Serve the bundled HTML page. + bundled_html_page: ResponseAndMethod, + /// Do nothing and free this node. To simplify lifetimes, + /// the `DeferredRequest` is not freed upon abortion. Which + /// is okay since most requests do not abort. + aborted, + + /// Does not include `aborted` because branching on that value + /// has no meaningful purpose, so it is excluded. + const Kind = enum { + server_handler, + bundled_html_page, + }; }; + + fn onAbort(this: *DeferredRequest, resp: AnyResponse) void { + _ = resp; + this.abort(); + assert(this.handler == .aborted); + } + + /// Calling this is only required if the desired handler is going to be avoided, + /// such as for bundling failures or aborting the server. + /// Does not free the underlying `DeferredRequest.Node` + fn deinit(this: *DeferredRequest) void { + switch (this.handler) { + .server_handler => |*saved| saved.deinit(), + .bundled_html_page, .aborted => {}, + } + } + + /// Deinitializes state by aborting the connection. 
+ fn abort(this: *DeferredRequest) void { + switch (this.handler) { + .server_handler => |*saved| { + saved.response.endWithoutBody(true); + saved.deinit(); + }, + .bundled_html_page => |r| { + r.response.endWithoutBody(true); + }, + .aborted => return, + } + this.handler = .aborted; + } +}; + +const ResponseAndMethod = struct { + response: AnyResponse, + method: bun.http.Method, }; fn startAsyncBundle( @@ -881,6 +1484,9 @@ fn startAsyncBundle( dev.incremental_result.reset(); + // Ref server to keep it from closing. + if (dev.server) |server| server.onPendingRequest(); + var heap = try ThreadlocalArena.init(); errdefer heap.deinit(); const allocator = heap.allocator(); @@ -889,24 +1495,17 @@ fn startAsyncBundle( ast_memory_allocator.reset(); ast_memory_allocator.push(); - if (dev.framework.server_components == null) { - // The handling of the dependency graphs are SLIGHTLY different when - // server components are disabled. It's subtle, but enough that it - // would be incorrect to even try to run a build. 
- bun.todoPanic(@src(), "support non-server components build", .{}); - } - const bv2 = try BundleV2.init( - &dev.server_bundler, - if (dev.framework.server_components != null) .{ + &dev.server_transpiler, + .{ .framework = dev.framework, - .client_bundler = &dev.client_bundler, - .ssr_bundler = &dev.ssr_bundler, + .client_transpiler = &dev.client_transpiler, + .ssr_transpiler = &dev.ssr_transpiler, .plugins = dev.bundler_options.plugin, - } else @panic("TODO: support non-server components"), + }, allocator, .{ .js = dev.vm.eventLoop() }, - false, // reloading is handled separately + false, // watching is handled separately JSC.WorkPool.get(), heap, ); @@ -928,14 +1527,38 @@ fn startAsyncBundle( .timer = timer, .start_data = start_data, .had_reload_event = had_reload_event, + .requests = dev.next_bundle.requests, + .resolution_failure_entries = .{}, }; - const old_current_requests = dev.current_bundle_requests; - bun.assert(old_current_requests.items.len == 0); - dev.current_bundle_requests = dev.next_bundle.requests; - dev.next_bundle.requests = old_current_requests; + dev.next_bundle.requests = .{}; } fn indexFailures(dev: *DevServer) !void { + // Since resolution failures can be asynchronous, their logs are not inserted + // until the very end. + const resolution_failures = dev.current_bundle.?.resolution_failure_entries; + if (resolution_failures.count() > 0) { + for (resolution_failures.keys(), resolution_failures.values()) |owner, *log| { + if (log.hasErrors()) { + switch (owner.decode()) { + .client => |index| try dev.client_graph.insertFailure(.index, index, log, false), + .server => |index| try dev.server_graph.insertFailure(.index, index, log, true), + .none, .route => unreachable, + } + } + } + } + + // Theoretically, it shouldn't be possible for errors to leak into dev.log, but just in + // case that happens, they can be printed out. 
+ if (dev.log.hasErrors()) { + if (Environment.isDebug) { + Output.debugWarn("dev.log should not be written into when using DevServer", .{}); + } + dev.log.print(Output.errorWriter()) catch {}; + } + + // After inserting failures into the IncrementalGraphs, they are traced to their routes. var sfa_state = std.heap.stackFallback(65536, dev.allocator); const sfa = sfa_state.get(); @@ -973,7 +1596,7 @@ fn indexFailures(dev: *DevServer) !void { } } - for (dev.incremental_result.routes_affected.items) |entry| { + for (dev.incremental_result.framework_routes_affected.items) |entry| { if (dev.router.routePtr(entry.route_index).bundle.unwrap()) |index| { dev.routeBundlePtr(index).server_state = .possible_bundling_failures; } @@ -981,6 +1604,10 @@ fn indexFailures(dev: *DevServer) !void { dev.markAllRouteChildrenFailed(entry.route_index); } + for (dev.incremental_result.html_routes_affected.items) |index| { + dev.routeBundlePtr(index).server_state = .possible_bundling_failures; + } + dev.publish(.errors, payload.items, .binary); } else if (dev.incremental_result.failures_removed.items.len > 0) { var payload = try std.ArrayList(u8).initCapacity(sfa, @sizeOf(MessageId) + @sizeOf(u32) + dev.incremental_result.failures_removed.items.len * @sizeOf(u32)); @@ -1003,7 +1630,7 @@ fn indexFailures(dev: *DevServer) !void { /// Used to generate the entry point. Unlike incremental patches, this always /// contains all needed files for a route. 
-fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]const u8 { +fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]u8 { assert(route_bundle.client_bundle == null); assert(route_bundle.server_state == .loaded); // page is unfit to load @@ -1018,19 +1645,65 @@ fn generateClientBundle(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM![]c // Run tracing dev.client_graph.reset(); - try dev.traceAllRouteImports(route_bundle, >s, .{ .find_client_modules = true }); + try dev.traceAllRouteImports(route_bundle, >s, .find_client_modules); + + var react_fast_refresh_id: []const u8 = ""; + if (dev.framework.react_fast_refresh) |rfr| brk: { + const rfr_index = dev.client_graph.getFileIndex(rfr.import_source) orelse + break :brk; + if (!dev.client_graph.stale_files.isSet(rfr_index.get())) { + try dev.client_graph.traceImports(rfr_index, >s, .find_client_modules); + react_fast_refresh_id = dev.relativePath(rfr.import_source); + } + } + + const client_file: ?IncrementalGraph(.client).FileIndex = switch (route_bundle.data) { + .framework => |fw| if (dev.router.typePtr(dev.router.routePtr(fw.route_index).type).client_file.unwrap()) |ofi| + fromOpaqueFileId(.client, ofi) + else + null, + .html => |html| html.bundled_file, + }; - const client_file = dev.router.typePtr(dev.router.routePtr(route_bundle.route).type).client_file.unwrap() orelse - @panic("No client side entrypoint in client bundle"); + const hash = hash: { + var source_map_hash: bun.bundle_v2.ContentHasher.Hash = .init(0x4b10); // arbitrarily different seed than what .initial_response uses + const keys = dev.client_graph.bundled_files.keys(); + for (dev.client_graph.current_chunk_parts.items) |part| { + source_map_hash.update(keys[part.get()]); + source_map_hash.update(dev.client_graph.source_maps.items[part.get()].vlq_chunk.slice()); + } + break :hash source_map_hash.final(); + }; + // Insert the source map + if (try dev.assets.putOrIncrementRefCount(hash, 1)) 
|static_route_ptr| { + // TODO: this asset is never unreferenced + const source_map = try dev.client_graph.takeSourceMap(.initial_response, sfa, dev.allocator); + errdefer dev.allocator.free(source_map); + static_route_ptr.* = StaticRoute.initFromAnyBlob(.fromOwnedSlice(dev.allocator, source_map), .{ + .server = dev.server.?, + .mime_type = .json, + }); + } - return dev.client_graph.takeBundle( - .initial_response, - dev.relativePath(dev.client_graph.bundled_files.keys()[fromOpaqueFileId(.client, client_file).get()]), - ); + const client_bundle = dev.client_graph.takeJSBundle(.{ + .kind = .initial_response, + .initial_response_entry_point = if (client_file) |index| + dev.relativePath(dev.client_graph.bundled_files.keys()[index.get()]) + else + "", + .react_refresh_entry_point = react_fast_refresh_id, + .source_map_id = hash, + }); + + const source_map = try dev.client_graph.takeSourceMap(.initial_response, sfa, dev.allocator); + dev.allocator.free(source_map); + + return client_bundle; } fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.JSValue { - if (Environment.allow_assert) assert(!route_bundle.cached_css_file_array.has()); + assert(route_bundle.data == .framework); // a JSC.JSValue has no purpose, and therefore isn't implemented. + if (Environment.allow_assert) assert(!route_bundle.data.framework.cached_css_file_array.has()); assert(route_bundle.server_state == .loaded); // page is unfit to load dev.graph_safety_lock.lock(); @@ -1045,39 +1718,50 @@ fn generateCssJSArray(dev: *DevServer, route_bundle: *RouteBundle) bun.OOM!JSC.J // Run tracing dev.client_graph.reset(); - try dev.traceAllRouteImports(route_bundle, >s, .{ .find_css = true }); + try dev.traceAllRouteImports(route_bundle, >s, .find_css); const names = dev.client_graph.current_css_files.items; const arr = JSC.JSArray.createEmpty(dev.vm.global, names.len); for (names, 0..) 
|item, i| { - const str = bun.String.createUTF8(item); + var buf: [asset_prefix.len + @sizeOf(u64) * 2 + "/.css".len]u8 = undefined; + const path = std.fmt.bufPrint(&buf, asset_prefix ++ "/{s}.css", .{ + &std.fmt.bytesToHex(std.mem.asBytes(&item), .lower), + }) catch unreachable; + const str = bun.String.createUTF8(path); defer str.deref(); arr.putIndex(dev.vm.global, @intCast(i), str.toJS(dev.vm.global)); } return arr; } -fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, gts: *GraphTraceState, goal: TraceImportGoal) !void { - var route = dev.router.routePtr(route_bundle.route); - const router_type = dev.router.typePtr(route.type); +fn traceAllRouteImports(dev: *DevServer, route_bundle: *RouteBundle, gts: *GraphTraceState, comptime goal: TraceImportGoal) !void { + switch (route_bundle.data) { + .framework => |fw| { + var route = dev.router.routePtr(fw.route_index); + const router_type = dev.router.typePtr(route.type); - // Both framework entry points are considered - try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), gts, .{ .find_css = true }); - if (router_type.client_file.unwrap()) |id| { - try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), gts, goal); - } + // Both framework entry points are considered + try dev.server_graph.traceImports(fromOpaqueFileId(.server, router_type.server_file), gts, .find_css); + if (router_type.client_file.unwrap()) |id| { + try dev.client_graph.traceImports(fromOpaqueFileId(.client, id), gts, goal); + } - // The route file is considered - if (route.file_page.unwrap()) |id| { - try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal); - } + // The route file is considered + if (route.file_page.unwrap()) |id| { + try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal); + } - // For all parents, the layout is considered - while (true) { - if (route.file_layout.unwrap()) |id| { - try 
dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal); - } - route = dev.router.routePtr(route.parent.unwrap() orelse break); + // For all parents, the layout is considered + while (true) { + if (route.file_layout.unwrap()) |id| { + try dev.server_graph.traceImports(fromOpaqueFileId(.server, id), gts, goal); + } + route = dev.router.routePtr(route.parent.unwrap() orelse break); + } + }, + .html => |html| { + try dev.client_graph.traceImports(html.bundled_file, gts, goal); + }, } } @@ -1138,10 +1822,33 @@ pub const HotUpdateContext = struct { pub fn finalizeBundle( dev: *DevServer, bv2: *bun.bundle_v2.BundleV2, - result: bun.bundle_v2.BakeBundleOutput, + result: bun.bundle_v2.DevServerOutput, ) bun.OOM!void { - defer dev.startNextBundleIfPresent(); + defer { + bv2.deinit(); + dev.current_bundle = null; + + dev.assets.reindexIfNeeded(dev.allocator) catch { + // not fatal: the assets may be reindexed some time later. + }; + + dev.startNextBundleIfPresent(); + + // Unref the ref added in `startAsyncBundle` + if (dev.server) |server| server.onStaticRequestComplete(); + } const current_bundle = &dev.current_bundle.?; + defer { + if (current_bundle.requests.first != null) { + // cannot be an assertion because in the case of error.OutOfMemory, the request list was not drained. + Output.debug("current_bundle.requests.first != null. 
this leaves pending requests without an error page!", .{}); + } + while (current_bundle.requests.popFirst()) |node| { + defer dev.deferred_request_pool.put(node); + const req = &node.data; + req.abort(); + } + } dev.graph_safety_lock.lock(); defer dev.graph_safety_lock.unlock(); @@ -1185,10 +1892,17 @@ pub fn finalizeBundle( js_chunk.compile_results_for_chunk, ) |part_range, compile_result| { const index = part_range.source_index; + const source_map: SourceMap.Chunk = compile_result.sourceMapChunk() orelse brk: { + // The source map is `null` if empty + bun.assert(compile_result.javascript.result == .result); + bun.assert(dev.server_transpiler.options.source_map != .none); + bun.assert(!part_range.source_index.isRuntime()); + break :brk .empty; + }; switch (targets[part_range.source_index.get()].bakeGraph()) { - .server => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false), - .ssr => try dev.server_graph.receiveChunk(&ctx, index, compile_result.code(), .js, true), - .client => try dev.client_graph.receiveChunk(&ctx, index, compile_result.code(), .js, false), + .server => try dev.server_graph.receiveChunk(&ctx, index, .{ .js = compile_result.code() }, source_map, false), + .ssr => try dev.server_graph.receiveChunk(&ctx, index, .{ .js = compile_result.code() }, source_map, true), + .client => try dev.client_graph.receiveChunk(&ctx, index, .{ .js = compile_result.code() }, source_map, false), } } @@ -1203,16 +1917,41 @@ pub fn finalizeBundle( chunk, result.chunks, null, - false, // TODO: sourcemaps true + false, ); // Create an entry for this file. 
const key = ctx.sources[index.get()].path.keyForIncrementalGraph(); + // const hash = brk: { + // var hash: ContentHasher.Hash = .init(0x9a4e); // arbitrary seed + // hash.update(key); + // hash.update(code.buffer); + // break :brk hash.final(); + // }; + // TODO: use a hash mix with the first half being a path hash (to identify files) and + // the second half to be the content hash (to know if the file has changed) + const hash = bun.hash(key); + const asset_index = (try dev.assets.replacePath( + key, + .fromOwnedSlice(dev.allocator, code.buffer), + .css, + hash, + )).index; // Later code needs to retrieve the CSS content // The hack is to use `entry_point_id`, which is otherwise unused, to store an index. - chunk.entry_point.entry_point_id = try dev.insertOrUpdateCssAsset(key, code.buffer); + chunk.entry_point.entry_point_id = asset_index; + + // Track css files that look like tailwind files. + if (dev.has_tailwind_plugin_hack) |*map| { + const first_1024 = code.buffer[0..@min(code.buffer.len, 1024)]; + if (std.mem.indexOf(u8, first_1024, "tailwind") != null) { + try map.put(dev.allocator, key, {}); + } else { + _ = map.swapRemove(key); + } + } - try dev.client_graph.receiveChunk(&ctx, index, "", .css, false); + try dev.client_graph.receiveChunk(&ctx, index, .{ .css = hash }, null, false); // If imported on server, there needs to be a server-side file entry // so that edges can be attached. When a file is only imported on @@ -1226,6 +1965,44 @@ pub fn finalizeBundle( } } + for (result.htmlChunks()) |*chunk| { + const index = bun.JSAst.Index.init(chunk.entry_point.source_index); + const compile_result = chunk.compile_results_for_chunk[0].html; + const generated_js = try dev.getJavaScriptCodeForHTMLFile( + index, + import_records, + input_file_sources, + bv2.graph.input_files.items(.loader), + ); + try dev.client_graph.receiveChunk( + &ctx, + index, + .{ .js = generated_js }, + null, // HTML chunk does not have a source map. 
+ false, + ); + const client_index = ctx.getCachedIndex(.client, index).*; + const route_bundle_index = dev.client_graph.htmlRouteBundleIndex(client_index); + const route_bundle = dev.routeBundlePtr(route_bundle_index); + assert(route_bundle.data.html.bundled_file == client_index); + const html = &route_bundle.data.html; + + if (html.cached_response) |blob| { + blob.deref(); + html.cached_response = null; + route_bundle.invalidateClientBundle(); + } + if (html.bundled_html_text) |slice| { + dev.allocator.free(slice); + } + html.bundled_html_text = compile_result.code; + + html.head_end_tag_index = .init(compile_result.offsets.head_end_tag); + html.body_end_tag_index = .init(compile_result.offsets.body_end_tag); + + chunk.entry_point.entry_point_id = @intCast(route_bundle_index.get()); + } + var gts = try dev.initGraphTraceState(bv2.graph.allocator); defer gts.deinit(bv2.graph.allocator); ctx.gts = >s; @@ -1242,9 +2019,14 @@ pub fn finalizeBundle( .client => try dev.client_graph.processChunkDependencies(&ctx, part_range.source_index, bv2.graph.allocator), } } + for (result.htmlChunks()) |*chunk| { + const index = bun.JSAst.Index.init(chunk.entry_point.source_index); + try dev.client_graph.processChunkDependencies(&ctx, index, bv2.graph.allocator); + } for (result.cssChunks(), result.css_file_list.values()) |*chunk, metadata| { const index = bun.JSAst.Index.init(chunk.entry_point.source_index); - // TODO: index css deps + // TODO: index css deps. this must add all recursively referenced files + // as dependencies of the entry point, instead of building a recursive tree. _ = index; _ = metadata; } @@ -1266,8 +2048,8 @@ pub fn finalizeBundle( } // Load all new chunks into the server runtime. 
- if (dev.server_graph.current_chunk_len > 0) { - const server_bundle = try dev.server_graph.takeBundle(.hmr_chunk, ""); + if (!dev.frontend_only and dev.server_graph.current_chunk_len > 0) { + const server_bundle = try dev.server_graph.takeJSBundle(.{ .kind = .hmr_chunk }); defer dev.allocator.free(server_bundle); const server_modules = c.BakeLoadServerHmrPatch(@ptrCast(dev.vm.global), bun.String.createLatin1(server_bundle)) catch |err| { @@ -1315,7 +2097,8 @@ pub fn finalizeBundle( // It was discovered that if a tree falls with nobody around it, it does not // make any sound. Let's avoid writing into `w` if no sockets are open. - const will_hear_hot_update = dev.numSubscribers(.hot_update) > 0; + const hot_update_subscribers = dev.numSubscribers(.hot_update); + const will_hear_hot_update = hot_update_subscribers > 0; // This list of routes affected excludes client code. This means changing // a client component wont count as a route to trigger a reload on. @@ -1326,20 +2109,25 @@ pub fn finalizeBundle( // clear for those) if (will_hear_hot_update and current_bundle.had_reload_event and - dev.incremental_result.routes_affected.items.len > 0 and + (dev.incremental_result.framework_routes_affected.items.len + + dev.incremental_result.html_routes_affected.items.len) > 0 and dev.bundling_failures.count() == 0) { has_route_bits_set = true; // A bit-set is used to avoid duplicate entries. 
This is not a problem - // with `dev.incremental_result.routes_affected` - for (dev.incremental_result.routes_affected.items) |request| { + // with `dev.incremental_result.framework_routes_affected` + for (dev.incremental_result.framework_routes_affected.items) |request| { const route = dev.router.routePtr(request.route_index); if (route.bundle.unwrap()) |id| route_bits.set(id.get()); if (request.should_recurse_when_visiting) { markAllRouteChildren(&dev.router, 1, .{&route_bits}, request.route_index); } } + for (dev.incremental_result.html_routes_affected.items) |route_bundle_index| { + route_bits.set(route_bundle_index.get()); + route_bits_client.set(route_bundle_index.get()); + } // List 1 var it = route_bits.iterator(.{ .kind = .set }); @@ -1358,7 +2146,7 @@ pub fn finalizeBundle( if (dev.incremental_result.client_components_affected.items.len > 0) { has_route_bits_set = true; - dev.incremental_result.routes_affected.clearRetainingCapacity(); + dev.incremental_result.framework_routes_affected.clearRetainingCapacity(); gts.clear(); for (dev.incremental_result.client_components_affected.items) |index| { @@ -1367,7 +2155,7 @@ pub fn finalizeBundle( // A bit-set is used to avoid duplicate entries. 
This is not a problem // with `dev.incremental_result.routes_affected` - for (dev.incremental_result.routes_affected.items) |request| { + for (dev.incremental_result.framework_routes_affected.items) |request| { const route = dev.router.routePtr(request.route_index); if (route.bundle.unwrap()) |id| { route_bits.set(id.get()); @@ -1382,10 +2170,19 @@ pub fn finalizeBundle( var it = route_bits_client.iterator(.{ .kind = .set }); while (it.next()) |bundled_route_index| { const bundle = &dev.route_bundles.items[bundled_route_index]; - if (bundle.client_bundle) |old| { - dev.allocator.free(old); - } - bundle.client_bundle = null; + bundle.invalidateClientBundle(); + } + } else if (dev.incremental_result.html_routes_affected.items.len > 0) { + // When only HTML routes were affected, there may not be any client + // components that got affected, but the bundles for these HTML routes + // are invalid now. That is why HTML routes above writes into + // `route_bits_client`. + + // Free old bundles + var it = route_bits_client.iterator(.{ .kind = .set }); + while (it.next()) |bundled_route_index| { + const bundle = &dev.route_bundles.items[bundled_route_index]; + bundle.invalidateClientBundle(); } } @@ -1398,29 +2195,30 @@ pub fn finalizeBundle( var it = route_bits.iterator(.{ .kind = .set }); // List 2 while (it.next()) |i| { - const bundle = dev.routeBundlePtr(RouteBundle.Index.init(@intCast(i))); + const route_bundle = dev.routeBundlePtr(RouteBundle.Index.init(@intCast(i))); if (dev.incremental_result.had_adjusted_edges) { - bundle.cached_css_file_array.clear(); + switch (route_bundle.data) { + .framework => |*fw_bundle| fw_bundle.cached_css_file_array.clear(), + .html => |*html| if (html.cached_response) |blob| { + blob.deref(); + html.cached_response = null; + }, + } } - if (bundle.active_viewers == 0 or !will_hear_hot_update) continue; + if (route_bundle.active_viewers == 0 or !will_hear_hot_update) continue; try w.writeInt(i32, @intCast(i), .little); - try 
w.writeInt(u32, @intCast(bundle.full_pattern.len), .little); - try w.writeAll(bundle.full_pattern); // If no edges were changed, then it is impossible to // change the list of CSS files. if (dev.incremental_result.had_adjusted_edges) { gts.clear(); - try dev.traceAllRouteImports(bundle, >s, .{ .find_css = true }); - const names = dev.client_graph.current_css_files.items; - - try w.writeInt(i32, @intCast(names.len), .little); - for (names) |name| { - const css_prefix_slash = css_prefix ++ "/"; - // These slices are url pathnames. The ID can be extracted - bun.assert(name.len == (css_prefix_slash ++ ".css").len + 16); - bun.assert(bun.strings.hasPrefix(name, css_prefix_slash)); - try w.writeAll(name[css_prefix_slash.len..][0..16]); + dev.client_graph.current_css_files.clearRetainingCapacity(); + try dev.traceAllRouteImports(route_bundle, >s, .find_css); + const css_ids = dev.client_graph.current_css_files.items; + + try w.writeInt(i32, @intCast(css_ids.len), .little); + for (css_ids) |css_id| { + try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&css_id), .lower)); } } else { try w.writeInt(i32, -1, .little); @@ -1433,19 +2231,42 @@ pub fn finalizeBundle( const css_chunks = result.cssChunks(); if (will_hear_hot_update) { if (dev.client_graph.current_chunk_len > 0 or css_chunks.len > 0) { - const css_values = dev.css_files.values(); + const asset_values = dev.assets.files.values(); try w.writeInt(u32, @intCast(css_chunks.len), .little); const sources = bv2.graph.input_files.items(.source); for (css_chunks) |chunk| { const key = sources[chunk.entry_point.source_index].path.keyForIncrementalGraph(); try w.writeAll(&std.fmt.bytesToHex(std.mem.asBytes(&bun.hash(key)), .lower)); - const css_data = css_values[chunk.entry_point.entry_point_id]; + const css_data = asset_values[chunk.entry_point.entry_point_id].blob.InternalBlob.bytes.items; try w.writeInt(u32, @intCast(css_data.len), .little); try w.writeAll(css_data); } - if (dev.client_graph.current_chunk_len > 0) - try 
dev.client_graph.takeBundleToList(.hmr_chunk, &hot_update_payload, ""); + if (dev.client_graph.current_chunk_len > 0) { + const hash = hash: { + var source_map_hash: bun.bundle_v2.ContentHasher.Hash = .init(0x4b12); // arbitrarily different seed than what .initial_response uses + const keys = dev.client_graph.bundled_files.keys(); + for (dev.client_graph.current_chunk_parts.items) |part| { + source_map_hash.update(keys[part.get()]); + source_map_hash.update(dev.client_graph.source_maps.items[part.get()].vlq_chunk.slice()); + } + break :hash source_map_hash.final(); + }; + // Insert the source map + if (try dev.assets.putOrIncrementRefCount(hash, hot_update_subscribers)) |static_route_ptr| { + const source_map = try dev.client_graph.takeSourceMap(.hmr_chunk, bv2.graph.allocator, dev.allocator); + errdefer dev.allocator.free(source_map); + static_route_ptr.* = StaticRoute.initFromAnyBlob(.fromOwnedSlice(dev.allocator, source_map), .{ + .server = dev.server.?, + .mime_type = .json, + }); + } + // Build and send the source chunk + try dev.client_graph.takeJSBundleToList(&hot_update_payload, .{ + .kind = .hmr_chunk, + .source_map_id = hash, + }); + } } else { try w.writeInt(i32, 0, .little); } @@ -1456,17 +2277,21 @@ pub fn finalizeBundle( if (dev.incremental_result.failures_added.items.len > 0) { dev.bundles_since_last_error = 0; - for (dev.current_bundle_requests.items) |*req| { + while (current_bundle.requests.popFirst()) |node| { + defer dev.deferred_request_pool.put(node); + const req = &node.data; + const rb = dev.routeBundlePtr(req.route_bundle_index); rb.server_state = .possible_bundling_failures; - const resp: *Response = switch (req.data) { + const resp: AnyResponse = switch (req.handler) { + .aborted => continue, .server_handler => |*saved| brk: { - const resp = saved.response.TCP; + const resp = saved.response; saved.deinit(); break :brk resp; }, - .js_payload => |resp| resp, + .bundled_html_page => |ram| ram.response, }; resp.corked(sendSerializedFailures, 
.{ @@ -1479,7 +2304,6 @@ pub fn finalizeBundle( return; } - // TODO: improve this visual feedback if (dev.bundling_failures.count() == 0) { if (current_bundle.had_reload_event) { const clear_terminal = !debug.isVisible(); @@ -1489,35 +2313,53 @@ pub fn finalizeBundle( Output.enableBuffering(); } + if (Environment.isDebug and memoryLog.isVisible()) { + Output.prettyErrorln("DevServer: {}, RSS: {}", .{ + bun.fmt.size(dev.memoryCost(), .{}), + bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}), + }); + } + dev.bundles_since_last_error += 1; if (dev.bundles_since_last_error > 1) { Output.prettyError("[x{d}] ", .{dev.bundles_since_last_error}); } } else { dev.bundles_since_last_error = 0; + if (Environment.isDebug and memoryLog.isVisible()) { + Output.prettyErrorln("DevServer: {}, RSS: {}", .{ + bun.fmt.size(dev.memoryCost(), .{}), + bun.fmt.size(bun.sys.selfProcessMemoryUsage() orelse 0, .{}), + }); + } } Output.prettyError("{s} in {d}ms", .{ - if (current_bundle.had_reload_event) "Reloaded" else "Bundled route", + if (current_bundle.had_reload_event) + "Reloaded" + else + "Bundled page", @divFloor(current_bundle.timer.read(), std.time.ns_per_ms), }); // Compute a file name to display - const file_name: ?[]const u8, const total_count: usize = if (current_bundle.had_reload_event) - .{ null, 0 } - else first_route_file_name: { - const opaque_id = dev.router.routePtr( - dev.routeBundlePtr(dev.current_bundle_requests.items[0].route_bundle_index) - .route, - ).file_page.unwrap() orelse - break :first_route_file_name .{ null, 0 }; - const server_index = fromOpaqueFileId(.server, opaque_id); - - break :first_route_file_name .{ - dev.relativePath(dev.server_graph.bundled_files.keys()[server_index.get()]), - 0, - }; + const file_name: ?[]const u8 = if (current_bundle.had_reload_event) + dev.relativePath( + bv2.graph.input_files.items(.source)[bv2.graph.entry_points.items[0].get()].path.text, + ) + else switch 
(dev.routeBundlePtr(current_bundle.requests.first.?.data.route_bundle_index).data) { + .html => |html| dev.relativePath(html.html_bundle.html_bundle.path), + .framework => |fw| file_name: { + const route = dev.router.routePtr(fw.route_index); + const opaque_id = route.file_page.unwrap() orelse + route.file_layout.unwrap() orelse + break :file_name null; + const server_index = fromOpaqueFileId(.server, opaque_id); + const abs_path = dev.server_graph.bundled_files.keys()[server_index.get()]; + break :file_name dev.relativePath(abs_path); + }, }; + const total_count = bv2.graph.entry_points.items.len; if (file_name) |name| { Output.prettyError(": {s}", .{name}); if (total_count > 1) { @@ -1532,26 +2374,29 @@ pub fn finalizeBundle( dev.graph_safety_lock.unlock(); defer dev.graph_safety_lock.lock(); - for (dev.current_bundle_requests.items) |req| { + while (current_bundle.requests.popFirst()) |node| { + defer dev.deferred_request_pool.put(node); + const req = &node.data; + const rb = dev.routeBundlePtr(req.route_bundle_index); rb.server_state = .loaded; - switch (req.data) { - .server_handler => |saved| dev.onRequestWithBundle(req.route_bundle_index, .{ .saved = saved }, saved.response.TCP), - .js_payload => |resp| dev.onJsRequestWithBundle(req.route_bundle_index, resp), + switch (req.handler) { + .aborted => continue, + .server_handler => |saved| dev.onFrameworkRequestWithBundle(req.route_bundle_index, .{ .saved = saved }, saved.response), + .bundled_html_page => |ram| dev.onHtmlRequestWithBundle(req.route_bundle_index, ram.response, ram.method), } } } fn startNextBundleIfPresent(dev: *DevServer) void { // Clear the current bundle - dev.current_bundle = null; + assert(dev.current_bundle == null); dev.log.clearAndFree(); - dev.current_bundle_requests.clearRetainingCapacity(); - dev.emitVisualizerMessageIfNeeded() catch {}; + dev.emitVisualizerMessageIfNeeded(); // If there were pending requests, begin another bundle. 
- if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.items.len > 0) { + if (dev.next_bundle.reload_event != null or dev.next_bundle.requests.first != null) { var sfb = std.heap.stackFallback(4096, bun.default_allocator); const temp_alloc = sfb.get(); var entry_points: EntryPointList = EntryPointList.empty; @@ -1569,7 +2414,7 @@ fn startNextBundleIfPresent(dev: *DevServer) void { for (dev.next_bundle.route_queue.keys()) |route_bundle_index| { const rb = dev.routeBundlePtr(route_bundle_index); rb.server_state = .bundling; - dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, rb.route) catch bun.outOfMemory(); + dev.appendRouteEntryPointsIfNotStale(&entry_points, temp_alloc, route_bundle_index) catch bun.outOfMemory(); } dev.startAsyncBundle( @@ -1583,27 +2428,24 @@ fn startNextBundleIfPresent(dev: *DevServer) void { } } -fn insertOrUpdateCssAsset(dev: *DevServer, abs_path: []const u8, code: []const u8) !Chunk.EntryPoint.ID { - const path_hash = bun.hash(abs_path); - const gop = try dev.css_files.getOrPut(dev.allocator, path_hash); - if (gop.found_existing) { - dev.allocator.free(gop.value_ptr.*); - } - gop.value_ptr.* = code; - return @intCast(gop.index); -} - /// Note: The log is not consumed here pub fn handleParseTaskFailure( dev: *DevServer, err: anyerror, graph: bake.Graph, - key: []const u8, + abs_path: []const u8, log: *const Log, ) bun.OOM!void { dev.graph_safety_lock.lock(); defer dev.graph_safety_lock.unlock(); + debug.log("handleParseTaskFailure({}, .{s}, {}, {d} messages)", .{ + err, + @tagName(graph), + bun.fmt.quote(abs_path), + log.msgs.items.len, + }); + if (err == error.FileNotFound) { // Special-case files being deleted. Note that if a // file never existed, resolution would fail first. @@ -1611,28 +2453,51 @@ pub fn handleParseTaskFailure( // TODO: this should walk up the graph one level, and queue all of these // files for re-bundling if they aren't already in the BundleV2 graph. 
switch (graph) { - .server, .ssr => try dev.server_graph.onFileDeleted(key, log), - .client => try dev.client_graph.onFileDeleted(key, log), + .server, .ssr => try dev.server_graph.onFileDeleted(abs_path, log), + .client => try dev.client_graph.onFileDeleted(abs_path, log), } } else { Output.prettyErrorln("Error{s} while bundling \"{s}\":", .{ if (log.errors +| log.warnings != 1) "s" else "", - dev.relativePath(key), + dev.relativePath(abs_path), }); log.print(Output.errorWriterBuffered()) catch {}; Output.flush(); // Do not index css errors - if (!bun.strings.hasSuffixComptime(key, ".css")) { + if (!bun.strings.hasSuffixComptime(abs_path, ".css")) { switch (graph) { - .server => try dev.server_graph.insertFailure(key, log, false), - .ssr => try dev.server_graph.insertFailure(key, log, true), - .client => try dev.client_graph.insertFailure(key, log, false), + .server => try dev.server_graph.insertFailure(.abs_path, abs_path, log, false), + .ssr => try dev.server_graph.insertFailure(.abs_path, abs_path, log, true), + .client => try dev.client_graph.insertFailure(.abs_path, abs_path, log, false), } } } } +/// Return a log to write resolution failures into. 
+pub fn getLogForResolutionFailures(dev: *DevServer, abs_path: []const u8, graph: bake.Graph) !*bun.logger.Log { + assert(dev.current_bundle != null); + const current_bundle = &dev.current_bundle.?; + + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); + + const owner = switch (graph == .client) { + inline else => |is_client| @unionInit( + SerializedFailure.Owner, + if (is_client) "client" else "server", + try (if (is_client) dev.client_graph else dev.server_graph) + .insertStale(abs_path, !is_client and graph == .ssr), + ).encode(), + }; + const gop = try current_bundle.resolution_failure_entries.getOrPut(current_bundle.bv2.graph.allocator, owner); + if (!gop.found_existing) { + gop.value_ptr.* = bun.logger.Log.init(current_bundle.bv2.graph.allocator); + } + return gop.value_ptr; +} + const CacheEntry = struct { kind: FileKind, }; @@ -1685,72 +2550,86 @@ pub fn routeBundlePtr(dev: *DevServer, idx: RouteBundle.Index) *RouteBundle { return &dev.route_bundles.items[idx.get()]; } -fn onRequest(dev: *DevServer, req: *Request, resp: *Response) void { +fn onRequest(dev: *DevServer, req: *Request, resp: anytype) void { var params: FrameworkRouter.MatchedParams = undefined; if (dev.router.matchSlow(req.url(), ¶ms)) |route_index| { - dev.ensureRouteIsBundled(route_index, .server_handler, req, resp) catch bun.outOfMemory(); + dev.ensureRouteIsBundled( + dev.getOrPutRouteBundle(.{ .framework = route_index }) catch bun.outOfMemory(), + .server_handler, + req, + AnyResponse.init(resp), + ) catch bun.outOfMemory(); return; } switch (dev.server.?) { - inline .DebugHTTPServer, .HTTPServer => |s| if (s.config.onRequest != .zero) { - s.onRequest(req, resp); - return; + inline else => |s| { + if (@typeInfo(@TypeOf(s.app.?)).pointer.child.Response != @typeInfo(@TypeOf(resp)).pointer.child) { + unreachable; // mismatch between `is_ssl` with server and response types. optimize these checks out. 
+ } + if (s.config.onRequest != .zero) { + s.onRequest(req, resp); + return; + } }, - else => @panic("TODO: HTTPS"), } sendBuiltInNotFound(resp); } -fn getOrPutRouteBundle(dev: *DevServer, route: Route.Index) !RouteBundle.Index { - if (dev.router.routePtr(route).bundle.unwrap()) |bundle_index| - return bundle_index; +pub fn respondForHTMLBundle(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute, req: *uws.Request, resp: AnyResponse) !void { + try dev.ensureRouteIsBundled(try dev.getOrPutRouteBundle(.{ .html = html }), .bundled_html_page, req, resp); +} - const full_pattern = full_pattern: { - var buf = bake.PatternBuffer.empty; - var current: *Route = dev.router.routePtr(route); - // This loop is done to avoid prepending `/` at the root - // if there is more than one component. - buf.prependPart(current.part); - if (current.parent.unwrap()) |first| { - current = dev.router.routePtr(first); - while (current.parent.unwrap()) |next| { - buf.prependPart(current.part); - current = dev.router.routePtr(next); - } - } - break :full_pattern try dev.allocator.dupe(u8, buf.slice()); +fn getOrPutRouteBundle(dev: *DevServer, route: RouteBundle.UnresolvedIndex) !RouteBundle.Index { + const index_location: *RouteBundle.Index.Optional = switch (route) { + .framework => |route_index| &dev.router.routePtr(route_index).bundle, + .html => |html| &html.dev_server_id, }; - errdefer dev.allocator.free(full_pattern); + if (index_location.unwrap()) |bundle_index| { + return bundle_index; + } + + dev.graph_safety_lock.lock(); + defer dev.graph_safety_lock.unlock(); - try dev.route_bundles.append(dev.allocator, .{ - .route = route, + const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len)); + + try dev.route_bundles.ensureUnusedCapacity(dev.allocator, 1); + dev.route_bundles.appendAssumeCapacity(.{ + .data = switch (route) { + .framework => |route_index| .{ .framework = .{ + .route_index = route_index, + .evaluate_failure = null, + .cached_module_list = .{}, + 
.cached_client_bundle_url = .{}, + .cached_css_file_array = .{}, + } }, + .html => |html| brk: { + const incremental_graph_index = try dev.client_graph.insertStaleExtra(html.html_bundle.path, false, true); + dev.client_graph.source_maps.items[incremental_graph_index.get()].extra.empty.html_bundle_route_index = .init(bundle_index.get()); + break :brk .{ .html = .{ + .html_bundle = html, + .bundled_file = incremental_graph_index, + .head_end_tag_index = .none, + .body_end_tag_index = .none, + .cached_response = null, + .bundled_html_text = null, + } }; + }, + }, + .client_script_generation = 0, .server_state = .unqueued, - .full_pattern = full_pattern, .client_bundle = null, - .evaluate_failure = null, - .cached_module_list = .{}, - .cached_client_bundle_url = .{}, - .cached_css_file_array = .{}, .active_viewers = 0, }); - const bundle_index = RouteBundle.Index.init(@intCast(dev.route_bundles.items.len - 1)); - dev.router.routePtr(route).bundle = bundle_index.toOptional(); + index_location.* = bundle_index.toOptional(); return bundle_index; } -fn sendTextFile(code: []const u8, content_type: []const u8, resp: *Response) void { - if (code.len == 0) { - resp.writeStatus("202 No Content"); - resp.writeHeaderInt("Content-Length", 0); - resp.end("", true); - return; - } - - resp.writeStatus("200 OK"); - resp.writeHeader("Content-Type", content_type); - resp.end(code, true); // TODO: You should never call res.end(huge buffer) +fn registerCatchAllHtmlRoute(dev: *DevServer, html: *HTMLBundle.HTMLBundleRoute) !void { + const bundle_index = try getOrPutRouteBundle(dev, .{ .html = html }); + dev.html_router.fallback = bundle_index.toOptional(); } const ErrorPageKind = enum { @@ -1764,10 +2643,22 @@ const ErrorPageKind = enum { fn sendSerializedFailures( dev: *DevServer, - resp: *Response, + resp: AnyResponse, failures: []const SerializedFailure, kind: ErrorPageKind, ) void { + switch (resp) { + inline else => |r| sendSerializedFailuresInner(dev, r, failures, kind), + } +} + +fn 
sendSerializedFailuresInner( + dev: *DevServer, + resp: anytype, + failures: []const SerializedFailure, + kind: ErrorPageKind, +) void { + // TODO: write to Blob and serve that resp.writeStatus("500 Internal Server Error"); resp.writeHeader("Content-Type", MimeType.html.value); @@ -1783,11 +2674,11 @@ fn sendSerializedFailures( \\ \\ \\ - \\ + \\ \\", .{js_chunk.unique_key}) catch bun.outOfMemory(); + defer allocator.free(script); + element.append(script, true) catch bun.outOfMemory(); + } + } else { + element.onEndTag(endHeadTagHandler, this) catch return true; } - if (this.chunk.getJSChunkForHTML(this.chunks)) |js_chunk| { - const script = std.fmt.allocPrintZ(allocator, "", .{js_chunk.unique_key}) catch bun.outOfMemory(); - defer allocator.free(script); - element.append(script, true) catch bun.outOfMemory(); + return false; + } + + pub fn onBodyTag(this: *@This(), element: *lol.Element) bool { + if (this.linker.dev_server != null) { + element.onEndTag(endBodyTagHandler, this) catch return true; } + return false; } - const processor = HTMLScanner.HTMLProcessor(@This(), true); + fn endHeadTagHandler(_: *lol.EndTag, opaque_this: ?*anyopaque) callconv(.C) lol.Directive { + const this: *@This() = @alignCast(@ptrCast(opaque_this.?)); + this.head_end_tag_index = @intCast(this.output.items.len); + return .@"continue"; + } - pub fn run(this: *@This(), input: []const u8) !void { - processor.run(this, input) catch bun.outOfMemory(); + fn endBodyTagHandler(_: *lol.EndTag, opaque_this: ?*anyopaque) callconv(.C) lol.Directive { + const this: *@This() = @alignCast(@ptrCast(opaque_this.?)); + this.body_end_tag_index = @intCast(this.output.items.len); + return .@"continue"; } }; - var html_loader = HTMLLoader{ + // HTML bundles for Bake must be globally allocated, as it must outlive + // the bundle task. 
See `DevServer.RouteBundle.HTML.bundled_html_text` + const output_allocator = if (c.dev_server != null) bun.default_allocator else worker.allocator; + + var html_loader: HTMLLoader = .{ .linker = c, .source_index = chunk.entry_point.source_index, .import_records = import_records[chunk.entry_point.source_index].slice(), @@ -10208,24 +10328,29 @@ pub const LinkerContext = struct { .minify_whitespace = c.options.minify_whitespace, .chunk = chunk, .chunks = chunks, - .output = std.ArrayList(u8).init(worker.allocator), + .output = std.ArrayList(u8).init(output_allocator), .current_import_record_index = 0, }; - html_loader.run(sources[chunk.entry_point.source_index].contents) catch bun.outOfMemory(); + HTMLScanner.HTMLProcessor(HTMLLoader, true).run( + &html_loader, + sources[chunk.entry_point.source_index].contents, + ) catch bun.outOfMemory(); return .{ .html = .{ .code = html_loader.output.items, .source_index = chunk.entry_point.source_index, + .offsets = .{ + .head_end_tag = html_loader.head_end_tag_index, + .body_end_tag = html_loader.body_end_tag_index, + }, }, }; } fn postProcessHTMLChunk(ctx: GenerateChunkCtx, worker: *ThreadPool.Worker, chunk: *Chunk) !void { - // This is where we split output into pieces - const c = ctx.c; var j = StringJoiner{ .allocator = worker.allocator, @@ -10315,7 +10440,7 @@ pub const LinkerContext = struct { // Save the offset to the start of the stored JavaScript j.push(compile_result.code(), bun.default_allocator); - if (compile_result.source_map_chunk()) |source_map_chunk| { + if (compile_result.sourceMapChunk()) |source_map_chunk| { if (c.options.source_maps != .none) { try compile_results_for_source_map.append(worker.allocator, CompileResultForSourceMap{ .source_map_chunk = source_map_chunk, @@ -10425,7 +10550,7 @@ pub const LinkerContext = struct { worker.allocator, c.resolver.opts.target, ast.toAST(), - c.source_(chunk.entry_point.source_index), + c.getSource(chunk.entry_point.source_index), print_options, 
cross_chunk_import_records.slice(), &[_]Part{ @@ -10438,7 +10563,7 @@ pub const LinkerContext = struct { worker.allocator, c.resolver.opts.target, ast.toAST(), - c.source_(chunk.entry_point.source_index), + c.getSource(chunk.entry_point.source_index), print_options, &.{}, &[_]Part{ @@ -10551,8 +10676,8 @@ pub const LinkerContext = struct { switch (c.options.output_format) { .internal_bake_dev => { const start = bun.bake.getHmrRuntime(if (c.options.target.isServerSide()) .server else .client); - j.pushStatic(start); - line_offset.advance(start); + j.pushStatic(start.code); + line_offset.advance(start.code); }, .iife => { // Bun does not do arrow function lowering. So the wrapper can be an arrow. @@ -10662,7 +10787,7 @@ pub const LinkerContext = struct { } else { j.push(compile_result.code(), bun.default_allocator); - if (compile_result.source_map_chunk()) |source_map_chunk| { + if (compile_result.sourceMapChunk()) |source_map_chunk| { if (c.options.source_maps != .none) { try compile_results_for_source_map.append(worker.allocator, CompileResultForSourceMap{ .source_map_chunk = source_map_chunk, @@ -11552,7 +11677,7 @@ pub const LinkerContext = struct { allocator, c.resolver.opts.target, ast.toAST(), - c.source_(source_index), + c.getSource(source_index), print_options, ast.import_records.slice(), &[_]Part{ @@ -12317,33 +12442,22 @@ pub const LinkerContext = struct { }); const module_id = Expr.initIdentifier(ast.module_ref, Logger.Loc.Empty); - // add a marker for the client runtime to tell that this is an ES module - if (ast.exports_kind == .esm) { - try stmts.inside_wrapper_prefix.append(Stmt.alloc(S.SExpr, .{ - .value = Expr.assign( - Expr.init(E.Dot, .{ - .target = Expr.initIdentifier(ast.module_ref, Loc.Empty), - .name = "__esModule", - .name_loc = Loc.Empty, - }, Loc.Empty), - Expr.init(E.Boolean, .{ .value = true }, Loc.Empty), - ), - }, Loc.Empty)); - } - for (part_stmts) |stmt| { switch (stmt.data) { else => { try stmts.inside_wrapper_suffix.append(stmt); }, 
.s_import => |st| { - // hmr-runtime.ts defines `module.importSync` to be - // a synchronous import. this is different from - // require in that esm <-> cjs is handled - // automatically, instead of with transpiler-added - // annotations like '__commonJS'. + // hmr-runtime.ts defines `module.dynamicImport` to be the + // ESM `import`. this is different from `require` in that + // esm <-> cjs is handled by the runtime instead of via + // transpiler-added annotations like '__commonJS'. These + // annotations couldn't be added since the bundled file + // must not have any reference to it's imports. That way + // changing a module's type does not re-bundle its + // incremental dependencies. // - // this cannot be done in the parse step because the final + // This cannot be done in the parse step because the final // pretty path is not yet known. the other statement types // are not handled here because some of those generate // new local variables (it is too late to do that here). @@ -12375,10 +12489,13 @@ pub const LinkerContext = struct { str.* = Expr.init(E.String, .{ .data = item.alias }, item.name.loc); } - break :call Expr.init(E.Call, .{ + const expr = Expr.init(E.Call, .{ .target = Expr.init(E.Dot, .{ .target = module_id, - .name = if (is_builtin) "importBuiltin" else "importSync", + .name = if (is_builtin) + "importBuiltin" + else + "importStmt", .name_loc = stmt.loc, }, stmt.loc), .args = js_ast.ExprNodeList.init( @@ -12407,6 +12524,10 @@ pub const LinkerContext = struct { }), ), }, stmt.loc); + break :call if (is_builtin) + expr + else + Expr.init(E.Await, .{ .value = expr }, stmt.loc); } else Expr.init(E.Object, .{}, stmt.loc); if (is_bare_import) { @@ -12459,7 +12580,7 @@ pub const LinkerContext = struct { Index.invalid; // referencing everything by array makes the code a lot more annoying :( - const ast: JSAst = c.graph.ast.get(part_range.source_index.get()); + var ast: JSAst = c.graph.ast.get(part_range.source_index.get()); // For Bun Kit, part 
generation is entirely special cased. // - export wrapping is already done. @@ -12468,6 +12589,21 @@ pub const LinkerContext = struct { if (c.options.output_format == .internal_bake_dev) { bun.assert(!part_range.source_index.isRuntime()); // embedded in HMR runtime + // add a marker for the client runtime to tell that this is an ES module + if (ast.exports_kind == .esm) { + stmts.inside_wrapper_prefix.append(Stmt.alloc(S.SExpr, .{ + .value = Expr.assign( + Expr.init(E.Dot, .{ + .target = Expr.initIdentifier(ast.module_ref, Loc.Empty), + .name = "__esModule", + .name_loc = Loc.Empty, + }, Loc.Empty), + Expr.init(E.Boolean, .{ .value = true }, Loc.Empty), + ), + }, Loc.Empty)) catch bun.outOfMemory(); + ast.top_level_await_keyword = .{ .loc = .{ .start = 0 }, .len = 1 }; + } + for (parts) |part| { c.convertStmtsForChunkForBake(part_range.source_index.get(), stmts, part.stmts, allocator, &ast) catch |err| return .{ .err = err }; @@ -13048,7 +13184,7 @@ pub const LinkerContext = struct { source_index: Index, ) js_printer.PrintResult { const parts_to_print = &[_]Part{ - Part{ .stmts = out_stmts }, + .{ .stmts = out_stmts }, }; const print_options = js_printer.Options{ @@ -13073,13 +13209,14 @@ pub const LinkerContext = struct { .has_run_symbol_renamer = true, .allocator = allocator, + .source_map_allocator = writer.buffer.allocator, .to_esm_ref = to_esm_ref, .to_commonjs_ref = to_commonjs_ref, .require_ref = switch (c.options.output_format) { - .cjs => null, + .cjs => null, // use unbounded global else => runtime_require_ref, }, - .require_or_import_meta_for_source_callback = js_printer.RequireOrImportMeta.Callback.init( + .require_or_import_meta_for_source_callback = .init( LinkerContext, requireOrImportMetaForSource, c, @@ -13104,7 +13241,7 @@ pub const LinkerContext = struct { &printer, ast.target, ast.toAST(), - c.source_(source_index.get()), + c.getSource(source_index.get()), print_options, ast.import_records.slice(), parts_to_print, @@ -13181,13 +13318,7 @@ pub 
const LinkerContext = struct { // Per CSS chunk: // Remove duplicate rules across files. This must be done in serial, not // in parallel, and must be done from the last rule to the first rule. - if (brk: { - // TODO: Have count of chunks with css on linker context? - for (chunks) |*chunk| { - if (chunk.content == .css) break :brk true; - } - break :brk false; - }) { + if (c.parse_graph.css_file_count > 0) { var wait_group = try c.allocator.create(sync.WaitGroup); wait_group.init(); defer { @@ -13218,13 +13349,17 @@ pub const LinkerContext = struct { .linker = c, .wg = wait_group, }; - batch.push(ThreadPoolLib.Batch.from(&tasks[i].task)); + batch.push(.from(&tasks[i].task)); i += 1; } } wait_group.counter = @as(u32, @truncate(total_count)); c.parse_graph.pool.pool.schedule(batch); wait_group.wait(); + } else if (Environment.isDebug) { + for (chunks) |*chunk| { + bun.assert(chunk.content != .css); + } } } @@ -13289,13 +13424,13 @@ pub const LinkerContext = struct { remaining_part_ranges[0] = .{ .part_range = part_range, - .i = @truncate(i), + .i = @intCast(i), .task = .{ .callback = &generateCompileResultForJSChunk, }, .ctx = chunk_ctx, }; - batch.push(ThreadPoolLib.Batch.from(&remaining_part_ranges[0].task)); + batch.push(.from(&remaining_part_ranges[0].task)); remaining_part_ranges = remaining_part_ranges[1..]; } @@ -13304,13 +13439,13 @@ pub const LinkerContext = struct { for (0..chunk.content.css.imports_in_chunk_in_order.len) |i| { remaining_part_ranges[0] = .{ .part_range = .{}, - .i = @as(u32, @truncate(i)), - .task = ThreadPoolLib.Task{ + .i = @intCast(i), + .task = .{ .callback = &generateCompileResultForCssChunk, }, .ctx = chunk_ctx, }; - batch.push(ThreadPoolLib.Batch.from(&remaining_part_ranges[0].task)); + batch.push(.from(&remaining_part_ranges[0].task)); remaining_part_ranges = remaining_part_ranges[1..]; } @@ -13319,13 +13454,13 @@ pub const LinkerContext = struct { remaining_part_ranges[0] = .{ .part_range = .{}, .i = 0, - .task = ThreadPoolLib.Task{ 
+ .task = .{ .callback = &generateCompileResultForHtmlChunk, }, .ctx = chunk_ctx, }; - batch.push(ThreadPoolLib.Batch.from(&remaining_part_ranges[0].task)); + batch.push(.from(&remaining_part_ranges[0].task)); remaining_part_ranges = remaining_part_ranges[1..]; }, } @@ -13343,7 +13478,7 @@ pub const LinkerContext = struct { c.source_maps.quoted_contents_tasks.len = 0; } - // For dev server, only post-process CSS chunks. + // For dev server, only post-process CSS + HTML chunks. const chunks_to_do = if (is_dev_server) chunks[1..] else chunks; if (!is_dev_server or chunks_to_do.len > 0) { bun.assert(chunks_to_do.len > 0); @@ -13368,6 +13503,7 @@ pub const LinkerContext = struct { // // - Reuse unchanged parts to assemble the full bundle if Cmd+R is used in the browser // - Send only the newly changed code through a socket. + // - Use IncrementalGraph to have full knowledge of referenced CSS files. // // When this isn't the initial bundle, concatenation as usual would produce a // broken module. It is DevServer's job to create and send HMR patches. 
@@ -14540,7 +14676,7 @@ pub const LinkerContext = struct { // Warn about importing from a file that is known to not have any exports if (status == .cjs_without_exports) { - const source = c.source_(tracker.source_index.get()); + const source = c.getSource(tracker.source_index.get()); c.log.addRangeWarningFmt( source, source.rangeOfIdentifier(named_import.alias_loc.?), @@ -14591,9 +14727,9 @@ pub const LinkerContext = struct { // Report mismatched imports and exports const symbol = c.graph.symbols.get(tracker.import_ref).?; const named_import: js_ast.NamedImport = named_imports[prev_source_index].get(tracker.import_ref).?; - const source = c.source_(prev_source_index); + const source = c.getSource(prev_source_index); - const next_source = c.source_(next_tracker.source_index.get()); + const next_source = c.getSource(next_tracker.source_index.get()); const r = source.rangeOfIdentifier(named_import.alias_loc.?); // Report mismatched imports and exports @@ -15586,7 +15722,7 @@ pub const Chunk = struct { } pub const CodeResult = struct { - buffer: string, + buffer: []u8, shifts: []sourcemap.SourceMapShifts, }; @@ -15935,7 +16071,6 @@ pub const Chunk = struct { pub const CssImportOrder = struct { conditions: BabyList(bun.css.ImportConditions) = .{}, - // TODO: unfuck this condition_import_records: BabyList(ImportRecord) = .{}, kind: union(enum) { @@ -16023,27 +16158,16 @@ pub const Chunk = struct { pub const ImportsFromOtherChunks = std.AutoArrayHashMapUnmanaged(Index.Int, CrossChunkImport.Item.List); - pub const ContentKind = enum { - javascript, - css, - html, - }; - - pub const HtmlChunk = struct {}; - - pub const Content = union(ContentKind) { + pub const Content = union(enum) { javascript: JavaScriptChunk, css: CssChunk, - html: HtmlChunk, + html, pub fn sourcemap(this: *const Content, default: options.SourceMapOption) options.SourceMapOption { return switch (this.*) { .javascript => default, - // TODO: - .css => options.SourceMapOption.none, - - // probably never - 
.html => options.SourceMapOption.none, + .css => .none, // TODO: css source maps + .html => .none, }; } @@ -16141,6 +16265,13 @@ pub const CompileResult = union(enum) { html: struct { source_index: Index.Int, code: []const u8, + /// Offsets are used for DevServer to inject resources without re-bundling + offsets: struct { + /// The index of the "<" byte of "" + head_end_tag: u32, + /// The index of the "<" byte of "" + body_end_tag: u32, + }, }, pub const empty = CompileResult{ @@ -16168,7 +16299,7 @@ pub const CompileResult = union(enum) { }; } - pub fn source_map_chunk(this: *const CompileResult) ?sourcemap.Chunk { + pub fn sourceMapChunk(this: *const CompileResult) ?sourcemap.Chunk { return switch (this.*) { .javascript => |r| switch (r.result) { .result => |r2| r2.source_map, @@ -16192,9 +16323,11 @@ const CompileResultForSourceMap = struct { source_index: u32, }; -const ContentHasher = struct { +pub const ContentHasher = struct { + pub const Hash = std.hash.XxHash64; + // xxhash64 outperforms Wyhash if the file is > 1KB or so - hasher: std.hash.XxHash64 = std.hash.XxHash64.init(0), + hasher: Hash = .init(0), const log = bun.Output.scoped(.ContentHasher, true); @@ -16262,11 +16395,9 @@ fn getRedirectId(id: u32) ?u32 { if (id == std.math.maxInt(u32)) { return null; } - return id; } -// TODO: this needs to also update `define` and `external`. This whole setup needs to be more resilient. 
fn targetFromHashbang(buffer: []const u8) ?options.Target { if (buffer.len > "#!/usr/bin/env bun".len) { if (strings.hasPrefixComptime(buffer, "#!/usr/bin/env bun")) { @@ -16276,7 +16407,6 @@ fn targetFromHashbang(buffer: []const u8) ?options.Target { } } } - return null; } @@ -16623,22 +16753,27 @@ pub const CssEntryPointMeta = struct { imported_on_server: bool, }; -/// The lifetime of this structure is tied to the transpiler's arena -pub const BakeBundleStart = struct { +/// The lifetime of this structure is tied to the bundler's arena +pub const DevServerInput = struct { css_entry_points: std.AutoArrayHashMapUnmanaged(Index, CssEntryPointMeta), }; -/// The lifetime of this structure is tied to the transpiler's arena -pub const BakeBundleOutput = struct { +/// The lifetime of this structure is tied to the bundler's arena +pub const DevServerOutput = struct { chunks: []Chunk, css_file_list: std.AutoArrayHashMapUnmanaged(Index, CssEntryPointMeta), + html_files: std.AutoArrayHashMapUnmanaged(Index, void), - pub fn jsPseudoChunk(out: BakeBundleOutput) *Chunk { + pub fn jsPseudoChunk(out: DevServerOutput) *Chunk { return &out.chunks[0]; } - pub fn cssChunks(out: BakeBundleOutput) []Chunk { - return out.chunks[1..]; + pub fn cssChunks(out: DevServerOutput) []Chunk { + return out.chunks[1..][0..out.css_file_list.count()]; + } + + pub fn htmlChunks(out: DevServerOutput) []Chunk { + return out.chunks[1 + out.css_file_list.count() ..][0..out.html_files.count()]; } }; diff --git a/src/cache.zig b/src/cache.zig index 96ecf3484c45de..021f6e5cd34145 100644 --- a/src/cache.zig +++ b/src/cache.zig @@ -45,12 +45,16 @@ const debug = Output.scoped(.fs, false); pub const Fs = struct { pub const Entry = struct { contents: string, - fd: StoredFileDescriptorType = bun.invalid_fd, - external: External = .{}, + fd: StoredFileDescriptorType, + /// When `contents` comes from a native plugin, this field is populated + /// with information on how to free it. 
+ external_free_function: ExternalFreeFunction = .none, - pub const External = struct { - ctx: ?*anyopaque = null, - function: ?*const fn (?*anyopaque) callconv(.C) void = null, + pub const ExternalFreeFunction = struct { + ctx: ?*anyopaque, + function: ?*const fn (?*anyopaque) callconv(.C) void, + + pub const none: ExternalFreeFunction = .{ .ctx = null, .function = null }; pub fn call(this: *const @This()) void { if (this.function) |func| { @@ -60,8 +64,8 @@ pub const Fs = struct { }; pub fn deinit(entry: *Entry, allocator: std.mem.Allocator) void { - if (entry.external.function) |func| { - func(entry.external.ctx); + if (entry.external_free_function.function) |func| { + func(entry.external_free_function.ctx); } else if (entry.contents.len > 0) { allocator.free(entry.contents); entry.contents = ""; diff --git a/src/cli.zig b/src/cli.zig index 844b88e1947d19..f8b42edb10fb40 100644 --- a/src/cli.zig +++ b/src/cli.zig @@ -243,6 +243,7 @@ pub const Arguments = struct { clap.parseParam("--throw-deprecation Determine whether or not deprecation warnings result in errors.") catch unreachable, clap.parseParam("--title Set the process title") catch unreachable, clap.parseParam("--zero-fill-buffers Boolean to force Buffer.allocUnsafe(size) to be zero-filled.") catch unreachable, + clap.parseParam("--no-hmr Disable Hot-module-replacement when using HTML imports with Bun.serve") catch unreachable, }; const auto_or_run_params = [_]ParamType{ @@ -818,6 +819,10 @@ pub const Arguments = struct { if (args.flag("--zero-fill-buffers")) { Bun__Node__ZeroFillBuffers = true; } + + if (args.flag("--no-hmr")) { + bun.bake.DevServer.enabled = false; + } } if (opts.port != null and opts.origin == null) { diff --git a/src/cli/build_command.zig b/src/cli/build_command.zig index 0b371a67232890..37e2feb1b27dde 100644 --- a/src/cli/build_command.zig +++ b/src/cli/build_command.zig @@ -235,18 +235,18 @@ pub const BuildCommand = struct { .unspecified => {}, } - var client_bundler: 
transpiler.Transpiler = undefined; + var client_transpiler: transpiler.Transpiler = undefined; if (this_transpiler.options.server_components) { - client_bundler = try transpiler.Transpiler.init(allocator, log, ctx.args, null); - client_bundler.options = this_transpiler.options; - client_bundler.options.target = .browser; - client_bundler.options.server_components = true; - client_bundler.options.conditions = try this_transpiler.options.conditions.clone(); + client_transpiler = try transpiler.Transpiler.init(allocator, log, ctx.args, null); + client_transpiler.options = this_transpiler.options; + client_transpiler.options.target = .browser; + client_transpiler.options.server_components = true; + client_transpiler.options.conditions = try this_transpiler.options.conditions.clone(); try this_transpiler.options.conditions.appendSlice(&.{"react-server"}); this_transpiler.options.react_fast_refresh = false; this_transpiler.options.minify_syntax = true; - client_bundler.options.minify_syntax = true; - client_bundler.options.define = try options.Define.init( + client_transpiler.options.minify_syntax = true; + client_transpiler.options.define = try options.Define.init( allocator, if (ctx.args.define) |user_defines| try options.Define.Data.fromInput(try options.stringHashMapFromArrays( @@ -262,10 +262,10 @@ pub const BuildCommand = struct { ); try bun.bake.addImportMetaDefines(allocator, this_transpiler.options.define, .development, .server); - try bun.bake.addImportMetaDefines(allocator, client_bundler.options.define, .development, .client); + try bun.bake.addImportMetaDefines(allocator, client_transpiler.options.define, .development, .client); this_transpiler.resolver.opts = this_transpiler.options; - client_bundler.resolver.opts = client_bundler.options; + client_transpiler.resolver.opts = client_transpiler.options; } // var env_loader = this_transpiler.env; diff --git a/src/cli/create_command.zig b/src/cli/create_command.zig index 07656cefcfa870..ea181e1e9eb06d 100644 
--- a/src/cli/create_command.zig +++ b/src/cli/create_command.zig @@ -1931,7 +1931,7 @@ pub const Example = struct { ), ); - var header_entries: Headers.Entries = .{}; + var header_entries: Headers.Entry.List = .{}; var headers_buf: string = ""; if (env_loader.map.get("GITHUB_TOKEN") orelse env_loader.map.get("GITHUB_ACCESS_TOKEN")) |access_token| { @@ -1939,14 +1939,14 @@ pub const Example = struct { headers_buf = try std.fmt.allocPrint(ctx.allocator, "AuthorizationBearer {s}", .{access_token}); try header_entries.append( ctx.allocator, - Headers.Kv{ - .name = Api.StringPointer{ + .{ + .name = .{ .offset = 0, - .length = @as(u32, @intCast("Authorization".len)), + .length = @intCast("Authorization".len), }, - .value = Api.StringPointer{ - .offset = @as(u32, @intCast("Authorization".len)), - .length = @as(u32, @intCast(headers_buf.len - "Authorization".len)), + .value = .{ + .offset = @intCast("Authorization".len), + .length = @intCast(headers_buf.len - "Authorization".len), }, }, ); diff --git a/src/cli/install.ps1 b/src/cli/install.ps1 index 4d3e36a5792b1d..39c49ac348818c 100644 --- a/src/cli/install.ps1 +++ b/src/cli/install.ps1 @@ -23,7 +23,7 @@ if (-not ((Get-CimInstance Win32_ComputerSystem)).SystemType -match "x64-based") # This corresponds to .win10_rs5 in build.zig $MinBuild = 17763; -$MinBuildName = "Windows 10 1809" +$MinBuildName = "Windows 10 1809 / Windows Server 2019" $WinVer = [System.Environment]::OSVersion.Version if ($WinVer.Major -lt 10 -or ($WinVer.Major -eq 10 -and $WinVer.Build -lt $MinBuild)) { diff --git a/src/cli/upgrade_command.zig b/src/cli/upgrade_command.zig index dc9204a14d0cbb..8f38f9eb0f5d51 100644 --- a/src/cli/upgrade_command.zig +++ b/src/cli/upgrade_command.zig @@ -186,10 +186,10 @@ pub const UpgradeCommand = struct { } } - var header_entries: Headers.Entries = .{}; - const accept = Headers.Kv{ - .name = Api.StringPointer{ .offset = 0, .length = @as(u32, @intCast("Accept".len)) }, - .value = Api.StringPointer{ .offset = @as(u32, 
@intCast("Accept".len)), .length = @as(u32, @intCast("application/vnd.github.v3+json".len)) }, + var header_entries: Headers.Entry.List = .empty; + const accept = Headers.Entry{ + .name = .{ .offset = 0, .length = @intCast("Accept".len) }, + .value = .{ .offset = @intCast("Accept".len), .length = @intCast("application/vnd.github.v3+json".len) }, }; try header_entries.append(allocator, accept); defer if (comptime silent) header_entries.deinit(allocator); @@ -217,14 +217,14 @@ pub const UpgradeCommand = struct { headers_buf = try std.fmt.allocPrint(allocator, default_github_headers ++ "AuthorizationBearer {s}", .{access_token}); try header_entries.append( allocator, - Headers.Kv{ - .name = Api.StringPointer{ + .{ + .name = .{ .offset = accept.value.offset + accept.value.length, - .length = @as(u32, @intCast("Authorization".len)), + .length = @intCast("Authorization".len), }, - .value = Api.StringPointer{ - .offset = @as(u32, @intCast(accept.value.offset + accept.value.length + "Authorization".len)), - .length = @as(u32, @intCast("Bearer ".len + access_token.len)), + .value = .{ + .offset = @intCast(accept.value.offset + accept.value.length + "Authorization".len), + .length = @intCast("Bearer ".len + access_token.len), }, }, ); diff --git a/src/codegen/bake-codegen.ts b/src/codegen/bake-codegen.ts index a48fbaae4e4750..21d1aa607e00ae 100644 --- a/src/codegen/bake-codegen.ts +++ b/src/codegen/bake-codegen.ts @@ -37,6 +37,7 @@ async function run() { entrypoints: [join(base_dir, `hmr-runtime-${file}.ts`)], define: { side: JSON.stringify(side), + IS_ERROR_RUNTIME: String(file === "error"), IS_BUN_DEVELOPMENT: String(!!debug), }, minify: { @@ -120,7 +121,7 @@ async function run() { code = debug ? `((${params}) => {${code}})\n` : `((${params})=>{${code}})\n`; } else { - code = debug ? `((${names}) => {${code}})({\n` : `((${names})=>{${code}})({`; + code = debug ? 
`(async (${names}) => {${code}})({\n` : `(async(${names})=>{${code}})({`; } } @@ -139,6 +140,7 @@ async function run() { { kind: ["error"], result: results[2] }, ] .filter(x => x.result.status === "rejected") + // @ts-ignore .map(x => ({ kind: x.kind, err: x.result.reason })) as Err[]; if (failed.length > 0) { const flattened_errors: Err[] = []; diff --git a/src/crash_handler.zig b/src/crash_handler.zig index 982a2970a8abde..f8aef7da0018d9 100644 --- a/src/crash_handler.zig +++ b/src/crash_handler.zig @@ -1687,6 +1687,12 @@ pub const StoredTrace = struct { var frame = stored.trace(); std.debug.captureStackTrace(begin orelse @returnAddress(), &frame); stored.index = frame.index; + for (frame.instruction_addresses[0..frame.index], 0..) |addr, i| { + if (addr == 0) { + stored.index = i; + break; + } + } return stored; } diff --git a/src/deps/libuv.zig b/src/deps/libuv.zig index 011be6e3823f35..336b5720d77a51 100644 --- a/src/deps/libuv.zig +++ b/src/deps/libuv.zig @@ -2274,8 +2274,7 @@ pub const uv_stdio_container_t = struct_uv_stdio_container_s; pub const uv_process_options_t = extern struct { exit_cb: uv_exit_cb, file: [*:0]const u8, - // TODO(@paperdave): upstream changing libuv's args to const - // it is not mutated in any of their code + // In libuv, this is not 'const', but they never mutate it. 
args: [*:null]?[*:0]const u8, env: [*:null]?[*:0]const u8, cwd: [*:0]const u8, diff --git a/src/deps/lol-html.zig b/src/deps/lol-html.zig index 303588edc09276..489f2e99c04d6a 100644 --- a/src/deps/lol-html.zig +++ b/src/deps/lol-html.zig @@ -732,8 +732,8 @@ pub const Comment = opaque { }; pub const Directive = enum(c_uint) { - stop = 0, - @"continue" = 1, + @"continue" = 0, + stop = 1, }; pub const lol_html_comment_handler_t = *const fn (*Comment, ?*anyopaque) callconv(.C) Directive; pub const lol_html_text_handler_handler_t = *const fn (*TextChunk, ?*anyopaque) callconv(.C) Directive; diff --git a/src/deps/uws.zig b/src/deps/uws.zig index a56e2cbf3847fa..a64dc65f52a950 100644 --- a/src/deps/uws.zig +++ b/src/deps/uws.zig @@ -3039,7 +3039,7 @@ pub const WebSocketBehavior = extern struct { pub fn onMessage(raw_ws: *RawWebSocket, message: [*c]const u8, length: usize, opcode: Opcode) callconv(.C) void { const ws = @unionInit(AnyWebSocket, active_field_name, @as(*WebSocket, @ptrCast(raw_ws))); const this = ws.as(Type).?; - @call(.always_inline, Type.onMessage, .{ + @call(bun.callmod_inline, Type.onMessage, .{ this, ws, if (length > 0) message[0..length] else "", @@ -3079,7 +3079,7 @@ pub const WebSocketBehavior = extern struct { pub fn onClose(raw_ws: *RawWebSocket, code: i32, message: [*c]const u8, length: usize) callconv(.C) void { const ws = @unionInit(AnyWebSocket, active_field_name, @as(*WebSocket, @ptrCast(raw_ws))); const this = ws.as(Type).?; - @call(.always_inline, Type.onClose, .{ + @call(bun.callmod_inline, Type.onClose, .{ this, ws, code, @@ -3088,7 +3088,7 @@ pub const WebSocketBehavior = extern struct { } pub fn onUpgrade(ptr: *anyopaque, res: *uws_res, req: *Request, context: *uws_socket_context_t, id: usize) callconv(.C) void { - @call(.always_inline, Server.onWebSocketUpgrade, .{ + @call(bun.callmod_inline, Server.onWebSocketUpgrade, .{ bun.cast(*Server, ptr), @as(*NewApp(is_ssl).Response, @ptrCast(res)), req, @@ -3191,7 +3191,7 @@ pub const 
AnyResponse = union(enum) { SSL: *NewApp(true).Response, TCP: *NewApp(false).Response, - pub fn init(response: anytype) AnyResponse { + pub inline fn init(response: anytype) AnyResponse { return switch (@TypeOf(response)) { *NewApp(true).Response => .{ .SSL = response }, *NewApp(false).Response => .{ .TCP = response }, @@ -3220,12 +3220,7 @@ pub const AnyResponse = union(enum) { }; } - pub fn write(this: AnyResponse, data: []const u8) void { - return switch (this) { - .SSL => |resp| resp.write(data), - .TCP => |resp| resp.write(data), - }; - } + pub const write = @compileError("this function is not provided to discourage repeatedly checking the response type. use `switch(...) { inline else => ... }` so that multiple calls"); pub fn end(this: AnyResponse, data: []const u8, close_connection: bool) void { return switch (this) { @@ -3292,7 +3287,7 @@ pub const AnyResponse = union(enum) { }; } - pub fn onAborted(this: AnyResponse, comptime UserDataType: type, comptime handler: fn (UserDataType, AnyResponse) void, opcional_data: UserDataType) void { + pub fn onAborted(this: AnyResponse, comptime UserDataType: type, comptime handler: fn (UserDataType, AnyResponse) void, optional_data: UserDataType) void { const wrapper = struct { pub fn ssl_handler(user_data: UserDataType, resp: *NewApp(true).Response) void { handler(user_data, .{ .SSL = resp }); @@ -3302,8 +3297,8 @@ pub const AnyResponse = union(enum) { } }; return switch (this) { - .SSL => |resp| resp.onAborted(UserDataType, wrapper.ssl_handler, opcional_data), - .TCP => |resp| resp.onAborted(UserDataType, wrapper.tcp_handler, opcional_data), + .SSL => |resp| resp.onAborted(UserDataType, wrapper.ssl_handler, optional_data), + .TCP => |resp| resp.onAborted(UserDataType, wrapper.tcp_handler, optional_data), }; } @@ -3357,7 +3352,8 @@ pub const AnyResponse = union(enum) { }; pub fn NewApp(comptime ssl: bool) type { return opaque { - const ssl_flag = @as(i32, @intFromBool(ssl)); + pub const is_ssl = ssl; + const ssl_flag: 
i32 = @intFromBool(ssl); const ThisApp = @This(); pub fn close(this: *ThisApp) void { diff --git a/src/feature_flags.zig b/src/feature_flags.zig index 8b42968a31ba1e..7a5c7605cc3685 100644 --- a/src/feature_flags.zig +++ b/src/feature_flags.zig @@ -13,9 +13,6 @@ pub const jsx_runtime_is_cjs = true; pub const tracing = true; -// TODO: remove this flag, it should use bun.Output.scoped -pub const verbose_watcher = false; - pub const css_supports_fence = true; pub const enable_entry_cache = true; @@ -155,12 +152,13 @@ pub fn isLibdeflateEnabled() bool { return !bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_NO_LIBDEFLATE"); } -/// Enable Bun Kit's experimental bundler tools. +/// Enable the "app" option in Bun.serve. This option will likely be removed +/// in favor of HTML loaders and configuring framework options in bunfig.toml pub fn bake() bool { // In canary or if an environment variable is specified. return env.is_canary or env.isDebug or bun.getRuntimeFeatureFlag("BUN_FEATURE_FLAG_EXPERIMENTAL_BAKE"); } -/// Additional debugging features for Bake, such as the incremental visualizer. +/// Additional debugging features for bake.DevServer, such as the incremental visualizer. /// To use them, extra flags are passed in addition to this one. 
pub const bake_debugging_features = env.is_canary or env.isDebug; diff --git a/src/fmt.zig b/src/fmt.zig index 179c29a26ef0ff..2720f0b6f9e062 100644 --- a/src/fmt.zig +++ b/src/fmt.zig @@ -1491,12 +1491,12 @@ pub const SizeFormatter = struct { } }; -pub fn size(value: anytype, opts: SizeFormatter.Options) SizeFormatter { +pub fn size(bytes: anytype, opts: SizeFormatter.Options) SizeFormatter { return .{ - .value = switch (@TypeOf(value)) { - f64, f32, f128 => @intFromFloat(value), - i64, isize => @intCast(value), - else => value, + .value = switch (@TypeOf(bytes)) { + f64, f32, f128 => @intFromFloat(bytes), + i64, isize => @intCast(bytes), + else => bytes, }, .opts = opts, }; diff --git a/src/fs.zig b/src/fs.zig index fa20e43a5272aa..a0b5344df55bc3 100644 --- a/src/fs.zig +++ b/src/fs.zig @@ -412,18 +412,18 @@ pub const FileSystem = struct { // } pub fn normalize(_: *@This(), str: string) string { - return @call(bun.callmod_inline, path_handler.normalizeString, .{ str, true, .auto }); + return @call(bun.callmod_inline, path_handler.normalizeString, .{ str, true, bun.path.Platform.auto }); } pub fn normalizeBuf(_: *@This(), buf: []u8, str: string) string { - return @call(bun.callmod_inline, path_handler.normalizeStringBuf, .{ str, buf, false, .auto, false }); + return @call(bun.callmod_inline, path_handler.normalizeStringBuf, .{ str, buf, false, bun.path.Platform.auto, false }); } pub fn join(_: *@This(), parts: anytype) string { return @call(bun.callmod_inline, path_handler.joinStringBuf, .{ &join_buf, parts, - .loose, + bun.path.Platform.loose, }); } @@ -431,7 +431,7 @@ pub const FileSystem = struct { return @call(bun.callmod_inline, path_handler.joinStringBuf, .{ buf, parts, - .loose, + bun.path.Platform.loose, }); } diff --git a/src/hive_array.zig b/src/hive_array.zig index 375e7929c811a4..a39e0702e6a5db 100644 --- a/src/hive_array.zig +++ b/src/hive_array.zig @@ -10,9 +10,15 @@ const testing = std.testing; pub fn HiveArray(comptime T: type, comptime capacity: 
u16) type { return struct { const Self = @This(); - buffer: [capacity]T = undefined, - available: bun.bit_set.IntegerBitSet(capacity) = bun.bit_set.IntegerBitSet(capacity).initFull(), + + buffer: [capacity]T, + available: bun.bit_set.IntegerBitSet(capacity), + pub const size = capacity; + pub const empty: Self = .{ + .buffer = undefined, + .available = .initFull(), + }; pub fn init() Self { return .{}; @@ -75,7 +81,7 @@ pub fn HiveArray(comptime T: type, comptime capacity: u16) type { pub fn init(allocator: std.mem.Allocator) This { return .{ .allocator = allocator, - .hive = if (capacity > 0) HiveArray(T, capacity).init(), + .hive = if (capacity > 0) .empty, }; } @@ -86,7 +92,7 @@ pub fn HiveArray(comptime T: type, comptime capacity: u16) type { } } - return self.allocator.create(T) catch unreachable; + return self.allocator.create(T) catch bun.outOfMemory(); } pub fn getAndSeeIfNew(self: *This, new: *bool) *T { @@ -97,7 +103,7 @@ pub fn HiveArray(comptime T: type, comptime capacity: u16) type { } } - return self.allocator.create(T) catch unreachable; + return self.allocator.create(T) catch bun.outOfMemory(); } pub fn tryGet(self: *This) !*T { diff --git a/src/http.zig b/src/http.zig index efb9255ba75e6c..6eb64576ac4b91 100644 --- a/src/http.zig +++ b/src/http.zig @@ -410,7 +410,7 @@ const ProxyTunnel = struct { }; const pending = encoded_data[@intCast(written)..]; if (pending.len > 0) { - // lets flush when we are trully writable + // lets flush when we are truly writable proxy.write_buffer.write(pending) catch bun.outOfMemory(); } } @@ -583,7 +583,7 @@ fn NewHTTPContext(comptime ssl: bool) type { return ActiveSocket.init(&dead_socket); } - pending_sockets: HiveArray(PooledSocket, pool_size) = HiveArray(PooledSocket, pool_size).init(), + pending_sockets: HiveArray(PooledSocket, pool_size) = .empty, us_socket_context: *uws.SocketContext, const Context = @This(); @@ -1767,7 +1767,7 @@ pub inline fn cleanup(force: bool) void { default_arena.gc(force); } -pub const 
Headers = @import("./http/headers.zig"); +pub const Headers = JSC.WebCore.Headers; pub const SOCKET_FLAGS: u32 = if (Environment.isLinux) SOCK.CLOEXEC | posix.MSG.NOSIGNAL @@ -2226,7 +2226,7 @@ pub const Flags = packed struct { // TODO: reduce the size of this struct // Many of these fields can be moved to a packed struct and use less space method: Method, -header_entries: Headers.Entries, +header_entries: Headers.Entry.List, header_buf: string, url: URL, connected_url: URL = URL{}, @@ -2400,8 +2400,8 @@ pub const HTTPChannelContext = struct { pub const AsyncHTTP = struct { request: ?picohttp.Request = null, response: ?picohttp.Response = null, - request_headers: Headers.Entries = Headers.Entries{}, - response_headers: Headers.Entries = Headers.Entries{}, + request_headers: Headers.Entry.List = .empty, + response_headers: Headers.Entry.List = .empty, response_buffer: *MutableString, request_body: HTTPRequestBody = .{ .bytes = "" }, allocator: std.mem.Allocator, @@ -2551,7 +2551,7 @@ pub const AsyncHTTP = struct { allocator: std.mem.Allocator, method: Method, url: URL, - headers: Headers.Entries, + headers: Headers.Entry.List, headers_buf: string, response_buffer: *MutableString, request_body: []const u8, @@ -2671,7 +2671,7 @@ pub const AsyncHTTP = struct { allocator: std.mem.Allocator, method: Method, url: URL, - headers: Headers.Entries, + headers: Headers.Entry.List, headers_buf: string, response_buffer: *MutableString, request_body: []const u8, diff --git a/src/http/header_builder.zig b/src/http/header_builder.zig index 94744fc3263e5e..247bcf1cadc132 100644 --- a/src/http/header_builder.zig +++ b/src/http/header_builder.zig @@ -1,15 +1,15 @@ const HeaderBuilder = @This(); const StringBuilder = @import("../string_builder.zig"); -const Headers = @import("./headers.zig"); +const Headers = bun.JSC.WebCore.Headers; const string = bun.string; const HTTPClient = @import("../http.zig"); const Api = @import("../api/schema.zig").Api; const std = @import("std"); const bun 
= @import("root").bun; -content: StringBuilder = StringBuilder{}, +content: StringBuilder = .{}, header_count: u64 = 0, -entries: Headers.Entries = Headers.Entries{}, +entries: Headers.Entry.List = .empty, pub fn count(this: *HeaderBuilder, name: string, value: string) void { this.header_count += 1; @@ -34,7 +34,7 @@ pub fn append(this: *HeaderBuilder, name: string, value: string) void { .length = @as(u32, @truncate(value.len)), }; _ = this.content.append(value); - this.entries.appendAssumeCapacity(Headers.Kv{ .name = name_ptr, .value = value_ptr }); + this.entries.appendAssumeCapacity(.{ .name = name_ptr, .value = value_ptr }); } pub fn appendFmt(this: *HeaderBuilder, name: string, comptime fmt: string, args: anytype) void { @@ -52,7 +52,7 @@ pub fn appendFmt(this: *HeaderBuilder, name: string, comptime fmt: string, args: .length = @as(u32, @truncate(value.len)), }; - this.entries.appendAssumeCapacity(Headers.Kv{ .name = name_ptr, .value = value_ptr }); + this.entries.appendAssumeCapacity(.{ .name = name_ptr, .value = value_ptr }); } pub fn apply(this: *HeaderBuilder, client: *HTTPClient) void { diff --git a/src/http/headers.zig b/src/http/headers.zig deleted file mode 100644 index fe1f08e98b5c8c..00000000000000 --- a/src/http/headers.zig +++ /dev/null @@ -1,8 +0,0 @@ -const Api = @import("../api/schema.zig").Api; -const std = @import("std"); -const bun = @import("root").bun; -pub const Kv = struct { - name: Api.StringPointer, - value: Api.StringPointer, -}; -pub const Entries = bun.MultiArrayList(Kv); diff --git a/src/http/mime_type.zig b/src/http/mime_type.zig index 1f284dc7ee4769..084ebfe1a7f78f 100644 --- a/src/http/mime_type.zig +++ b/src/http/mime_type.zig @@ -239,6 +239,11 @@ pub fn byExtensionNoDefault(ext: string) ?MimeType { return extensions.get(ext); } +pub fn detectFromPath(path: string) MimeType { + const ext = std.fs.path.extension(path); + return byExtension(ext); +} + // this is partially auto-generated pub const all = struct { pub const 
@"application/webassembly" = wasm; diff --git a/src/import_record.zig b/src/import_record.zig index a81f5f180b073d..ab1ff7958dbd27 100644 --- a/src/import_record.zig +++ b/src/import_record.zig @@ -196,8 +196,6 @@ pub const ImportRecord = struct { with_type_toml, with_type_file, - tailwind, - pub fn loader(this: Tag) ?bun.options.Loader { return switch (this) { .with_type_sqlite => .sqlite, diff --git a/src/install/install.zig b/src/install/install.zig index 3bbbeb80eeccd9..f5a268871543b3 100644 --- a/src/install/install.zig +++ b/src/install/install.zig @@ -2698,8 +2698,8 @@ pub const PackageManager = struct { pending_pre_calc_hashes: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), pending_tasks: std.atomic.Value(u32) = std.atomic.Value(u32).init(0), total_tasks: u32 = 0, - preallocated_network_tasks: PreallocatedNetworkTasks = PreallocatedNetworkTasks.init(bun.default_allocator), - preallocated_resolve_tasks: PreallocatedTaskStore = PreallocatedTaskStore.init(bun.default_allocator), + preallocated_network_tasks: PreallocatedNetworkTasks = .init(bun.default_allocator), + preallocated_resolve_tasks: PreallocatedTaskStore = .init(bun.default_allocator), /// items are only inserted into this if they took more than 500ms lifecycle_script_time_log: LifecycleScriptTimeLog = .{}, diff --git a/src/js_ast.zig b/src/js_ast.zig index 530eb4ee54ebd2..38105e00907294 100644 --- a/src/js_ast.zig +++ b/src/js_ast.zig @@ -1301,8 +1301,8 @@ pub const Symbol = struct { .{ symbol.original_name, @tagName(symbol.kind), if (symbol.hasLink()) symbol.link else Ref{ - .source_index = @as(Ref.Int, @truncate(i)), - .inner_index = @as(Ref.Int, @truncate(inner_index)), + .source_index = @truncate(i), + .inner_index = @truncate(inner_index), .tag = .symbol, }, }, @@ -1614,7 +1614,7 @@ pub const E = struct { pub fn hasSameFlagsAs(a: *Dot, b: *Dot) bool { return (a.optional_chain == b.optional_chain and a.is_direct_eval == b.is_direct_eval and - a.can_be_unwrapped_if_unused == 
b.can_be_unwrapped_if_unused and a.call_can_be_unwrapped_if_unused == b.call_can_be_unwrapped_if_unused); + a.can_be_removed_if_unused == b.can_be_removed_if_unused and a.call_can_be_unwrapped_if_unused == b.call_can_be_unwrapped_if_unused); } }; @@ -1648,7 +1648,7 @@ pub const E = struct { must_keep_due_to_with_stmt: bool = false, // If true, this identifier is known to not have a side effect (i.e. to not - // throw an exception) when referenced. If false, this identifier may or may + // throw an exception) when referenced. If false, this identifier may or // not have side effects when referenced. This is used to allow the removal // of known globals such as "Object" if they aren't used. can_be_removed_if_unused: bool = false, @@ -2027,12 +2027,12 @@ pub const E = struct { return error.Clobber; }, .e_object => |object| { - if (rope.next == null) { - // success - return existing; + if (rope.next != null) { + return try object.getOrPutObject(rope.next.?, allocator); } - return try object.getOrPutObject(rope.next.?, allocator); + // success + return existing; }, else => { return error.Clobber; diff --git a/src/js_parser.zig b/src/js_parser.zig index c99fd26db8bf12..b7a62f08644345 100644 --- a/src/js_parser.zig +++ b/src/js_parser.zig @@ -1470,12 +1470,6 @@ pub const ImportScanner = struct { } } } - - // when bundling, all top-level variables become var - // TODO(@paperdave): we already do this earlier in visiting? - if (!hot_module_reloading_transformations and p.options.bundle and !st.kind.isUsing()) { - st.kind = .k_var; - } }, .s_export_default => |st| { // This is defer'd so that we still record export default for identifiers @@ -3279,7 +3273,7 @@ pub const Parser = struct { // The lexer location won't be totally accurate, but it's kind of helpful. try p.log.addError(p.source, p.lexer.loc(), "Maximum call stack size exceeded"); - // Return a SyntaxError so that we reuse existing code for handling erorrs. 
+ // Return a SyntaxError so that we reuse existing code for handling errors. return error.SyntaxError; } @@ -5104,136 +5098,6 @@ fn NewParser_( }; }; - /// "Fast Refresh" is React's solution for hot-module-reloading in the context of the UI framework - /// user guide: https://reactnative.dev/docs/fast-refresh (applies to react-dom and native) - /// - /// This depends on performing a couple extra transformations at bundle time, as well as - /// including the `react-refresh` NPM package, which is able to do the heavy lifting, - /// integrating with `react` and `react-dom`. - /// - /// Prior implementations: - /// [1]: https://github.com/facebook/react/blob/main/packages/react-refresh/src/ReactFreshBabelPlugin.js - /// [2]: https://github.com/swc-project/swc/blob/main/crates/swc_ecma_transforms_react/src/refresh/mod.rs - /// - /// Additional reading: - /// [3] https://github.com/facebook/react/issues/16604#issuecomment-528663101 - /// [4] https://github.com/facebook/react/blob/master/packages/react-refresh/src/__tests__/ReactFreshIntegration-test.js - /// - /// Instead of a plugin which visits the tree separately, Bun's implementation of fast refresh - /// happens in tandem with the visit pass. The responsibilities of the transform are as follows: - /// - /// 1. For all Components (which is defined as any top-level function/function variable, that is - /// named with a capital letter; see `isComponentishName`), register them to the runtime using - /// `$RefreshReg$(ComponentFunction, "Component");`. Implemented in `p.handleReactRefreshRegister` - /// HOC components are also registered, but only through a special case for `export default` - /// - /// 2. For all functions which call a Hook (a hook is an identifier matching /^use[A-Z]/): - /// a. Outside of the function, create a signature function `const _s = $RefreshSig$();` - /// b. At the start of the function, call `_s()` - /// c. 
Record all of the hooks called, the variables they are assigned to, and - /// arguments depending on which hook has been used. `useState` and `useReducer`, - /// for example, are special-cased. - /// d. Directly after the function, call `_s(hook, "", forceReset)` - /// - If a user-defined hook is called, the alterate form is used: - /// `_s(hook, "", forceReset, () => [useCustom1, useCustom2])` - /// - /// The upstream transforms do not declare `$RefreshReg$` or `$RefreshSig$`. A typical - /// implementation might look like this, prepending this data to the module start: - /// - /// import * as Refresh from 'react-refresh/runtime'; - /// const $RefreshReg$ = (type, id) => Refresh.register(type, "" + id); - /// const $RefreshSig$ = Refresh.createSignatureFunctionForTransform; - /// - /// Since Bun is a transpiler *and* bundler, we take a slightly different approach. Aside - /// from including the link to the refresh runtime, our notation of $RefreshReg$ is just - /// pointing at `Refresh.register`, which means when we call it, the second argument has - /// to be a string containing the filepath, not just the component name. - const ReactRefresh = struct { - // Set if this JSX/TSX file uses the refresh runtime. If so, - // we must insert an import statement to it. - register_used: bool = false, - signature_used: bool = false, - - /// $RefreshReg$ is called on all top-level variables that are - /// components, as well as HOCs found in the `export default` clause. - register_ref: Ref = Ref.None, - - /// $RefreshSig$ is called to create a signature function, which is - /// used by the refresh runtime to perform smart hook tracking. - create_signature_ref: Ref = Ref.None, - - /// If a comment with '@refresh reset' is seen, we will forward a - /// force refresh to the refresh runtime. This lets you reset the - /// state of hooks on an update on a per-component basis. - // TODO: this is never set - force_reset: bool = false, - - /// The last hook that was scanned. 
This is used when visiting - /// `.s_local`, as we must hash the variable destructure if the - /// hook's result is assigned directly to a local. - last_hook_seen: ?*E.Call = null, - - /// Every function sets up stack memory to hold data related to it's - /// hook tracking. This is a pointer to that ?HookContext, where an - /// inner null means there are no hook calls. - /// - /// The inner value is initialized when the first hook .e_call is - /// visited, where the '_s' symbol is reserved. Additional hook calls - /// append to the `hasher` and `user_hooks` as needed. - /// - /// When a function is done visiting, the stack location is checked, - /// and then it will insert `var _s = ...`, add the `_s()` call at - /// the start of the function, and then add the call to `_s(func, ...)`. - hook_ctx_storage: ?*?HookContext = null, - - pub const HookContext = struct { - hasher: std.hash.Wyhash, - signature_cb: Ref, - user_hooks: std.AutoArrayHashMapUnmanaged(Ref, Expr), - }; - - // https://github.com/facebook/react/blob/d1afcb43fd506297109c32ff462f6f659f9110ae/packages/react-refresh/src/ReactFreshBabelPlugin.js#L42 - pub fn isComponentishName(id: []const u8) bool { - if (id.len == 0) return false; - return switch (id[0]) { - 'A'...'Z' => true, - else => false, - }; - } - - // https://github.com/facebook/react/blob/d1afcb43fd506297109c32ff462f6f659f9110ae/packages/react-refresh/src/ReactFreshBabelPlugin.js#L408 - pub fn isHookName(id: []const u8) bool { - return id.len >= 4 and - strings.hasPrefixComptime(id, "use") and - switch (id[3]) { - 'A'...'Z' => true, - else => false, - }; - } - - pub const built_in_hooks = bun.ComptimeEnumMap(enum { - useState, - useReducer, - useEffect, - useLayoutEffect, - useMemo, - useCallback, - useRef, - useContext, - useImperativeHandle, - useDebugValue, - useId, - useDeferredValue, - useTransition, - useInsertionEffect, - useSyncExternalStore, - useFormStatus, - useFormState, - useActionState, - useOptimistic, - }); - }; - /// use this 
instead of checking p.source.index /// because when not bundling, p.source.index is `0` inline fn isSourceRuntime(p: *const P) bool { @@ -9399,7 +9263,7 @@ fn NewParser_( } pub fn newSymbol(p: *P, kind: Symbol.Kind, identifier: string) !Ref { - const inner_index = @as(Ref.Int, @truncate(p.symbols.items.len)); + const inner_index: Ref.Int = @truncate(p.symbols.items.len); try p.symbols.append(Symbol{ .kind = kind, .original_name = identifier, @@ -19393,6 +19257,10 @@ fn NewParser_( data.default_name = createDefaultName(p, stmt.loc) catch unreachable; } + if (p.options.features.react_fast_refresh) { + try p.handleReactRefreshRegister(stmts, name, data.default_name.ref.?, .default); + } + if (p.options.features.server_components.wrapsExports()) { data.value = .{ .expr = p.wrapValueForServerComponentReference(p.newExpr(E.Function{ .func = func.func }, stmt.loc), "default") }; } @@ -19542,7 +19410,7 @@ fn NewParser_( } if (p.current_scope == p.module_scope) { - try p.handleReactRefreshRegister(stmts, original_name, name_ref); + try p.handleReactRefreshRegister(stmts, original_name, name_ref, .named); } } @@ -19766,7 +19634,7 @@ fn NewParser_( else => break :try_register, }; const original_name = p.symbols.items[id.innerIndex()].original_name; - try p.handleReactRefreshRegister(stmts, original_name, id); + try p.handleReactRefreshRegister(stmts, original_name, id, .named); } } @@ -23295,7 +23163,7 @@ fn NewParser_( } }; - pub fn handleReactRefreshRegister(p: *P, stmts: *ListManaged(Stmt), original_name: []const u8, ref: Ref) !void { + pub fn handleReactRefreshRegister(p: *P, stmts: *ListManaged(Stmt), original_name: []const u8, ref: Ref, export_kind: enum { named, default }) !void { bun.assert(p.options.features.react_fast_refresh); bun.assert(p.current_scope == p.module_scope); @@ -23310,12 +23178,16 @@ fn NewParser_( .data = try bun.strings.concat(p.allocator, &.{ p.source.path.pretty, ":", - original_name, + switch (export_kind) { + .named => original_name, + .default 
=> "default", + }, }), }, loc), }), }, loc) }, loc)); + p.recordUsage(ref); p.react_refresh.register_used = true; } } @@ -24053,12 +23925,148 @@ const WrapMode = enum { bun_commonjs, }; +/// "Fast Refresh" is React's solution for hot-module-reloading in the context of the UI framework +/// user guide: https://reactnative.dev/docs/fast-refresh (applies to react-dom and native) +/// +/// This depends on performing a couple extra transformations at bundle time, as well as +/// including the `react-refresh` NPM package, which is able to do the heavy lifting, +/// integrating with `react` and `react-dom`. +/// +/// Prior implementations: +/// [1]: https://github.com/facebook/react/blob/main/packages/react-refresh/src/ReactFreshBabelPlugin.js +/// [2]: https://github.com/swc-project/swc/blob/main/crates/swc_ecma_transforms_react/src/refresh/mod.rs +/// +/// Additional reading: +/// [3] https://github.com/facebook/react/issues/16604#issuecomment-528663101 +/// [4] https://github.com/facebook/react/blob/master/packages/react-refresh/src/__tests__/ReactFreshIntegration-test.js +/// +/// Instead of a plugin which visits the tree separately, Bun's implementation of fast refresh +/// happens in tandem with the visit pass. The responsibilities of the transform are as follows: +/// +/// 1. For all Components (which is defined as any top-level function/function variable, that is +/// named with a capital letter; see `isComponentishName`), register them to the runtime using +/// `$RefreshReg$(ComponentFunction, "Component");`. Implemented in `p.handleReactRefreshRegister` +/// HOC components are also registered, but only through a special case for `export default` +/// +/// 2. For all functions which call a Hook (a hook is an identifier matching /^use[A-Z]/): +/// a. Outside of the function, create a signature function `const _s = $RefreshSig$();` +/// b. At the start of the function, call `_s()` +/// c. 
Record all of the hooks called, the variables they are assigned to, and +/// arguments depending on which hook has been used. `useState` and `useReducer`, +/// for example, are special-cased. +/// d. Directly after the function, call `_s(hook, "", forceReset)` +/// - If a user-defined hook is called, the alternate form is used: +/// `_s(hook, "", forceReset, () => [useCustom1, useCustom2])` +/// +/// The upstream transforms do not declare `$RefreshReg$` or `$RefreshSig$`. A typical +/// implementation might look like this, prepending this data to the module start: +/// +/// import * as Refresh from 'react-refresh/runtime'; +/// const $RefreshReg$ = (type, id) => Refresh.register(type, "" + id); +/// const $RefreshSig$ = Refresh.createSignatureFunctionForTransform; +/// +/// Since Bun is a transpiler *and* bundler, we take a slightly different approach. Aside +/// from including the link to the refresh runtime, our notation of $RefreshReg$ is just +/// pointing at `Refresh.register`, which means when we call it, the second argument has +/// to be a string containing the filepath, not just the component name. +const ReactRefresh = struct { + // Set if this JSX/TSX file uses the refresh runtime. If so, + // we must insert an import statement to it. + register_used: bool = false, + signature_used: bool = false, + + /// $RefreshReg$ is called on all top-level variables that are + /// components, as well as HOCs found in the `export default` clause. + register_ref: Ref = Ref.None, + + /// $RefreshSig$ is called to create a signature function, which is + /// used by the refresh runtime to perform smart hook tracking. + create_signature_ref: Ref = Ref.None, + + /// If a comment with '@refresh reset' is seen, we will forward a + /// force refresh to the refresh runtime. This lets you reset the + /// state of hooks on an update on a per-component basis. + // TODO: this is never set + force_reset: bool = false, + + /// The last hook that was scanned. 
This is used when visiting + /// `.s_local`, as we must hash the variable destructure if the + /// hook's result is assigned directly to a local. + last_hook_seen: ?*E.Call = null, + + /// Every function sets up stack memory to hold data related to its + /// hook tracking. This is a pointer to that ?HookContext, where an + /// inner null means there are no hook calls. + /// + /// The inner value is initialized when the first hook .e_call is + /// visited, where the '_s' symbol is reserved. Additional hook calls + /// append to the `hasher` and `user_hooks` as needed. + /// + /// When a function is done visiting, the stack location is checked, + /// and then it will insert `var _s = ...`, add the `_s()` call at + /// the start of the function, and then add the call to `_s(func, ...)`. + hook_ctx_storage: ?*?HookContext = null, + + pub const HookContext = struct { + hasher: std.hash.Wyhash, + signature_cb: Ref, + user_hooks: std.AutoArrayHashMapUnmanaged(Ref, Expr), + }; + + // https://github.com/facebook/react/blob/d1afcb43fd506297109c32ff462f6f659f9110ae/packages/react-refresh/src/ReactFreshBabelPlugin.js#L42 + pub fn isComponentishName(id: []const u8) bool { + if (id.len == 0) return false; + return switch (id[0]) { + 'A'...'Z' => true, + else => false, + }; + } + + // https://github.com/facebook/react/blob/d1afcb43fd506297109c32ff462f6f659f9110ae/packages/react-refresh/src/ReactFreshBabelPlugin.js#L408 + pub fn isHookName(id: []const u8) bool { + return id.len >= 4 and + strings.hasPrefixComptime(id, "use") and + switch (id[3]) { + 'A'...'Z' => true, + else => false, + }; + } + + pub const built_in_hooks = bun.ComptimeEnumMap(enum { + useState, + useReducer, + useEffect, + useLayoutEffect, + useMemo, + useCallback, + useRef, + useContext, + useImperativeHandle, + useDebugValue, + useId, + useDeferredValue, + useTransition, + useInsertionEffect, + useSyncExternalStore, + useFormStatus, + useFormState, + useActionState, + useOptimistic, + }); +}; + pub const 
ConvertESMExportsForHmr = struct { last_part: *js_ast.Part, - imports_seen: std.AutoArrayHashMapUnmanaged(u32, void) = .{}, + imports_seen: bun.StringArrayHashMapUnmanaged(ImportRef) = .{}, + export_star_props: std.ArrayListUnmanaged(G.Property) = .{}, export_props: std.ArrayListUnmanaged(G.Property) = .{}, stmts: std.ArrayListUnmanaged(Stmt) = .{}, + const ImportRef = struct { + /// Index into ConvertESMExportsForHmr.stmts + stmt_index: u32, + }; + fn convertStmt(ctx: *ConvertESMExportsForHmr, p: anytype, stmt: Stmt) !void { const new_stmt = switch (stmt.data) { else => stmt, @@ -24114,7 +24122,26 @@ pub const ConvertESMExportsForHmr = struct { break :stmt stmt; }, .s_export_default => |st| stmt: { - // Simple case: we can move this to the default property of the exports object + // When React Fast Refresh needs to tag the default export, the statement + // cannot be moved, since a local reference is required. + if (p.options.features.react_fast_refresh and + st.value == .stmt and st.value.stmt.data == .s_function) + fast_refresh_edge_case: { + const symbol = st.value.stmt.data.s_function.func.name orelse + break :fast_refresh_edge_case; + const name = p.symbols.items[symbol.ref.?.inner_index].original_name; + if (ReactRefresh.isComponentishName(name)) { + // Lower to a function statement, and reference the function in the export list. + try ctx.export_props.append(p.allocator, .{ + .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), + .value = Expr.initIdentifier(symbol.ref.?, stmt.loc), + }); + break :stmt st.value.stmt; + } + // All other functions can be properly moved. + } + + // Try to move the export default expression to the end. 
if (st.canBeMoved()) { try ctx.export_props.append(p.allocator, .{ .key = Expr.init(E.String, .{ .data = "default" }, stmt.loc), @@ -24124,7 +24151,7 @@ pub const ConvertESMExportsForHmr = struct { return; } - // Otherwise, we need a temporary + // Otherwise, a new symbol is needed const temp_id = p.generateTempRef("default_export"); try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = temp_id, .is_top_level = true }); try ctx.last_part.symbol_uses.putNoClobber(p.allocator, temp_id, .{ .count_estimate = 1 }); @@ -24185,13 +24212,22 @@ pub const ConvertESMExportsForHmr = struct { return; // do not emit a statement here }, - .s_export_from => |st| stmt: { + .s_export_from => |st| { + const namespace_ref = try ctx.deduplicatedImport( + p, + st.import_record_index, + st.namespace_ref, + st.items, + stmt.loc, + null, + stmt.loc, + ); for (st.items) |*item| { const ref = item.name.ref.?; const symbol = &p.symbols.items[ref.innerIndex()]; if (symbol.namespace_alias == null) { symbol.namespace_alias = .{ - .namespace_ref = st.namespace_ref, + .namespace_ref = namespace_ref, .alias = item.original_name, .import_record_index = st.import_record_index, }; @@ -24207,34 +24243,108 @@ pub const ConvertESMExportsForHmr = struct { item.alias = item.original_name; item.original_name = alias; } - - const gop = try ctx.imports_seen.getOrPut(p.allocator, st.import_record_index); - if (gop.found_existing) return; - break :stmt Stmt.alloc(S.Import, .{ - .import_record_index = st.import_record_index, - .is_single_line = true, - .default_name = null, - .items = st.items, - .namespace_ref = st.namespace_ref, - .star_name_loc = null, - }, stmt.loc); + return; }, - .s_export_star => { - bun.todoPanic(@src(), "hot-module-reloading instrumentation for 'export * from'", .{}); + .s_export_star => |st| { + const namespace_ref = try ctx.deduplicatedImport( + p, + st.import_record_index, + st.namespace_ref, + &.{}, + stmt.loc, + null, + stmt.loc, + ); + try 
ctx.export_star_props.append(p.allocator, .{ + .kind = .spread, + .value = Expr.initIdentifier(namespace_ref, stmt.loc), + }); + return; }, // De-duplicate import statements. It is okay to disregard // named/default imports here as we always rewrite them as - // full qualified property accesses (need to so live-bindings) - .s_import => |st| stmt: { - const gop = try ctx.imports_seen.getOrPut(p.allocator, st.import_record_index); - if (gop.found_existing) return; - break :stmt stmt; + // full qualified property accesses (needed for live-bindings) + .s_import => |st| { + _ = try ctx.deduplicatedImport( + p, + st.import_record_index, + st.namespace_ref, + st.items, + st.star_name_loc, + st.default_name, + stmt.loc, + ); + return; }, }; try ctx.stmts.append(p.allocator, new_stmt); } + /// Deduplicates imports, returning a previously used Ref if present. + fn deduplicatedImport( + ctx: *ConvertESMExportsForHmr, + p: anytype, + import_record_index: u32, + namespace_ref: Ref, + items: []js_ast.ClauseItem, + star_name_loc: ?logger.Loc, + default_name: ?js_ast.LocRef, + loc: logger.Loc, + ) !Ref { + const ir = &p.import_records.items[import_record_index]; + const gop = try ctx.imports_seen.getOrPut(p.allocator, ir.path.text); + if (gop.found_existing) { + // Disable this one since an older record is getting used. It isn't + // practical to delete this import record entry since an import or + // require expression can exist. + ir.is_unused = true; + + const stmt = ctx.stmts.items[gop.value_ptr.stmt_index].data.s_import; + if (items.len > 0) { + if (stmt.items.len == 0) { + stmt.items = items; + } else { + stmt.items = try std.mem.concat(p.allocator, js_ast.ClauseItem, &.{ stmt.items, items }); + } + } + if (namespace_ref.isValid()) { + if (!stmt.namespace_ref.isValid()) { + stmt.namespace_ref = namespace_ref; + return namespace_ref; + } else { + // Erase this namespace ref, but since it may be used in + // existing AST trees, a link must be established. 
+ const symbol = &p.symbols.items[namespace_ref.innerIndex()]; + symbol.use_count_estimate = 0; + symbol.link = stmt.namespace_ref; + if (@hasField(@typeInfo(@TypeOf(p)).pointer.child, "symbol_uses")) { + _ = p.symbol_uses.swapRemove(namespace_ref); + } + } + } + if (stmt.star_name_loc == null) if (star_name_loc) |stl| { + stmt.star_name_loc = stl; + }; + if (stmt.default_name == null) if (default_name) |dn| { + stmt.default_name = dn; + }; + return stmt.namespace_ref; + } + + try ctx.stmts.append(p.allocator, Stmt.alloc(S.Import, .{ + .import_record_index = import_record_index, + .is_single_line = true, + .default_name = default_name, + .items = items, + .namespace_ref = namespace_ref, + .star_name_loc = star_name_loc, + }, loc)); + + gop.value_ptr.* = .{ .stmt_index = @intCast(ctx.stmts.items.len - 1) }; + return namespace_ref; + } + fn visitBindingToExport( ctx: *ConvertESMExportsForHmr, p: anytype, @@ -24316,6 +24426,18 @@ pub const ConvertESMExportsForHmr = struct { } pub fn finalize(ctx: *ConvertESMExportsForHmr, p: anytype, all_parts: []js_ast.Part) !void { + if (ctx.export_star_props.items.len > 0) { + if (ctx.export_props.items.len == 0) { + ctx.export_props = ctx.export_star_props; + } else { + const export_star_len = ctx.export_star_props.items.len; + try ctx.export_props.ensureUnusedCapacity(p.allocator, export_star_len); + const len = ctx.export_props.items.len; + ctx.export_props.items.len += export_star_len; + bun.copy(G.Property, ctx.export_props.items[export_star_len..], ctx.export_props.items[0..len]); + @memcpy(ctx.export_props.items[0..export_star_len], ctx.export_star_props.items); + } + } if (ctx.export_props.items.len > 0) { try ctx.stmts.append(p.allocator, Stmt.alloc(S.SExpr, .{ .value = Expr.assign( @@ -24335,6 +24457,8 @@ pub const ConvertESMExportsForHmr = struct { try ctx.last_part.declared_symbols.append(p.allocator, .{ .ref = p.module_ref, .is_top_level = true }); } + // TODO: emit a marker for HMR runtime to know the non-star export 
fields. + // TODO: this is a tiny mess. it is honestly trying to hard to merge all parts into one for (all_parts[0 .. all_parts.len - 1]) |*part| { try ctx.last_part.declared_symbols.appendList(p.allocator, part.declared_symbols); diff --git a/src/js_printer.zig b/src/js_printer.zig index 42bfb573a6a9fd..a9fcaa86406492 100644 --- a/src/js_printer.zig +++ b/src/js_printer.zig @@ -450,6 +450,7 @@ pub const Options = struct { module_hash: u32 = 0, source_path: ?fs.Path = null, allocator: std.mem.Allocator = default_allocator, + source_map_allocator: ?std.mem.Allocator = null, source_map_handler: ?SourceMapHandler = null, source_map_builder: ?*bun.sourcemap.Chunk.Builder = null, css_import_behavior: Api.CssInJsBehavior = Api.CssInJsBehavior.facade, @@ -1856,12 +1857,17 @@ fn NewPrinter( p.printSpaceBeforeIdentifier(); // Allow it to fail at runtime, if it should - p.print("import("); - p.printImportRecordPath(record); + if (module_type != .internal_bake_dev) { + p.print("import("); + p.printImportRecordPath(record); + } else { + p.printSymbol(p.options.commonjs_module_ref); + p.print(".dynamicImport("); + const path = record.path; + p.printStringLiteralUTF8(path.pretty, false); + } if (!import_options.isMissing()) { - // since we previously stripped type, it is a breaking change to - // enable this for non-bun platforms p.printWhitespacer(ws(", ")); p.printExpr(import_options, .comma, .{}); } @@ -2016,6 +2022,7 @@ fn NewPrinter( switch (expr.data) { .e_missing => {}, .e_undefined => { + p.addSourceMapping(expr.loc); p.printUndefined(expr.loc, level); }, .e_super => { @@ -2356,8 +2363,6 @@ fn NewPrinter( p.printExpr(e.expr, .comma, ExprFlag.None()); if (!e.options.isMissing()) { - // since we previously stripped type, it is a breaking change to - // enable this for non-bun platforms p.printWhitespacer(ws(", ")); p.printExpr(e.options, .comma, .{}); } @@ -2558,8 +2563,8 @@ fn NewPrinter( } p.printSpaceBeforeIdentifier(); + p.addSourceMapping(expr.loc); if 
(e.func.flags.contains(.is_async)) { - p.addSourceMapping(expr.loc); p.print("async "); } p.print("function"); @@ -2890,7 +2895,6 @@ fn NewPrinter( // } if (!didPrint) { - // assert(p.options.module_type != .internal_bake_dev); p.printSpaceBeforeIdentifier(); p.addSourceMapping(expr.loc); p.printSymbol(e.ref); @@ -2950,6 +2954,7 @@ fn NewPrinter( if (entry.is_keyword) { p.printSpaceBeforeIdentifier(); + p.addSourceMapping(expr.loc); p.print(entry.text); p.printSpace(); } else { @@ -3624,7 +3629,6 @@ fn NewPrinter( p.prev_stmt_tag = std.meta.activeTag(stmt.data); } - p.addSourceMapping(stmt.loc); switch (stmt.data) { .s_comment => |s| { p.printIndentedComment(s.text); @@ -3632,6 +3636,7 @@ fn NewPrinter( .s_function => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); const name = s.func.name orelse Output.panic("Internal error: expected func to have a name ref\n{any}", .{s}); const nameRef = name.ref orelse Output.panic("Internal error: expected func to have a name\n{any}", .{s}); @@ -3647,9 +3652,10 @@ fn NewPrinter( if (s.func.flags.contains(.is_generator)) { p.print("*"); p.printSpace(); + } else { + p.printSpaceBeforeIdentifier(); } - p.printSpaceBeforeIdentifier(); p.addSourceMapping(name.loc); p.printSymbol(nameRef); p.printFunc(s.func); @@ -3679,6 +3685,7 @@ fn NewPrinter( p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); const nameRef = s.class.class_name.?.ref.?; if (s.is_export) { if (!rewrite_esm_to_cjs) { @@ -3709,12 +3716,14 @@ fn NewPrinter( if (p.prev_stmt_tag == .s_empty and p.options.indent.count == 0) return; p.printIndent(); + p.addSourceMapping(stmt.loc); p.print(";"); p.printNewline(); }, .s_export_default => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("export default "); switch (s.value) { @@ -3781,6 +3790,7 @@ fn NewPrinter( } p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); if (s.alias != 
null) p.printWhitespacer(comptime ws("export *").append(" as ")) @@ -3800,6 +3810,7 @@ fn NewPrinter( if (rewrite_esm_to_cjs) { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); switch (s.items.len) { 0 => {}, @@ -3860,6 +3871,7 @@ fn NewPrinter( p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("export"); p.printSpace(); @@ -3959,6 +3971,7 @@ fn NewPrinter( .s_export_from => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); const import_record = p.importRecord(s.import_record_index); @@ -3998,6 +4011,9 @@ fn NewPrinter( p.printSemicolonAfterStatement(); }, .s_local => |s| { + p.printIndent(); + p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); switch (s.kind) { .k_const => { p.printDeclStmt(s.is_export, "const", s.decls.slice()); @@ -4018,11 +4034,12 @@ fn NewPrinter( }, .s_if => |s| { p.printIndent(); - p.printIf(s); + p.printIf(s, stmt.loc); }, .s_do_while => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("do"); switch (s.body.data) { .s_block => { @@ -4050,6 +4067,7 @@ fn NewPrinter( .s_for_in => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("for"); p.printSpace(); p.print("("); @@ -4065,6 +4083,7 @@ fn NewPrinter( .s_for_of => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("for"); if (s.is_await) { p.print(" await"); @@ -4084,6 +4103,7 @@ fn NewPrinter( .s_while => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("while"); p.printSpace(); p.print("("); @@ -4094,6 +4114,7 @@ fn NewPrinter( .s_with => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("with"); p.printSpace(); p.print("("); @@ -4103,10 +4124,10 @@ fn NewPrinter( }, .s_label => |s| { if (!p.options.minify_whitespace and p.options.indent.count > 0) 
{ - p.addSourceMapping(stmt.loc); p.printIndent(); } p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.printSymbol(s.name.ref orelse Output.panic("Internal error: expected label to have a name {any}", .{s})); p.print(":"); p.printBody(s.stmt); @@ -4114,12 +4135,14 @@ fn NewPrinter( .s_try => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("try"); p.printSpace(); p.printBlock(s.body_loc, s.body, null); if (s.catch_) |catch_| { p.printSpace(); + p.addSourceMapping(catch_.loc); p.print("catch"); if (catch_.binding) |binding| { p.printSpace(); @@ -4128,7 +4151,7 @@ fn NewPrinter( p.print(")"); } p.printSpace(); - p.printBlock(catch_.loc, catch_.body, null); + p.printBlock(catch_.body_loc, catch_.body, null); } if (s.finally) |finally| { @@ -4143,6 +4166,7 @@ fn NewPrinter( .s_for => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("for"); p.printSpace(); p.print("("); @@ -4170,6 +4194,7 @@ fn NewPrinter( .s_switch => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("switch"); p.printSpace(); p.print("("); @@ -4232,7 +4257,7 @@ fn NewPrinter( .css => { switch (p.options.css_import_behavior) { .facade => { - + p.addSourceMapping(stmt.loc); // This comment exists to let tooling authors know which files CSS originated from // To parse this, you just look for a line that starts with //@import url(" p.print("//@import url(\""); @@ -4252,6 +4277,7 @@ fn NewPrinter( }, .auto_onimportcss, .facade_onimportcss => { + p.addSourceMapping(stmt.loc); p.print("globalThis.document?.dispatchEvent(new CustomEvent(\"onimportcss\", {detail: "); p.printStringLiteralUTF8(record.path.text, false); p.print("}));\n"); @@ -4268,6 +4294,7 @@ fn NewPrinter( return; }, .import_path => { + p.addSourceMapping(stmt.loc); if (s.default_name) |name| { p.print("var "); p.printSymbol(name.ref.?); @@ -4289,6 +4316,8 @@ fn NewPrinter( .napi_module => { if 
(comptime is_bun_platform) { p.printIndent(); + p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("var "); p.printSymbol(s.namespace_ref); p.@"print = "(); @@ -4305,6 +4334,7 @@ fn NewPrinter( p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); if (comptime is_bun_platform) { switch (record.tag) { @@ -4496,6 +4526,7 @@ fn NewPrinter( .s_debugger => { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("debugger"); p.printSemicolonAfterStatement(); }, @@ -4505,12 +4536,14 @@ fn NewPrinter( p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.printStringLiteralUTF8(s.value, false); p.printSemicolonAfterStatement(); }, .s_break => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("break"); if (s.label) |label| { p.print(" "); @@ -4522,6 +4555,7 @@ fn NewPrinter( .s_continue => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("continue"); if (s.label) |label| { @@ -4533,6 +4567,7 @@ fn NewPrinter( .s_return => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("return"); if (s.value) |value| { @@ -4544,6 +4579,7 @@ fn NewPrinter( .s_throw => |s| { p.printIndent(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(stmt.loc); p.print("throw"); p.printSpace(); p.printExpr(s.value, .lowest, ExprFlag.None()); @@ -4551,7 +4587,6 @@ fn NewPrinter( }, .s_expr => |s| { if (!p.options.minify_whitespace and p.options.indent.count > 0) { - p.addSourceMapping(stmt.loc); p.printIndent(); } @@ -4784,8 +4819,9 @@ fn NewPrinter( }, } } - pub fn printIf(p: *Printer, s: *const S.If) void { + pub fn printIf(p: *Printer, s: *const S.If, loc: logger.Loc) void { p.printSpaceBeforeIdentifier(); + p.addSourceMapping(loc); p.print("if"); p.printSpace(); p.print("("); @@ -4838,6 +4874,7 @@ fn NewPrinter( if (s.no) |no_block| { 
p.printSemicolonIfNeeded(); p.printSpaceBeforeIdentifier(); + p.addSourceMapping(no_block.loc); p.print("else"); switch (no_block.data) { @@ -4847,7 +4884,7 @@ fn NewPrinter( p.printNewline(); }, .s_if => { - p.printIf(no_block.data.s_if); + p.printIf(no_block.data.s_if, no_block.loc); }, else => { p.printNewline(); @@ -4934,9 +4971,6 @@ fn NewPrinter( } pub fn printDeclStmt(p: *Printer, is_export: bool, comptime keyword: string, decls: []G.Decl) void { - p.printIndent(); - p.printSpaceBeforeIdentifier(); - if (!rewrite_esm_to_cjs and is_export) { p.print("export "); } @@ -5637,8 +5671,8 @@ pub fn getSourceMapBuilder( return undefined; return .{ - .source_map = SourceMap.Chunk.Builder.SourceMapper.init( - opts.allocator, + .source_map = .init( + opts.source_map_allocator orelse opts.allocator, is_bun_platform and generate_source_map == .lazy, ), .cover_lines_without_mappings = true, @@ -5646,15 +5680,14 @@ pub fn getSourceMapBuilder( .prepend_count = is_bun_platform and generate_source_map == .lazy, .line_offset_tables = opts.line_offset_tables orelse brk: { if (generate_source_map == .lazy) break :brk SourceMap.LineOffsetTable.generate( - opts.allocator, + opts.source_map_allocator orelse opts.allocator, source.contents, @as( i32, @intCast(tree.approximate_newline_count), ), ); - - break :brk SourceMap.LineOffsetTable.List{}; + break :brk .empty; }, }; } @@ -5899,7 +5932,7 @@ pub fn print( pub fn printWithWriter( comptime Writer: type, - _writer: Writer, + writer: Writer, target: options.Target, ast: Ast, source: *const logger.Source, @@ -5912,7 +5945,7 @@ pub fn printWithWriter( return switch (target.isBun()) { inline else => |is_bun| printWithWriterAndPlatform( Writer, - _writer, + writer, is_bun, ast, source, @@ -5928,7 +5961,7 @@ pub fn printWithWriter( /// The real one pub fn printWithWriterAndPlatform( comptime Writer: type, - _writer: Writer, + writer: Writer, comptime is_bun_platform: bool, ast: Ast, source: *const logger.Source, @@ -5951,7 +5984,6 @@ pub 
fn printWithWriterAndPlatform( false, generate_source_maps, ); - const writer = _writer; var printer = PrinterType.init( writer, import_records, @@ -5964,7 +5996,7 @@ pub fn printWithWriterAndPlatform( defer printer.binary_expression_stack.clearAndFree(); defer printer.temporary_bindings.deinit(bun.default_allocator); - defer _writer.* = printer.writer.*; + defer writer.* = printer.writer.*; defer { imported_module_ids_list = printer.imported_module_ids; } @@ -5972,6 +6004,9 @@ pub fn printWithWriterAndPlatform( if (opts.module_type == .internal_bake_dev) { printer.indent(); printer.printIndent(); + if (!ast.top_level_await_keyword.isEmpty()) { + printer.print("async "); + } printer.printStringLiteralUTF8(source.path.pretty, false); const func = parts[0].stmts[0].data.s_expr.value.data.e_function.func; if (!(func.body.stmts.len == 1 and func.body.stmts[0].data == .s_lazy_export)) { diff --git a/src/node-fallbacks/bun.lock b/src/node-fallbacks/bun.lock index 26187aaf6f5de7..1730d5da2dc3be 100644 --- a/src/node-fallbacks/bun.lock +++ b/src/node-fallbacks/bun.lock @@ -19,6 +19,7 @@ "process": "^0.11.10", "punycode": "^2.1.1", "querystring-es3": "^1.0.0-0", + "react-refresh": "^0.16.0", "readable-stream": "^4.1.0", "stream-http": "^3.2.0", "string_decoder": "^1.3.0", @@ -31,7 +32,7 @@ }, }, "packages": { - "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.14.54", "", { "os": "linux", "cpu": "none" }, "sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw=="], + "@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.14.54", "", { "os":"linux", "cpu":"none" }, "sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw=="], "abort-controller": ["abort-controller@3.0.0", "", { "dependencies": { "event-target-shim": "^5.0.0" } }, "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg=="], @@ -95,45 +96,45 @@ "esbuild": ["esbuild@0.14.54", "", { 
"dependencies": { "@esbuild/linux-loong64": "0.14.54", "esbuild-android-64": "0.14.54", "esbuild-android-arm64": "0.14.54", "esbuild-darwin-64": "0.14.54", "esbuild-darwin-arm64": "0.14.54", "esbuild-freebsd-64": "0.14.54", "esbuild-freebsd-arm64": "0.14.54", "esbuild-linux-32": "0.14.54", "esbuild-linux-64": "0.14.54", "esbuild-linux-arm": "0.14.54", "esbuild-linux-arm64": "0.14.54", "esbuild-linux-mips64le": "0.14.54", "esbuild-linux-ppc64le": "0.14.54", "esbuild-linux-riscv64": "0.14.54", "esbuild-linux-s390x": "0.14.54", "esbuild-netbsd-64": "0.14.54", "esbuild-openbsd-64": "0.14.54", "esbuild-sunos-64": "0.14.54", "esbuild-windows-32": "0.14.54", "esbuild-windows-64": "0.14.54", "esbuild-windows-arm64": "0.14.54" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA=="], - "esbuild-android-64": ["esbuild-android-64@0.14.54", "", { "os": "android", "cpu": "x64" }, "sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ=="], + "esbuild-android-64": ["esbuild-android-64@0.14.54", "", { "os":"android", "cpu":"x64" }, "sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ=="], - "esbuild-android-arm64": ["esbuild-android-arm64@0.14.54", "", { "os": "android", "cpu": "arm64" }, "sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg=="], + "esbuild-android-arm64": ["esbuild-android-arm64@0.14.54", "", { "os":"android", "cpu":"arm64" }, "sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg=="], - "esbuild-darwin-64": ["esbuild-darwin-64@0.14.54", "", { "os": "darwin", "cpu": "x64" }, "sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug=="], + "esbuild-darwin-64": ["esbuild-darwin-64@0.14.54", "", { "os":"darwin", "cpu":"x64" }, 
"sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug=="], - "esbuild-darwin-arm64": ["esbuild-darwin-arm64@0.14.54", "", { "os": "darwin", "cpu": "arm64" }, "sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw=="], + "esbuild-darwin-arm64": ["esbuild-darwin-arm64@0.14.54", "", { "os":"darwin", "cpu":"arm64" }, "sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw=="], - "esbuild-freebsd-64": ["esbuild-freebsd-64@0.14.54", "", { "os": "freebsd", "cpu": "x64" }, "sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg=="], + "esbuild-freebsd-64": ["esbuild-freebsd-64@0.14.54", "", { "os":"freebsd", "cpu":"x64" }, "sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg=="], - "esbuild-freebsd-arm64": ["esbuild-freebsd-arm64@0.14.54", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q=="], + "esbuild-freebsd-arm64": ["esbuild-freebsd-arm64@0.14.54", "", { "os":"freebsd", "cpu":"arm64" }, "sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q=="], - "esbuild-linux-32": ["esbuild-linux-32@0.14.54", "", { "os": "linux", "cpu": "ia32" }, "sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw=="], + "esbuild-linux-32": ["esbuild-linux-32@0.14.54", "", { "os":"linux", "cpu":"ia32" }, "sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw=="], - "esbuild-linux-64": ["esbuild-linux-64@0.14.54", "", { "os": "linux", "cpu": "x64" }, "sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg=="], + "esbuild-linux-64": ["esbuild-linux-64@0.14.54", "", { "os":"linux", "cpu":"x64" }, 
"sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg=="], - "esbuild-linux-arm": ["esbuild-linux-arm@0.14.54", "", { "os": "linux", "cpu": "arm" }, "sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw=="], + "esbuild-linux-arm": ["esbuild-linux-arm@0.14.54", "", { "os":"linux", "cpu":"arm" }, "sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw=="], - "esbuild-linux-arm64": ["esbuild-linux-arm64@0.14.54", "", { "os": "linux", "cpu": "arm64" }, "sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig=="], + "esbuild-linux-arm64": ["esbuild-linux-arm64@0.14.54", "", { "os":"linux", "cpu":"arm64" }, "sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig=="], - "esbuild-linux-mips64le": ["esbuild-linux-mips64le@0.14.54", "", { "os": "linux", "cpu": "none" }, "sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw=="], + "esbuild-linux-mips64le": ["esbuild-linux-mips64le@0.14.54", "", { "os":"linux", "cpu":"none" }, "sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw=="], - "esbuild-linux-ppc64le": ["esbuild-linux-ppc64le@0.14.54", "", { "os": "linux", "cpu": "ppc64" }, "sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ=="], + "esbuild-linux-ppc64le": ["esbuild-linux-ppc64le@0.14.54", "", { "os":"linux", "cpu":"ppc64" }, "sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ=="], - "esbuild-linux-riscv64": ["esbuild-linux-riscv64@0.14.54", "", { "os": "linux", "cpu": "none" }, "sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg=="], + "esbuild-linux-riscv64": ["esbuild-linux-riscv64@0.14.54", "", { "os":"linux", "cpu":"none" }, 
"sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg=="], - "esbuild-linux-s390x": ["esbuild-linux-s390x@0.14.54", "", { "os": "linux", "cpu": "s390x" }, "sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA=="], + "esbuild-linux-s390x": ["esbuild-linux-s390x@0.14.54", "", { "os":"linux", "cpu":"s390x" }, "sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA=="], - "esbuild-netbsd-64": ["esbuild-netbsd-64@0.14.54", "", { "os": "none", "cpu": "x64" }, "sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w=="], + "esbuild-netbsd-64": ["esbuild-netbsd-64@0.14.54", "", { "os":"none", "cpu":"x64" }, "sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w=="], - "esbuild-openbsd-64": ["esbuild-openbsd-64@0.14.54", "", { "os": "openbsd", "cpu": "x64" }, "sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw=="], + "esbuild-openbsd-64": ["esbuild-openbsd-64@0.14.54", "", { "os":"openbsd", "cpu":"x64" }, "sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw=="], - "esbuild-sunos-64": ["esbuild-sunos-64@0.14.54", "", { "os": "sunos", "cpu": "x64" }, "sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw=="], + "esbuild-sunos-64": ["esbuild-sunos-64@0.14.54", "", { "os":"sunos", "cpu":"x64" }, "sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw=="], - "esbuild-windows-32": ["esbuild-windows-32@0.14.54", "", { "os": "win32", "cpu": "ia32" }, "sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w=="], + "esbuild-windows-32": ["esbuild-windows-32@0.14.54", "", { "os":"win32", "cpu":"ia32" }, 
"sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w=="], - "esbuild-windows-64": ["esbuild-windows-64@0.14.54", "", { "os": "win32", "cpu": "x64" }, "sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ=="], + "esbuild-windows-64": ["esbuild-windows-64@0.14.54", "", { "os":"win32", "cpu":"x64" }, "sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ=="], - "esbuild-windows-arm64": ["esbuild-windows-arm64@0.14.54", "", { "os": "win32", "cpu": "arm64" }, "sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg=="], + "esbuild-windows-arm64": ["esbuild-windows-arm64@0.14.54", "", { "os":"win32", "cpu":"arm64" }, "sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg=="], "event-target-shim": ["event-target-shim@5.0.1", "", {}, "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ=="], @@ -215,6 +216,8 @@ "randomfill": ["randomfill@1.0.4", "", { "dependencies": { "randombytes": "^2.0.5", "safe-buffer": "^5.1.0" } }, "sha512-87lcbR8+MhcWcUiQ+9e+Rwx8MyR2P7qnt15ynUlbm3TU/fjbgz4GsvfSUDTemtCCtVCqb4ZcEFlyPNTh9bBTLw=="], + "react-refresh": ["react-refresh@0.16.0", "", {}, "sha512-FPvF2XxTSikpJxcr+bHut2H4gJ17+18Uy20D5/F+SKzFap62R3cM5wH6b8WN3LyGSYeQilLEcJcR1fjBSI2S1A=="], + "readable-stream": ["readable-stream@4.3.0", "", { "dependencies": { "abort-controller": "^3.0.0", "buffer": "^6.0.3", "events": "^3.3.0", "process": "^0.11.10" } }, "sha512-MuEnA0lbSi7JS8XM+WNJlWZkHAAdm7gETHdFK//Q/mChGyj2akEFtdLZh32jSdkWGbRwCW9pn6g3LWDdDeZnBQ=="], "ripemd160": ["ripemd160@2.0.2", "", { "dependencies": { "hash-base": "^3.0.0", "inherits": "^2.0.1" } }, "sha512-ii4iagi25WusVoiC4B4lq7pbXfAp3D9v5CwfkY33vffw2+pkDjY1D8GaN7spsxvCSx8dkPqOZCEZyfxcmJG2IA=="], diff --git a/src/node-fallbacks/package.json b/src/node-fallbacks/package.json index 
4c54b4c438df99..99dedbb47bab27 100644 --- a/src/node-fallbacks/package.json +++ b/src/node-fallbacks/package.json @@ -6,7 +6,8 @@ "scripts": { "test": "echo \"Error: no test specified\" && exit 1", "build-gen": "bash -c 'esbuild --bundle *.js --outdir=bun --format=esm --platform=browser --external:buffer --external:stream --external:util --external:util/ --external:assert'", - "build": "bash -c 'esbuild --bundle *.js --outdir=out --format=esm --minify --platform=browser'" + "build": "bash -c 'esbuild --bundle *.js --outdir=out --format=esm --minify --platform=browser'", + "build-react-refresh": "NODE_ENV=development bun build --target=browser --external=* --format=cjs --outfile=out/react-refresh.js ./node_modules/react-refresh/cjs/react-refresh-runtime.development.js --define=process.env.NODE_ENV=development --minify" }, "author": "", "license": "ISC", @@ -26,6 +27,7 @@ "process": "^0.11.10", "punycode": "^2.1.1", "querystring-es3": "^1.0.0-0", + "react-refresh": "^0.16.0", "readable-stream": "^4.1.0", "stream-http": "^3.2.0", "string_decoder": "^1.3.0", diff --git a/src/renamer.zig b/src/renamer.zig index c41e4ca66c2f88..ac4c38cda3f03b 100644 --- a/src/renamer.zig +++ b/src/renamer.zig @@ -547,7 +547,7 @@ pub const NumberRenamer = struct { .fixed_buffer_allocator = undefined, }; renamer.name_temp_allocator = renamer.name_stack_fallback.get(); - renamer.number_scope_pool = bun.HiveArray(NumberScope, 128).Fallback.init(renamer.arena.allocator()); + renamer.number_scope_pool = .init(renamer.arena.allocator()); renamer.root.name_counts = root_names; if (comptime Environment.allow_assert and !Environment.isWindows) { if (std.posix.getenv("BUN_DUMP_SYMBOLS") != null) @@ -597,7 +597,7 @@ pub const NumberRenamer = struct { std.sort.pdq(u32, sorted.items, {}, std.sort.asc(u32)); for (sorted.items) |inner_index| { - r.assignName(s, Ref.init(@as(Ref.Int, @intCast(inner_index)), source_index, false)); + r.assignName(s, Ref.init(@intCast(inner_index), source_index, false)); } 
} diff --git a/src/resolver/resolve_path.zig b/src/resolver/resolve_path.zig index c1f9b3e9959ed8..66038e5d37e721 100644 --- a/src/resolver/resolve_path.zig +++ b/src/resolver/resolve_path.zig @@ -86,8 +86,7 @@ pub fn isParentOrEqual(parent_: []const u8, child: []const u8) ParentEqual { return .unrelated; } -pub fn getIfExistsLongestCommonPathGeneric(input: []const []const u8, comptime _platform: Platform) ?[]const u8 { - const platform = comptime _platform.resolve(); +pub fn getIfExistsLongestCommonPathGeneric(input: []const []const u8, comptime platform: Platform) ?[]const u8 { const separator = comptime platform.separator(); const isPathSeparator = comptime platform.getSeparatorFunc(); @@ -178,8 +177,7 @@ pub fn getIfExistsLongestCommonPathGeneric(input: []const []const u8, comptime _ // TODO: is it faster to determine longest_common_separator in the while loop // or as an extra step at the end? // only boether to check if this function appears in benchmarking -pub fn longestCommonPathGeneric(input: []const []const u8, comptime _platform: Platform) []const u8 { - const platform = comptime _platform.resolve(); +pub fn longestCommonPathGeneric(input: []const []const u8, comptime platform: Platform) []const u8 { const separator = comptime platform.separator(); const isPathSeparator = comptime platform.getSeparatorFunc(); @@ -318,9 +316,8 @@ pub fn relativeToCommonPath( normalized_to_: []const u8, buf: []u8, comptime always_copy: bool, - comptime _platform: Platform, + comptime platform: Platform, ) []const u8 { - const platform = comptime _platform.resolve(); var normalized_from = normalized_from_; var normalized_to = normalized_to_; const win_root_len = if (platform == .windows) k: { @@ -463,8 +460,7 @@ pub fn relativeToCommonPath( return out_slice; } -pub fn relativeNormalizedBuf(buf: []u8, from: []const u8, to: []const u8, comptime _platform: Platform, comptime always_copy: bool) []const u8 { - const platform = comptime _platform.resolve(); +pub fn 
relativeNormalizedBuf(buf: []u8, from: []const u8, to: []const u8, comptime platform: Platform, comptime always_copy: bool) []const u8 { if ((if (platform == .windows) strings.eqlCaseInsensitiveASCII(from, to, true) else @@ -480,11 +476,11 @@ pub fn relativeNormalizedBuf(buf: []u8, from: []const u8, to: []const u8, compti } pub fn relativeNormalized(from: []const u8, to: []const u8, comptime platform: Platform, comptime always_copy: bool) []const u8 { - return relativeNormalizedBuf(&relative_to_common_path_buf, from, to, comptime platform.resolve(), always_copy); + return relativeNormalizedBuf(&relative_to_common_path_buf, from, to, platform, always_copy); } pub fn dirname(str: []const u8, comptime platform: Platform) []const u8 { - switch (comptime platform.resolve()) { + switch (platform) { .loose => { const separator = lastIndexOfSeparatorLoose(str) orelse return ""; return str[0..separator]; @@ -499,7 +495,7 @@ pub fn dirname(str: []const u8, comptime platform: Platform) []const u8 { const separator = lastIndexOfSeparatorWindows(str) orelse return std.fs.path.diskDesignatorWindows(str); return str[0..separator]; }, - else => @compileError("unreachable"), + else => @compileError("not implemented"), } } @@ -531,8 +527,7 @@ pub fn relativeBufZ(buf: []u8, from: []const u8, to: []const u8) [:0]const u8 { return buf[0..rel.len :0]; } -pub fn relativePlatformBuf(buf: []u8, from: []const u8, to: []const u8, comptime _platform: Platform, comptime always_copy: bool) []const u8 { - const platform = comptime _platform.resolve(); +pub fn relativePlatformBuf(buf: []u8, from: []const u8, to: []const u8, comptime platform: Platform, comptime always_copy: bool) []const u8 { const normalized_from = if (platform.isAbsolute(from)) brk: { if (platform == .loose and bun.Environment.isWindows) { // we want to invoke the windows resolution behavior but end up with a @@ -577,11 +572,11 @@ pub fn relativePlatformBuf(buf: []u8, from: []const u8, to: []const u8, comptime } pub fn 
relativePlatform(from: []const u8, to: []const u8, comptime platform: Platform, comptime always_copy: bool) []const u8 { - return relativePlatformBuf(&relative_to_common_path_buf, from, to, comptime platform.resolve(), always_copy); + return relativePlatformBuf(&relative_to_common_path_buf, from, to, platform, always_copy); } pub fn relativeAlloc(allocator: std.mem.Allocator, from: []const u8, to: []const u8) ![]const u8 { - const result = relativePlatform(from, to, Platform.current, false); + const result = relativePlatform(from, to, .auto, false); return try allocator.dupe(u8, result); } @@ -961,20 +956,24 @@ pub fn normalizeStringGenericTZ( } pub const Platform = enum { - auto, loose, windows, posix, nt, + pub const auto: Platform = switch (bun.Environment.os) { + .windows => .windows, + .linux, .mac => .posix, + .wasm => .loose, + }; + pub fn isAbsolute(comptime platform: Platform, path: []const u8) bool { return isAbsoluteT(platform, u8, path); } pub fn isAbsoluteT(comptime platform: Platform, comptime T: type, path: []const T) bool { - if (comptime T != u8 and T != u16) @compileError("Unsupported type given to isAbsoluteT"); - return switch (comptime platform) { - .auto => (comptime platform.resolve()).isAbsoluteT(T, path), + if (T != u8 and T != u16) @compileError("Unsupported type given to isAbsoluteT"); + return switch (platform) { .posix => path.len > 0 and path[0] == '/', .nt, .windows, @@ -986,116 +985,73 @@ pub const Platform = enum { }; } - pub fn separator(comptime platform: Platform) u8 { - return comptime switch (platform) { - .auto => platform.resolve().separator(), + pub inline fn separator(comptime platform: Platform) u8 { + return switch (platform) { .loose, .posix => std.fs.path.sep_posix, .nt, .windows => std.fs.path.sep_windows, }; } - pub fn separatorString(comptime platform: Platform) []const u8 { - return comptime switch (platform) { - .auto => platform.resolve().separatorString(), + pub inline fn separatorString(comptime platform: 
Platform) []const u8 { + return switch (platform) { .loose, .posix => std.fs.path.sep_str_posix, .nt, .windows => std.fs.path.sep_str_windows, }; } - pub const current: Platform = switch (@import("builtin").target.os.tag) { - .windows => Platform.windows, - else => Platform.posix, - }; - - pub fn getSeparatorFunc(comptime _platform: Platform) IsSeparatorFunc { - switch (comptime _platform.resolve()) { - .auto => @compileError("unreachable"), - .loose => { - return isSepAny; - }, - .nt, .windows => { - return isSepAny; - }, - .posix => { - return isSepPosix; - }, - } + pub fn getSeparatorFunc(comptime platform: Platform) IsSeparatorFunc { + return switch (platform) { + .loose => isSepAny, + .nt, .windows => isSepAny, + .posix => isSepPosix, + }; } - pub fn getSeparatorFuncT(comptime _platform: Platform) IsSeparatorFuncT { - switch (comptime _platform.resolve()) { - .auto => @compileError("unreachable"), - .loose => { - return isSepAnyT; - }, - .nt, .windows => { - return isSepAnyT; - }, - .posix => { - return isSepPosixT; - }, - } + pub fn getSeparatorFuncT(comptime platform: Platform) IsSeparatorFuncT { + return switch (platform) { + .loose => isSepAnyT, + .nt, .windows => isSepAnyT, + .posix => isSepPosixT, + }; } - pub fn getLastSeparatorFunc(comptime _platform: Platform) LastSeparatorFunction { - switch (comptime _platform.resolve()) { - .auto => @compileError("unreachable"), - .loose => { - return lastIndexOfSeparatorLoose; - }, - .nt, .windows => { - return lastIndexOfSeparatorWindows; - }, - .posix => { - return lastIndexOfSeparatorPosix; - }, - } + pub fn getLastSeparatorFunc(comptime platform: Platform) LastSeparatorFunction { + return switch (platform) { + .loose => lastIndexOfSeparatorLoose, + .nt, .windows => lastIndexOfSeparatorWindows, + .posix => lastIndexOfSeparatorPosix, + }; } - pub fn getLastSeparatorFuncT(comptime _platform: Platform) LastSeparatorFunctionT { - switch (comptime _platform.resolve()) { - .auto => @compileError("unreachable"), - 
.loose => { - return lastIndexOfSeparatorLooseT; - }, - .nt, .windows => { - return lastIndexOfSeparatorWindowsT; - }, - .posix => { - return lastIndexOfSeparatorPosixT; - }, - } + pub fn getLastSeparatorFuncT(comptime platform: Platform) LastSeparatorFunctionT { + return switch (platform) { + .loose => lastIndexOfSeparatorLooseT, + .nt, .windows => lastIndexOfSeparatorWindowsT, + .posix => lastIndexOfSeparatorPosixT, + }; } - pub inline fn isSeparator(comptime _platform: Platform, char: u8) bool { - return isSeparatorT(_platform, u8, char); + pub inline fn isSeparator(comptime platform: Platform, char: u8) bool { + return isSeparatorT(platform, u8, char); } - pub inline fn isSeparatorT(comptime _platform: Platform, comptime T: type, char: T) bool { - switch (comptime _platform.resolve()) { - .auto => @compileError("unreachable"), - .loose => { - return isSepAnyT(T, char); - }, - .nt, .windows => { - return isSepAnyT(T, char); - }, - .posix => { - return isSepPosixT(T, char); - }, - } + pub inline fn isSeparatorT(comptime platform: Platform, comptime T: type, char: T) bool { + return switch (platform) { + .loose => isSepAnyT(T, char), + .nt, .windows => isSepAnyT(T, char), + .posix => isSepPosixT(T, char), + }; } - pub fn trailingSeparator(comptime _platform: Platform) [2]u8 { - return comptime switch (_platform) { - .auto => _platform.resolve().trailingSeparator(), + pub fn trailingSeparator(comptime platform: Platform) [2]u8 { + return switch (platform) { .nt, .windows => ".\\".*, .posix, .loose => "./".*, }; } - pub fn leadingSeparatorIndex(comptime _platform: Platform, path: anytype) ?usize { - switch (comptime _platform.resolve()) { + pub fn leadingSeparatorIndex(comptime platform: Platform, path: anytype) ?usize { + switch (platform) { .nt, .windows => { if (path.len < 1) return null; @@ -1129,66 +1085,51 @@ pub const Platform = enum { return null; } }, - else => { - return leadingSeparatorIndex(.windows, path) orelse leadingSeparatorIndex(.posix, path); - }, 
+ .loose => return leadingSeparatorIndex(.windows, path) orelse + leadingSeparatorIndex(.posix, path), } } - - pub inline fn resolve(comptime _platform: Platform) Platform { - if (comptime _platform == .auto) { - return switch (@import("builtin").target.os.tag) { - .windows => Platform.windows, - - .freestanding, .emscripten, .other => Platform.loose, - - else => Platform.posix, - }; - } - - return _platform; - } }; -pub fn normalizeString(str: []const u8, comptime allow_above_root: bool, comptime _platform: Platform) []u8 { - return normalizeStringBuf(str, &parser_buffer, allow_above_root, _platform, false); +pub fn normalizeString(str: []const u8, comptime allow_above_root: bool, comptime platform: Platform) []u8 { + return normalizeStringBuf(str, &parser_buffer, allow_above_root, platform, false); } -pub fn normalizeStringZ(str: []const u8, comptime allow_above_root: bool, comptime _platform: Platform) [:0]u8 { - const normalized = normalizeStringBuf(str, &parser_buffer, allow_above_root, _platform, false); +pub fn normalizeStringZ(str: []const u8, comptime allow_above_root: bool, comptime platform: Platform) [:0]u8 { + const normalized = normalizeStringBuf(str, &parser_buffer, allow_above_root, platform, false); parser_buffer[normalized.len] = 0; return parser_buffer[0..normalized.len :0]; } -pub fn normalizeBuf(str: []const u8, buf: []u8, comptime _platform: Platform) []u8 { - return normalizeBufT(u8, str, buf, _platform); +pub fn normalizeBuf(str: []const u8, buf: []u8, comptime platform: Platform) []u8 { + return normalizeBufT(u8, str, buf, platform); } -pub fn normalizeBufZ(str: []const u8, buf: []u8, comptime _platform: Platform) [:0]u8 { - const norm = normalizeBufT(u8, str, buf, _platform); +pub fn normalizeBufZ(str: []const u8, buf: []u8, comptime platform: Platform) [:0]u8 { + const norm = normalizeBufT(u8, str, buf, platform); buf[norm.len] = 0; return buf[0..norm.len :0]; } -pub fn normalizeBufT(comptime T: type, str: []const T, buf: []T, comptime 
_platform: Platform) []T { +pub fn normalizeBufT(comptime T: type, str: []const T, buf: []T, comptime platform: Platform) []T { if (str.len == 0) { buf[0] = '.'; return buf[0..1]; } - const is_absolute = _platform.isAbsoluteT(T, str); + const is_absolute = platform.isAbsoluteT(T, str); - const trailing_separator = _platform.getLastSeparatorFuncT()(T, str) == str.len - 1; + const trailing_separator = platform.getLastSeparatorFuncT()(T, str) == str.len - 1; if (is_absolute and trailing_separator) - return normalizeStringBufT(T, str, buf, true, _platform, true); + return normalizeStringBufT(T, str, buf, true, platform, true); if (is_absolute and !trailing_separator) - return normalizeStringBufT(T, str, buf, true, _platform, false); + return normalizeStringBufT(T, str, buf, true, platform, false); if (!is_absolute and !trailing_separator) - return normalizeStringBufT(T, str, buf, false, _platform, false); + return normalizeStringBufT(T, str, buf, false, platform, false); - return normalizeStringBufT(T, str, buf, false, _platform, true); + return normalizeStringBufT(T, str, buf, false, platform, true); } pub fn normalizeStringBuf( @@ -1209,9 +1150,8 @@ pub fn normalizeStringBufT( comptime platform: Platform, comptime preserve_trailing_slash: bool, ) []T { - switch (comptime platform.resolve()) { - .nt, .auto => @compileError("unreachable"), - + switch (platform) { + .nt => @compileError("not implemented"), .windows => { return normalizeStringWindowsT( T, @@ -1243,18 +1183,18 @@ pub fn normalizeStringBufT( } } -pub fn normalizeStringAlloc(allocator: std.mem.Allocator, str: []const u8, comptime allow_above_root: bool, comptime _platform: Platform) ![]const u8 { - return try allocator.dupe(u8, normalizeString(str, allow_above_root, _platform)); +pub fn normalizeStringAlloc(allocator: std.mem.Allocator, str: []const u8, comptime allow_above_root: bool, comptime platform: Platform) ![]const u8 { + return try allocator.dupe(u8, normalizeString(str, allow_above_root, 
platform)); } -pub fn joinAbs2(_cwd: []const u8, comptime _platform: Platform, part: anytype, part2: anytype) []const u8 { +pub fn joinAbs2(_cwd: []const u8, comptime platform: Platform, part: anytype, part2: anytype) []const u8 { const parts = [_][]const u8{ part, part2 }; - const slice = joinAbsString(_cwd, &parts, _platform); + const slice = joinAbsString(_cwd, &parts, platform); return slice; } -pub fn joinAbs(cwd: []const u8, comptime _platform: Platform, part: []const u8) []const u8 { - return joinAbsString(cwd, &.{part}, _platform); +pub fn joinAbs(cwd: []const u8, comptime platform: Platform, part: []const u8) []const u8 { + return joinAbsString(cwd, &.{part}, platform); } /// Convert parts of potentially invalid file paths into a single valid filpeath @@ -1262,12 +1202,12 @@ pub fn joinAbs(cwd: []const u8, comptime _platform: Platform, part: []const u8) /// This is the equivalent of path.resolve /// /// Returned path is stored in a temporary buffer. It must be copied if it needs to be stored. -pub fn joinAbsString(_cwd: []const u8, parts: anytype, comptime _platform: Platform) []const u8 { +pub fn joinAbsString(_cwd: []const u8, parts: anytype, comptime platform: Platform) []const u8 { return joinAbsStringBuf( _cwd, &parser_join_input_buffer, parts, - _platform, + platform, ); } @@ -1276,48 +1216,46 @@ pub fn joinAbsString(_cwd: []const u8, parts: anytype, comptime _platform: Platf /// This is the equivalent of path.resolve /// /// Returned path is stored in a temporary buffer. It must be copied if it needs to be stored. 
-pub fn joinAbsStringZ(_cwd: []const u8, parts: anytype, comptime _platform: Platform) [:0]const u8 { +pub fn joinAbsStringZ(_cwd: []const u8, parts: anytype, comptime platform: Platform) [:0]const u8 { return joinAbsStringBufZ( _cwd, &parser_join_input_buffer, parts, - _platform, + platform, ); } pub threadlocal var join_buf: [4096]u8 = undefined; -pub fn join(_parts: anytype, comptime _platform: Platform) []const u8 { - return joinStringBuf(&join_buf, _parts, _platform); +pub fn join(_parts: anytype, comptime platform: Platform) []const u8 { + return joinStringBuf(&join_buf, _parts, platform); } -pub fn joinZ(_parts: anytype, comptime _platform: Platform) [:0]const u8 { - return joinZBuf(&join_buf, _parts, _platform); +pub fn joinZ(_parts: anytype, comptime platform: Platform) [:0]const u8 { + return joinZBuf(&join_buf, _parts, platform); } -pub fn joinZBuf(buf: []u8, _parts: anytype, comptime _platform: Platform) [:0]const u8 { - const joined = joinStringBuf(buf[0 .. buf.len - 1], _parts, _platform); +pub fn joinZBuf(buf: []u8, _parts: anytype, comptime platform: Platform) [:0]const u8 { + const joined = joinStringBuf(buf[0 .. 
buf.len - 1], _parts, platform); assert(bun.isSliceInBuffer(joined, buf)); const start_offset = @intFromPtr(joined.ptr) - @intFromPtr(buf.ptr); buf[joined.len + start_offset] = 0; return buf[start_offset..][0..joined.len :0]; } -pub fn joinStringBuf(buf: []u8, parts: anytype, comptime _platform: Platform) []const u8 { - return joinStringBufT(u8, buf, parts, _platform); +pub fn joinStringBuf(buf: []u8, parts: anytype, comptime platform: Platform) []const u8 { + return joinStringBufT(u8, buf, parts, platform); } -pub fn joinStringBufW(buf: []u16, parts: anytype, comptime _platform: Platform) []const u16 { - return joinStringBufT(u16, buf, parts, _platform); +pub fn joinStringBufW(buf: []u16, parts: anytype, comptime platform: Platform) []const u16 { + return joinStringBufT(u16, buf, parts, platform); } -pub fn joinStringBufWZ(buf: []u16, parts: anytype, comptime _platform: Platform) [:0]const u16 { - const joined = joinStringBufT(u16, buf[0 .. buf.len - 1], parts, _platform); +pub fn joinStringBufWZ(buf: []u16, parts: anytype, comptime platform: Platform) [:0]const u16 { + const joined = joinStringBufT(u16, buf[0 .. 
buf.len - 1], parts, platform); assert(bun.isSliceInBufferT(u16, joined, buf)); const start_offset = @intFromPtr(joined.ptr) / 2 - @intFromPtr(buf.ptr) / 2; buf[joined.len + start_offset] = 0; return buf[start_offset..][0..joined.len :0]; } -pub fn joinStringBufT(comptime T: type, buf: []T, parts: anytype, comptime _platform: Platform) []const T { - const platform = comptime _platform.resolve(); - +pub fn joinStringBufT(comptime T: type, buf: []T, parts: anytype, comptime platform: Platform) []const T { var written: usize = 0; var temp_buf_: [4096]T = undefined; var temp_buf: []T = &temp_buf_; @@ -1367,26 +1305,26 @@ pub fn joinStringBufT(comptime T: type, buf: []T, parts: anytype, comptime _plat return normalizeStringNodeT(T, temp_buf[0..written], buf, platform); } -pub fn joinAbsStringBuf(cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) []const u8 { - return _joinAbsStringBuf(false, []const u8, cwd, buf, _parts, _platform); +pub fn joinAbsStringBuf(cwd: []const u8, buf: []u8, _parts: anytype, comptime platform: Platform) []const u8 { + return _joinAbsStringBuf(false, []const u8, cwd, buf, _parts, platform); } -pub fn joinAbsStringBufZ(cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) [:0]const u8 { - return _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, _platform); +pub fn joinAbsStringBufZ(cwd: []const u8, buf: []u8, _parts: anytype, comptime platform: Platform) [:0]const u8 { + return _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, platform); } -pub fn joinAbsStringBufZNT(cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) [:0]const u8 { - if ((_platform == .auto or _platform == .loose or _platform == .windows) and bun.Environment.isWindows) { +pub fn joinAbsStringBufZNT(cwd: []const u8, buf: []u8, _parts: anytype, comptime platform: Platform) [:0]const u8 { + if ((platform == .auto or platform == .loose or platform == .windows) and bun.Environment.isWindows) { return 
_joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, .nt); } - return _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, _platform); + return _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, platform); } -pub fn joinAbsStringBufZTrailingSlash(cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) [:0]const u8 { - const out = _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, _platform); - if (out.len + 2 < buf.len and out.len > 0 and out[out.len - 1] != _platform.separator()) { - buf[out.len] = _platform.separator(); +pub fn joinAbsStringBufZTrailingSlash(cwd: []const u8, buf: []u8, _parts: anytype, comptime platform: Platform) [:0]const u8 { + const out = _joinAbsStringBuf(true, [:0]const u8, cwd, buf, _parts, platform); + if (out.len + 2 < buf.len and out.len > 0 and out[out.len - 1] != platform.separator()) { + buf[out.len] = platform.separator(); buf[out.len + 1] = 0; return buf[0 .. out.len + 1 :0]; } @@ -1394,15 +1332,14 @@ pub fn joinAbsStringBufZTrailingSlash(cwd: []const u8, buf: []u8, _parts: anytyp return out; } -fn _joinAbsStringBuf(comptime is_sentinel: bool, comptime ReturnType: type, _cwd: []const u8, buf: []u8, _parts: anytype, comptime _platform: Platform) ReturnType { - const platform = comptime _platform.resolve(); +fn _joinAbsStringBuf(comptime is_sentinel: bool, comptime ReturnType: type, _cwd: []const u8, buf: []u8, _parts: anytype, comptime platform: Platform) ReturnType { if (platform == .windows or (bun.Environment.os == .windows and platform == .loose)) { return _joinAbsStringBufWindows(is_sentinel, ReturnType, _cwd, buf, _parts); } - if (comptime platform.resolve() == .nt) { + if (platform == .nt) { const end_path = _joinAbsStringBufWindows(is_sentinel, ReturnType, _cwd, buf[4..], _parts); buf[0..4].* = "\\\\?\\".*; if (comptime is_sentinel) { @@ -1744,9 +1681,8 @@ pub fn normalizeStringNodeT( comptime T: type, str: []const T, buf: []T, - comptime _platform: Platform, + comptime platform: Platform, 
) []const T { - const platform = comptime _platform.resolve(); if (str.len == 0) { buf[0] = '.'; return buf[0..1]; @@ -1764,7 +1700,7 @@ pub fn normalizeStringNodeT( str, buf_, true, - comptime platform.resolve().separator(), + comptime platform.separator(), comptime platform.getSeparatorFuncT(), false, ) else normalizeStringGenericT( @@ -1772,7 +1708,7 @@ pub fn normalizeStringNodeT( str, buf_, false, - comptime platform.resolve().separator(), + comptime platform.separator(), comptime platform.getSeparatorFuncT(), false, ); @@ -2065,7 +2001,7 @@ export fn ResolvePath__joinAbsStringBufCurrentPlatformBunString( globalObject.bunVM().transpiler.fs.top_level_dir, &join_buf, &.{str.slice()}, - comptime Platform.auto.resolve(), + .auto, ); return bun.String.createUTF8(out_slice); diff --git a/src/resolver/resolver.zig b/src/resolver/resolver.zig index b714d33ace181c..b8ce712bac62c7 100644 --- a/src/resolver/resolver.zig +++ b/src/resolver/resolver.zig @@ -1629,6 +1629,13 @@ pub const Resolver = struct { /// bust both the named file and a parent directory, because `./hello` can resolve /// to `./hello.js` or `./hello/index.js` pub fn bustDirCacheFromSpecifier(r: *ThisResolver, import_source: []const u8, specifier: []const u8) bool { + if (std.fs.path.isAbsolute(specifier)) { + const dir = bun.path.dirname(specifier, .auto); + const a = r.bustDirCache(dir); + const b = r.bustDirCache(specifier); + return a or b; + } + if (!(bun.strings.startsWith(specifier, "./") or bun.strings.startsWith(specifier, "../"))) return false; if (!std.fs.path.isAbsolute(import_source)) return false; diff --git a/src/sourcemap/sourcemap.zig b/src/sourcemap/sourcemap.zig index 50c37980c19953..bca26ab6acdcfa 100644 --- a/src/sourcemap/sourcemap.zig +++ b/src/sourcemap/sourcemap.zig @@ -1158,14 +1158,11 @@ const vlq_lookup_table: [256]VLQ = brk: { break :brk entries; }; -const vlq_max_in_bytes = 8; +/// Source map VLQ values are limited to i32 +/// Encoding min and max ints are "//////D" and 
"+/////D", respectively. +/// These are 7 bytes long. This makes the `VLQ` struct 8 bytes. +const vlq_max_in_bytes = 7; pub const VLQ = struct { - // We only need to worry about i32 - // That means the maximum VLQ-encoded value is 8 bytes - // because there are only 4 bits of number inside each VLQ value - // and it expects i32 - // therefore, it can never be more than 32 bits long - // I believe the actual number is 7 bytes long, however we can add an extra byte to be more cautious bytes: [vlq_max_in_bytes]u8, len: u4 = 0, @@ -1602,6 +1599,14 @@ pub const Chunk = struct { /// ignore empty chunks should_ignore: bool = true, + pub const empty: Chunk = .{ + .buffer = MutableString.initEmpty(bun.default_allocator), + .mappings_count = 0, + .end_state = .{}, + .final_generated_column = 0, + .should_ignore = true, + }; + pub fn printSourceMapContents( chunk: Chunk, source: Logger.Source, @@ -1660,13 +1665,14 @@ pub const Chunk = struct { return output; } + // TODO: remove the indirection by having generic functions for SourceMapFormat and NewBuilder. Source maps are always VLQ pub fn SourceMapFormat(comptime Type: type) type { return struct { ctx: Type, const Format = @This(); pub fn init(allocator: std.mem.Allocator, prepend_count: bool) Format { - return Format{ .ctx = Type.init(allocator, prepend_count) }; + return .{ .ctx = Type.init(allocator, prepend_count) }; } pub inline fn appendLineSeparator(this: *Format) anyerror!void { diff --git a/src/sys.zig b/src/sys.zig index b90fbbbe2095c7..1062fcffbcd297 100644 --- a/src/sys.zig +++ b/src/sys.zig @@ -4541,3 +4541,12 @@ pub const coreutils_error_map = brk: { break :brk map; }; + +extern fn getRSS(rss: *usize) c_int; +pub fn selfProcessMemoryUsage() ?usize { + var rss: usize = undefined; + if (getRSS(&rss) != 0) { + return null; + } + return rss; +} diff --git a/src/watcher.zig b/src/watcher.zig deleted file mode 100644 index 3f710c64ceb354..00000000000000 --- a/src/watcher.zig +++ /dev/null @@ -1,670 +0,0 @@ -//! 
Bun's cross-platform filesystem watcher. Runs on its own thread. -const Watcher = @This(); -pub const max_count = 128; - -pub const Event = WatchEvent; -pub const Item = WatchItem; -pub const ItemList = WatchList; -pub const WatchList = std.MultiArrayList(WatchItem); -pub const HashType = u32; -const no_watch_item: WatchItemIndex = std.math.maxInt(WatchItemIndex); - -// Consumer-facing -watch_events: [128]WatchEvent, -changed_filepaths: [128]?[:0]u8, - -/// The platform-specific implementation of the watcher -platform: Platform, - -watchlist: WatchList, -watched_count: usize, -mutex: Mutex, - -fs: *bun.fs.FileSystem, -allocator: std.mem.Allocator, -watchloop_handle: ?std.Thread.Id = null, -cwd: string, -thread: std.Thread = undefined, -running: bool = true, -close_descriptors: bool = false, - -evict_list: [max_eviction_count]WatchItemIndex = undefined, -evict_list_i: WatchItemIndex = 0, - -ctx: *anyopaque, -onFileUpdate: *const fn (this: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void, -onError: *const fn (this: *anyopaque, err: bun.sys.Error) void, - -thread_lock: bun.DebugThreadLock = bun.DebugThreadLock.unlocked, - -/// Initializes a watcher. Each watcher is tied to some context type, which -/// recieves watch callbacks on the watcher thread. This function does not -/// actually start the watcher thread. 
-/// -/// const watcher = try Watcher.init(T, instance_of_t, fs, bun.default_allocator) -/// errdefer watcher.deinit(false); -/// try watcher.start(); -/// -/// To integrate a started watcher into module resolution: -/// -/// transpiler.resolver.watcher = watcher.getResolveWatcher(); -/// -/// To integrate a started watcher into bundle_v2: -/// -/// bundle_v2.bun_watcher = watcher; -pub fn init(comptime T: type, ctx: *T, fs: *bun.fs.FileSystem, allocator: std.mem.Allocator) !*Watcher { - const wrapped = struct { - fn onFileUpdateWrapped(ctx_opaque: *anyopaque, events: []WatchEvent, changed_files: []?[:0]u8, watchlist: WatchList) void { - T.onFileUpdate(@alignCast(@ptrCast(ctx_opaque)), events, changed_files, watchlist); - } - fn onErrorWrapped(ctx_opaque: *anyopaque, err: bun.sys.Error) void { - if (@hasDecl(T, "onWatchError")) { - T.onWatchError(@alignCast(@ptrCast(ctx_opaque)), err); - } else { - T.onError(@alignCast(@ptrCast(ctx_opaque)), err); - } - } - }; - - const watcher = try allocator.create(Watcher); - errdefer allocator.destroy(watcher); - watcher.* = Watcher{ - .fs = fs, - .allocator = allocator, - .watched_count = 0, - .watchlist = WatchList{}, - .mutex = .{}, - .cwd = fs.top_level_dir, - .ctx = ctx, - .onFileUpdate = &wrapped.onFileUpdateWrapped, - .onError = &wrapped.onErrorWrapped, - .platform = .{}, - .watch_events = undefined, - .changed_filepaths = [_]?[:0]u8{null} ** 128, - }; - - try Platform.init(&watcher.platform, fs.top_level_dir); - - return watcher; -} - -pub fn start(this: *Watcher) !void { - bun.assert(this.watchloop_handle == null); - this.thread = try std.Thread.spawn(.{}, threadMain, .{this}); -} - -pub fn deinit(this: *Watcher, close_descriptors: bool) void { - if (this.watchloop_handle != null) { - this.mutex.lock(); - defer this.mutex.unlock(); - this.close_descriptors = close_descriptors; - this.running = false; - } else { - if (close_descriptors and this.running) { - const fds = this.watchlist.items(.fd); - for (fds) |fd| { - _ = 
bun.sys.close(fd); - } - } - this.watchlist.deinit(this.allocator); - const allocator = this.allocator; - allocator.destroy(this); - } -} - -pub fn getHash(filepath: string) HashType { - return @as(HashType, @truncate(bun.hash(filepath))); -} - -pub const WatchItemIndex = u16; -pub const max_eviction_count = 8096; - -const log = bun.Output.scoped(.watcher, false); - -const WindowsWatcher = @import("./watcher/WindowsWatcher.zig"); -// TODO: some platform-specific behavior is implemented in -// this file instead of the platform-specific file. -// ideally, the constants above can be inlined -const Platform = switch (Environment.os) { - .linux => @import("./watcher/INotifyWatcher.zig"), - .mac => @import("./watcher/KEventWatcher.zig"), - .windows => WindowsWatcher, - else => @compileError("Unsupported platform"), -}; - -pub const WatchEvent = struct { - index: WatchItemIndex, - op: Op, - name_off: u8 = 0, - name_len: u8 = 0, - - pub fn names(this: WatchEvent, buf: []?[:0]u8) []?[:0]u8 { - if (this.name_len == 0) return &[_]?[:0]u8{}; - return buf[this.name_off..][0..this.name_len]; - } - - pub const Sorter = void; - - pub fn sortByIndex(_: Sorter, event: WatchEvent, rhs: WatchEvent) bool { - return event.index < rhs.index; - } - - pub fn merge(this: *WatchEvent, other: WatchEvent) void { - this.name_len += other.name_len; - this.op = Op{ - .delete = this.op.delete or other.op.delete, - .metadata = this.op.metadata or other.op.metadata, - .rename = this.op.rename or other.op.rename, - .write = this.op.write or other.op.write, - }; - } - - pub const Op = packed struct { - delete: bool = false, - metadata: bool = false, - rename: bool = false, - write: bool = false, - move_to: bool = false, - - pub fn merge(before: Op, after: Op) Op { - return .{ - .delete = before.delete or after.delete, - .write = before.write or after.write, - .metadata = before.metadata or after.metadata, - .rename = before.rename or after.rename, - .move_to = before.move_to or after.move_to, - }; - } 
- - pub fn format(op: Op, comptime _: []const u8, _: std.fmt.FormatOptions, w: anytype) !void { - try w.writeAll("{"); - var first = true; - inline for (comptime std.meta.fieldNames(Op)) |name| { - if (@field(op, name)) { - if (!first) { - try w.writeAll(","); - } - first = false; - try w.writeAll(name); - } - } - try w.writeAll("}"); - } - }; -}; - -pub const WatchItem = struct { - file_path: string, - // filepath hash for quick comparison - hash: u32, - loader: options.Loader, - fd: bun.FileDescriptor, - count: u32, - parent_hash: u32, - kind: Kind, - package_json: ?*PackageJSON, - eventlist_index: if (Environment.isLinux) Platform.EventListIndex else u0 = 0, - - pub const Kind = enum { file, directory }; -}; - -fn threadMain(this: *Watcher) !void { - this.watchloop_handle = std.Thread.getCurrentId(); - this.thread_lock.lock(); - Output.Source.configureNamedThread("File Watcher"); - - defer Output.flush(); - if (FeatureFlags.verbose_watcher) Output.prettyln("Watcher started", .{}); - - switch (this.watchLoop()) { - .err => |err| { - this.watchloop_handle = null; - this.platform.stop(); - if (this.running) { - this.onError(this.ctx, err); - } - }, - .result => {}, - } - - // deinit and close descriptors if needed - if (this.close_descriptors) { - const fds = this.watchlist.items(.fd); - for (fds) |fd| { - _ = bun.sys.close(fd); - } - } - this.watchlist.deinit(this.allocator); - - const allocator = this.allocator; - allocator.destroy(this); -} - -pub fn flushEvictions(this: *Watcher) void { - if (this.evict_list_i == 0) return; - defer this.evict_list_i = 0; - - // swapRemove messes up the order - // But, it only messes up the order if any elements in the list appear after the item being removed - // So if we just sort the list by the biggest index first, that should be fine - std.sort.pdq( - WatchItemIndex, - this.evict_list[0..this.evict_list_i], - {}, - comptime std.sort.desc(WatchItemIndex), - ); - - var slice = this.watchlist.slice(); - const fds = 
slice.items(.fd); - var last_item = no_watch_item; - - for (this.evict_list[0..this.evict_list_i]) |item| { - // catch duplicates, since the list is sorted, duplicates will appear right after each other - if (item == last_item) continue; - - if (!Environment.isWindows) { - // on mac and linux we can just close the file descriptor - // TODO do we need to call inotify_rm_watch on linux? - _ = bun.sys.close(fds[item]); - } - last_item = item; - } - - last_item = no_watch_item; - // This is split into two passes because reading the slice while modified is potentially unsafe. - for (this.evict_list[0..this.evict_list_i]) |item| { - if (item == last_item) continue; - this.watchlist.swapRemove(item); - last_item = item; - } -} - -fn watchLoop(this: *Watcher) bun.JSC.Maybe(void) { - while (this.running) { - // individual platform implementation will call onFileUpdate - switch (Platform.watchLoopCycle(this)) { - .err => |err| return .{ .err = err }, - .result => |iter| iter, - } - } - return .{ .result = {} }; -} - -fn appendFileAssumeCapacity( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - parent_hash: HashType, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, -) bun.JSC.Maybe(void) { - if (comptime Environment.isWindows) { - // on windows we can only watch items that are in the directory tree of the top level dir - const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); - if (rel == .unrelated) { - Output.warn("File {s} is not in the project directory and will not be watched\n", .{file_path}); - return .{ .result = {} }; - } - } - - const watchlist_id = this.watchlist.len; - - const file_path_: string = if (comptime copy_file_path) - bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) - else - file_path; - - var item = WatchItem{ - .file_path = file_path_, - .fd = fd, - .hash = hash, - .count = 0, - .loader = loader, - .parent_hash = parent_hash, - 
.package_json = package_json, - .kind = .file, - }; - - if (comptime Environment.isMac) { - const KEvent = std.c.Kevent; - - // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html - var event = std.mem.zeroes(KEvent); - - event.flags = std.c.EV.ADD | std.c.EV.CLEAR | std.c.EV.ENABLE; - // we want to know about the vnode - event.filter = std.c.EVFILT.VNODE; - - event.fflags = std.c.NOTE.WRITE | std.c.NOTE.RENAME | std.c.NOTE.DELETE; - - // id - event.ident = @intCast(fd.int()); - - // Store the hash for fast filtering later - event.udata = @as(usize, @intCast(watchlist_id)); - var events: [1]KEvent = .{event}; - - // This took a lot of work to figure out the right permutation - // Basically: - // - We register the event here. - // our while(true) loop above receives notification of changes to any of the events created here. - _ = std.posix.system.kevent( - this.platform.fd.cast(), - @as([]KEvent, events[0..1]).ptr, - 1, - @as([]KEvent, events[0..1]).ptr, - 0, - null, - ); - } else if (comptime Environment.isLinux) { - // var file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); - // var buf: [bun.MAX_PATH_BYTES+1]u8 = undefined; - // bun.copy(u8, &buf, file_path_to_use_); - // buf[file_path_to_use_.len] = 0; - var buf = file_path_.ptr; - const slice: [:0]const u8 = buf[0..file_path_.len :0]; - item.eventlist_index = switch (this.platform.watchPath(slice)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - this.watchlist.appendAssumeCapacity(item); - return .{ .result = {} }; -} - -fn appendDirectoryAssumeCapacity( - this: *Watcher, - stored_fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - comptime copy_file_path: bool, -) bun.JSC.Maybe(WatchItemIndex) { - if (comptime Environment.isWindows) { - // on windows we can only watch items that are in the directory tree of the top level dir - const rel = bun.path.isParentOrEqual(this.fs.top_level_dir, file_path); - if 
(rel == .unrelated) { - Output.warn("Directory {s} is not in the project directory and will not be watched\n", .{file_path}); - return .{ .result = no_watch_item }; - } - } - - const fd = brk: { - if (stored_fd != .zero) break :brk stored_fd; - break :brk switch (bun.sys.openA(file_path, 0, 0)) { - .err => |err| return .{ .err = err }, - .result => |fd| fd, - }; - }; - - const parent_hash = getHash(bun.fs.PathName.init(file_path).dirWithTrailingSlash()); - - const file_path_: string = if (comptime copy_file_path) - bun.asByteSlice(this.allocator.dupeZ(u8, file_path) catch bun.outOfMemory()) - else - file_path; - - const watchlist_id = this.watchlist.len; - - var item = WatchItem{ - .file_path = file_path_, - .fd = fd, - .hash = hash, - .count = 0, - .loader = options.Loader.file, - .parent_hash = parent_hash, - .kind = .directory, - .package_json = null, - }; - - if (Environment.isMac) { - const KEvent = std.c.Kevent; - - // https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/kqueue.2.html - var event = std.mem.zeroes(KEvent); - - event.flags = std.c.EV.ADD | std.c.EV.CLEAR | std.c.EV.ENABLE; - // we want to know about the vnode - event.filter = std.c.EVFILT.VNODE; - - // monitor: - // - Write - // - Rename - // - Delete - event.fflags = std.c.NOTE.WRITE | std.c.NOTE.RENAME | std.c.NOTE.DELETE; - - // id - event.ident = @intCast(fd.int()); - - // Store the hash for fast filtering later - event.udata = @as(usize, @intCast(watchlist_id)); - var events: [1]KEvent = .{event}; - - // This took a lot of work to figure out the right permutation - // Basically: - // - We register the event here. - // our while(true) loop above receives notification of changes to any of the events created here. 
- _ = std.posix.system.kevent( - this.platform.fd.cast(), - @as([]KEvent, events[0..1]).ptr, - 1, - @as([]KEvent, events[0..1]).ptr, - 0, - null, - ); - } else if (Environment.isLinux) { - const file_path_to_use_ = std.mem.trimRight(u8, file_path_, "/"); - var buf: bun.PathBuffer = undefined; - bun.copy(u8, &buf, file_path_to_use_); - buf[file_path_to_use_.len] = 0; - const slice: [:0]u8 = buf[0..file_path_to_use_.len :0]; - item.eventlist_index = switch (this.platform.watchDir(slice)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - this.watchlist.appendAssumeCapacity(item); - return .{ - .result = @as(WatchItemIndex, @truncate(this.watchlist.len - 1)), - }; -} - -// Below is platform-independent - -pub fn appendFileMaybeLock( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, - comptime lock: bool, -) bun.JSC.Maybe(void) { - if (comptime lock) this.mutex.lock(); - defer if (comptime lock) this.mutex.unlock(); - bun.assert(file_path.len > 1); - const pathname = bun.fs.PathName.init(file_path); - - const parent_dir = pathname.dirWithTrailingSlash(); - const parent_dir_hash: HashType = getHash(parent_dir); - - var parent_watch_item: ?WatchItemIndex = null; - const autowatch_parent_dir = (comptime FeatureFlags.watch_directories) and this.isEligibleDirectory(parent_dir); - if (autowatch_parent_dir) { - var watchlist_slice = this.watchlist.slice(); - - if (dir_fd != .zero) { - const fds = watchlist_slice.items(.fd); - if (std.mem.indexOfScalar(bun.FileDescriptor, fds, dir_fd)) |i| { - parent_watch_item = @as(WatchItemIndex, @truncate(i)); - } - } - - if (parent_watch_item == null) { - const hashes = watchlist_slice.items(.hash); - if (std.mem.indexOfScalar(HashType, hashes, parent_dir_hash)) |i| { - parent_watch_item = @as(WatchItemIndex, @truncate(i)); - } - } - } - 
this.watchlist.ensureUnusedCapacity(this.allocator, 1 + @as(usize, @intCast(@intFromBool(parent_watch_item == null)))) catch bun.outOfMemory(); - - if (autowatch_parent_dir) { - parent_watch_item = parent_watch_item orelse switch (this.appendDirectoryAssumeCapacity(dir_fd, parent_dir, parent_dir_hash, copy_file_path)) { - .err => |err| return .{ .err = err }, - .result => |r| r, - }; - } - - switch (this.appendFileAssumeCapacity( - fd, - file_path, - hash, - loader, - parent_dir_hash, - package_json, - copy_file_path, - )) { - .err => |err| return .{ .err = err }, - .result => {}, - } - - if (comptime FeatureFlags.verbose_watcher) { - if (strings.indexOf(file_path, this.cwd)) |i| { - Output.prettyln("Added ./{s} to watch list.", .{file_path[i + this.cwd.len ..]}); - } else { - Output.prettyln("Added {s} to watch list.", .{file_path}); - } - } - - return .{ .result = {} }; -} - -inline fn isEligibleDirectory(this: *Watcher, dir: string) bool { - return strings.contains(dir, this.fs.top_level_dir) and !strings.contains(dir, "node_modules"); -} - -pub fn appendFile( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, -) bun.JSC.Maybe(void) { - return appendFileMaybeLock(this, fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, true); -} - -pub fn addDirectory( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - comptime copy_file_path: bool, -) bun.JSC.Maybe(WatchItemIndex) { - this.mutex.lock(); - defer this.mutex.unlock(); - - if (this.indexOf(hash)) |idx| { - return .{ .result = @truncate(idx) }; - } - - this.watchlist.ensureUnusedCapacity(this.allocator, 1) catch bun.outOfMemory(); - - return this.appendDirectoryAssumeCapacity(fd, file_path, hash, copy_file_path); -} - -pub fn addFile( - this: *Watcher, - fd: bun.FileDescriptor, - file_path: string, - hash: HashType, - 
loader: options.Loader, - dir_fd: bun.FileDescriptor, - package_json: ?*PackageJSON, - comptime copy_file_path: bool, -) bun.JSC.Maybe(void) { - // This must lock due to concurrent transpiler - this.mutex.lock(); - defer this.mutex.unlock(); - - if (this.indexOf(hash)) |index| { - if (comptime FeatureFlags.atomic_file_watcher) { - // On Linux, the file descriptor might be out of date. - if (fd.int() > 0) { - var fds = this.watchlist.items(.fd); - fds[index] = fd; - } - } - return .{ .result = {} }; - } - - return this.appendFileMaybeLock(fd, file_path, hash, loader, dir_fd, package_json, copy_file_path, false); -} - -pub fn indexOf(this: *Watcher, hash: HashType) ?u32 { - for (this.watchlist.items(.hash), 0..) |other, i| { - if (hash == other) { - return @as(u32, @truncate(i)); - } - } - return null; -} - -pub fn remove(this: *Watcher, hash: HashType) void { - this.mutex.lock(); - defer this.mutex.unlock(); - if (this.indexOf(hash)) |index| { - this.removeAtIndex(@truncate(index), hash, &[_]HashType{}, .file); - } -} - -pub fn removeAtIndex(this: *Watcher, index: WatchItemIndex, hash: HashType, parents: []HashType, comptime kind: WatchItem.Kind) void { - bun.assert(index != no_watch_item); - - this.evict_list[this.evict_list_i] = index; - this.evict_list_i += 1; - - if (comptime kind == .directory) { - for (parents) |parent| { - if (parent == hash) { - this.evict_list[this.evict_list_i] = @as(WatchItemIndex, @truncate(parent)); - this.evict_list_i += 1; - } - } - } -} - -pub fn getResolveWatcher(watcher: *Watcher) bun.resolver.AnyResolveWatcher { - return bun.resolver.ResolveWatcher(*@This(), onMaybeWatchDirectory).init(watcher); -} - -pub fn onMaybeWatchDirectory(watch: *Watcher, file_path: string, dir_fd: bun.StoredFileDescriptorType) void { - // We don't want to watch: - // - Directories outside the root directory - // - Directories inside node_modules - if (std.mem.indexOf(u8, file_path, "node_modules") == null and std.mem.indexOf(u8, file_path, 
watch.fs.top_level_dir) != null) { - _ = watch.addDirectory(dir_fd, file_path, getHash(file_path), false); - } -} - -const std = @import("std"); -const bun = @import("root").bun; -const string = bun.string; -const Output = bun.Output; -const Global = bun.Global; -const Environment = bun.Environment; -const strings = bun.strings; -const stringZ = bun.stringZ; -const FeatureFlags = bun.FeatureFlags; -const options = @import("./options.zig"); -const Mutex = bun.Mutex; -const Futex = @import("./futex.zig"); -const PackageJSON = @import("./resolver/package_json.zig").PackageJSON; diff --git a/src/watcher/INotifyWatcher.zig b/src/watcher/INotifyWatcher.zig index d86694d6338bf3..653841431bf7dd 100644 --- a/src/watcher/INotifyWatcher.zig +++ b/src/watcher/INotifyWatcher.zig @@ -1,7 +1,7 @@ //! Bun's filesystem watcher implementation for linux using inotify //! https://man7.org/linux/man-pages/man7/inotify.7.html const INotifyWatcher = @This(); -const log = Output.scoped(.inotify, false); +const log = Output.scoped(.watcher, false); // inotify events are variable-sized, so a byte buffer is used (also needed // since communication is done via the `read` syscall). 
diff --git a/src/watcher/KEventWatcher.zig b/src/watcher/KEventWatcher.zig index e1037db01a3506..b6d6181450e5ca 100644 --- a/src/watcher/KEventWatcher.zig +++ b/src/watcher/KEventWatcher.zig @@ -1,4 +1,5 @@ const KEventWatcher = @This(); +const log = Output.scoped(.watcher, false); pub const EventListIndex = u32; const KEvent = std.c.Kevent; @@ -48,32 +49,29 @@ pub fn watchLoopCycle(this: *Watcher) bun.JSC.Maybe(void) { var count = std.posix.system.kevent( this.platform.fd.cast(), - @as([*]KEvent, changelist), + changelist, 0, - @as([*]KEvent, changelist), + changelist, 128, - - null, + null, // timeout ); // Give the events more time to coalesce if (count < 128 / 2) { const remain = 128 - count; - var timespec = std.posix.timespec{ .sec = 0, .nsec = 100_000 }; const extra = std.posix.system.kevent( this.platform.fd.cast(), - @as([*]KEvent, changelist[@as(usize, @intCast(count))..].ptr), + changelist[@intCast(count)..].ptr, 0, - @as([*]KEvent, changelist[@as(usize, @intCast(count))..].ptr), + changelist[@intCast(count)..].ptr, remain, - - &timespec, + &.{ .sec = 0, .nsec = 100_000 }, // 0.0001 seconds ); count += extra; } - var changes = changelist[0..@as(usize, @intCast(@max(0, count)))]; + var changes = changelist[0..@intCast(@max(0, count))]; var watchevents = this.watch_events[0..changes.len]; var out_len: usize = 0; if (changes.len > 0) { diff --git a/test/cli/test/__snapshots__/coverage.test.ts.snap b/test/cli/test/__snapshots__/coverage.test.ts.snap index 92503498655382..12122efc4d3238 100644 --- a/test/cli/test/__snapshots__/coverage.test.ts.snap +++ b/test/cli/test/__snapshots__/coverage.test.ts.snap @@ -16,8 +16,8 @@ SF:demo2.ts FNF:2 FNH:1 DA:2,28 -DA:4,10 -DA:6,10 +DA:4,11 +DA:6,9 DA:9,0 DA:10,0 DA:11,1 diff --git a/test/js/bun/http/bun-serve-html-entry.test.ts b/test/js/bun/http/bun-serve-html-entry.test.ts index 250629671db154..b31c9e88fbd988 100644 --- a/test/js/bun/http/bun-serve-html-entry.test.ts +++ 
b/test/js/bun/http/bun-serve-html-entry.test.ts @@ -95,7 +95,7 @@ test("bun ./index.html", async () => { cmd: [bunExe(), "index.html", "--port=0"], env: { ...bunEnv, - NODE_ENV: undefined, + NODE_ENV: "production", }, cwd: dir, stdout: "pipe", @@ -127,7 +127,7 @@ test("bun ./index.html", async () => { expect(cssResponse.headers.get("content-type")).toContain("text/css"); const css = await cssResponse.text(); expect(css).toContain(".container"); - expect(css).toContain("max-width: 800px"); + expect(css).toContain("max-width:800px"); } // Get and verify the bundled JS @@ -213,7 +213,7 @@ test("bun ./index.html ./about.html", async () => { cmd: [bunExe(), "index.html", "about.html", "--port=0"], env: { ...bunEnv, - NODE_ENV: undefined, + NODE_ENV: "production", }, cwd: dir, stdout: "pipe", @@ -258,7 +258,7 @@ test("bun ./index.html ./about.html", async () => { expect(cssResponse.status).toBe(200); const css = await cssResponse.text(); expect(css).toContain(".container"); - expect(css).toContain("max-width: 800px"); + expect(css).toContain("max-width:800px"); } // Verify both JS bundles work @@ -408,7 +408,7 @@ test("bun *.html", async () => { cmd: [bunExe(), "*.html", "--port=0"], env: { ...bunEnv, - NODE_ENV: undefined, + NODE_ENV: "production", }, cwd: dir, stdout: "pipe", @@ -450,9 +450,9 @@ test("bun *.html", async () => { const cssResponse = await fetch(new URL(cssMatches[0]!, serverUrl).href); expect(cssResponse.status).toBe(200); const css = await cssResponse.text(); - expect(css).toContain("nav {"); - expect(css).toContain(".container {"); - expect(css).toContain("form {"); + expect(css).toContain("nav{"); + expect(css).toContain(".container{"); + expect(css).toContain("form{"); // Verify each page has its own JS functionality const jsMatches = responses.map(html => html.match(/src="(\/chunk-[a-z0-9]+\.js)"/)?.[1]!); diff --git a/test/js/bun/http/bun-serve-html.test.ts b/test/js/bun/http/bun-serve-html.test.ts index 4c0860fec4cb0d..f7206fc2801382 100644 --- 
a/test/js/bun/http/bun-serve-html.test.ts +++ b/test/js/bun/http/bun-serve-html.test.ts @@ -199,21 +199,21 @@ console.log("How...dashing?"); const sourceMap = await (await fetch(new URL(sourceMapURL, "http://" + hostname + ":" + port))).json(); sourceMap.sourcesContent = sourceMap.sourcesContent.map(a => a.trim()); expect(JSON.stringify(sourceMap, null, 2)).toMatchInlineSnapshot(` -"{ - "version": 3, - "sources": [ - "script.js", - "dashboard.js" - ], - "sourcesContent": [ - "let count = 0;\\n const button = document.getElementById('counter');\\n button.addEventListener('click', () => {\\n count++;\\n button.textContent = \`Click me: \${count}\`;\\n });", - "import './script.js';\\n // Additional dashboard-specific code could go here\\n console.log(\\"How...dashing?\\")" - ], - "mappings": ";AACM,IAAI,QAAQ;AACZ,IAAM,SAAS,SAAS,eAAe,SAAS;AAChD,OAAO,iBAAiB,SAAS,MAAM;AACrC;AACA,SAAO,cAAc,aAAa;AAAA,CACnC;;;ACHD,QAAQ,IAAI,gBAAgB;", - "debugId": "0B3DD451DC3D66B564756E2164756E21", - "names": [] -}" -`); + "{ + "version": 3, + "sources": [ + "script.js", + "dashboard.js" + ], + "sourcesContent": [ + "let count = 0;\\n const button = document.getElementById('counter');\\n button.addEventListener('click', () => {\\n count++;\\n button.textContent = \`Click me: \${count}\`;\\n });", + "import './script.js';\\n // Additional dashboard-specific code could go here\\n console.log(\\"How...dashing?\\")" + ], + "mappings": ";AACM,IAAI,QAAQ;AACZ,IAAM,SAAS,SAAS,eAAe,SAAS;AAChD,OAAO,iBAAiB,SAAS,MAAM;AAAA,EACrC;AAAA,EACA,OAAO,cAAc,aAAa;AAAA,CACnC;;;ACHD,QAAQ,IAAI,gBAAgB;", + "debugId": "0B3DD451DC3D66B564756E2164756E21", + "names": [] + }" + `); const headers = response.headers.toJSON(); headers.date = ""; headers.sourcemap = headers.sourcemap.replace(/chunk-[a-z0-9]+\.js.map/g, "chunk-HASH.js.map"); @@ -593,18 +593,20 @@ async function waitForServer( port: number; hostname: string; }> { + console.log("waitForServer", dir, entryPoints); let defer = Promise.withResolvers<{ subprocess: 
Subprocess; port: number; hostname: string; }>(); const process = Bun.spawn({ - cmd: [bunExe(), join(import.meta.dir, "bun-serve-static-fixture.js")], + cmd: [bunExe(), "--no-hmr", join(import.meta.dir, "bun-serve-static-fixture.js")], env: { ...bunEnv, NODE_ENV: undefined, }, cwd: dir, + stdio: ["inherit", "inherit", "inherit"], ipc(message, subprocess) { subprocess.send({ files: entryPoints, diff --git a/test/js/bun/util/inspect-error.test.js b/test/js/bun/util/inspect-error.test.js index a439b5c5ad2dc8..0405064cba6dde 100644 --- a/test/js/bun/util/inspect-error.test.js +++ b/test/js/bun/util/inspect-error.test.js @@ -110,18 +110,18 @@ test("Error inside minified file (no color) ", () => { .trim(), ), ).toMatchInlineSnapshot(` -"21 | exports.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=Z; -22 | exports.cache=function(a){return function(){var b=U.current;if(!b)return a.apply(null,arguments);var c=b.getCacheForType(V);b=c.get(a);void 0===b&&(b=W(),c.set(a,b));c=0;for(var f=arguments.length;c ([dir]/inspect-error-fixture.min.js:26:2846) - at ([dir]/inspect-error-fixture.min.js:26:2890) - at ([dir]/inspect-error.test.js:102:5)" -`); + error: error inside long minified file! + at ([dir]/inspect-error-fixture.min.js:26:2846) + at ([dir]/inspect-error-fixture.min.js:26:2890) + at ([dir]/inspect-error.test.js:101:7)" + `); } }); @@ -141,18 +141,18 @@ test("Error inside minified file (color) ", () => { ).trim(), ), ).toMatchInlineSnapshot(` -"21 | exports.__SECRET_INTERNALS_DO_NOT_USE_OR_YOU_WILL_BE_FIRED=Z; -22 | exports.cache=function(a){return function(){var b=U.current;if(!b)return a.apply(null,arguments);var c=b.getCacheForType(V);b=c.get(a);void 0===b&&(b=W(),c.set(a,b));c=0;for(var f=arguments.length;c ([dir]/inspect-error-fixture.min.js:26:2846) - at ([dir]/inspect-error-fixture.min.js:26:2890) - at ([dir]/inspect-error.test.js:130:5)" -`); + error: error inside long minified file! 
+ at ([dir]/inspect-error-fixture.min.js:26:2846) + at ([dir]/inspect-error-fixture.min.js:26:2890) + at ([dir]/inspect-error.test.js:129:7)" + `); } });