With debug logs enabled (`WRANGLER_LOG=debug`), the last few lines are:
↗️ Done syncing assets
Retrieving cached values for userId from node_modules/.cache/wrangler
-- START CF API REQUEST: PUT https://api.cloudflare.com/client/v4/accounts/yizbbx?include_subdomain_availability=true&excludeScript=true
HEADERS: {
"metricsEnabled": "true",
"Authorization": "Bearer",
"User-Agent": "wrangler/2.4.4"
}
INIT: {
"method": "PUT",
"body": {},
"headers": {
"metricsEnabled": "true"
}
}
-- END CF API REQUEST
Total Upload: 1503.19 KiB / gzip: 393.42 KiB
-- START CF API RESPONSE: Bad Request 400
HEADERS: {}
RESPONSE: {
"result": null,
"success": false,
"errors": [
{
"code": 10021,
"message": "Error: Script startup exceeded CPU time limit.\n"
}
],
"messages": []
}
-- END CF API RESPONSE
▲ [WARNING] Here are the 1 largest dependencies included in your script:
- dist/worker/index.js - 1499.53 KiB
If these are unnecessary, consider removing them
✘ [ERROR] A request to the Cloudflare API (/accounts/3642ac/zbbx) failed.
Error: Script startup exceeded CPU time limit.
[code: 10021]
I am aware of the 200ms startup time limit, and the time is spent outside the actual request handler, but I am not sure how to measure the startup time locally, nor where I should look to make it faster.
// If the request path matches any of your assets, then use the `getAssetFromKV`
// function from `@cloudflare/kv-asset-handler` to serve it. Otherwise, call the
// `handleRequest` function, which is imported from your `App.server.jsx` file,
// to return a Hydrogen response.
import {getAssetFromKV} from '@cloudflare/kv-asset-handler';
import handleRequest from './src/App.server';
import indexTemplate from './dist/client/index.html?raw';
function isAsset(url) {
// Update this RE to fit your assets
return /\.(png|jpe?g|gif|css|js|svg|ico|map)$/i.test(url.pathname);
}
async function handleAsset(url, event) {
const response = await getAssetFromKV(event, {});
// Custom cache-control for assets
if (response.status < 400) {
const filename = url.pathname.split('/').pop();
const maxAge =
filename.split('.').length > 2
? 31536000 // hashed asset, will never be updated
: 86400; // favicon and other public assets
response.headers.append('cache-control', `public, max-age=${maxAge}`);
}
return response;
}
async function handleEvent(event) {
try {
const url = new URL(event.request.url);
if (isAsset(url)) {
return await handleAsset(url, event);
}
return await handleRequest(event.request, {
indexTemplate,
cache: caches.default,
context: event,
// Buyer IP varies by hosting provider and runtime. You should provide this
// as an argument to the `handleRequest` function for your runtime.
// Defaults to `x-forwarded-for` header value.
buyerIpHeader: 'cf-connecting-ip',
});
} catch (error) {
return new Response(error.message || error.toString(), {status: 500});
}
}
addEventListener('fetch', (event) => event.respondWith(handleEvent(event)));
Yes, my old site was parsing large static content at startup; I converted it to on-demand loading.
Unfortunately I did this by reading the code and reasoning about it, rather than in a more systematic way like profiling. If there are profiling tools for this, that would be awesome.