LLM input shaping
Shapes LLM prompts to fit within a token budget before they hit the API. Uses tiktoken for token counting and drops oldest conversation turns first, then trims the query if still over budget. The budgeting logic itself is model-agnostic — the example uses gpt-4o-mini as the tokenizer target, but the pattern works the same way for any model with a token limit.
How it works
Section titled “How it works”- The host generates prompt inputs (same system prefix, different conversation history and queries).
- Each task builds the full prompt and counts tokens with
tiktoken. - If over budget, it drops the oldest turns first, then trims query tokens.
- The host aggregates token savings and trim counts.
Three files:
- run_prompt_token_budget.ts — practical prompt-budgeting example for app logic
- bench_prompt_token_budget.ts — dedicated mitata benchmark measuring budgeting throughput
- token_budget.ts — the budgeting logic itself
Example budget decision
Section titled “Example budget decision”Input:
const input = {
  model: "gpt-4o-mini",
  systemPrefix: "You are a docs assistant.",
  history: [
    "Need guidance on schema validation.",
    "Compare workers and batching.",
    "Keep the answer short.",
  ],
  query: "Give a migration plan and one code example.",
  maxInputTokens: 900,
};

Output shape:
{
  prompt: "...final prompt string...",
  rawInputTokens: 1120,
  inputTokens: 884,
  trimmedTurns: 1,
  queryWasTrimmed: false,
}

The useful part is not just the final prompt; you also get the bookkeeping needed to explain
why a prompt was trimmed and by how much.
Install
Section titled “Install”

deno add --npm jsr:@vixeny/knitting
deno add npm:tiktoken npm:openai npm:mitata

npx jsr add @vixeny/knitting
npm i tiktoken openai mitata

# pnpm 10.9+
pnpm add jsr:@vixeny/knitting
# fallback (older pnpm)
pnpm dlx jsr add @vixeny/knitting
pnpm add tiktoken openai mitata

# yarn 4.9+
yarn add jsr:@vixeny/knitting
# fallback (older yarn)
yarn dlx jsr add @vixeny/knitting
yarn add tiktoken openai mitata

bunx jsr add @vixeny/knitting
bun add tiktoken openai mitata

openai is optional. These examples only prepare prompts and token budgets.
mitata is only needed for the benchmark script.
bun src/run_prompt_token_budget.ts --threads 2 --requests 2000 --maxInputTokens 900 --model gpt-4o-mini --mode knitting
deno run -A src/run_prompt_token_budget.ts --threads 2 --requests 2000 --maxInputTokens 900 --model gpt-4o-mini --mode knitting
npx tsx src/run_prompt_token_budget.ts --threads 2 --requests 2000 --maxInputTokens 900 --model gpt-4o-mini --mode knitting

Expected output:
mode: knitting (2 threads)
requests: 2000
budget: 900 tokens (gpt-4o-mini)
trimmed: 1,247 / 2,000 requests (62.4%)
avg tokens saved: 312 per trimmed request
total tokens saved: 389,064
elapsed: 1.82s

Optional benchmark
Section titled “Optional benchmark”

bun src/bench_prompt_token_budget.ts --threads 2 --requests 20000 --maxInputTokens 900 --model gpt-4o-mini --batch 32
npx tsx src/bench_prompt_token_budget.ts --threads 2 --requests 20000 --maxInputTokens 900 --model gpt-4o-mini --batch 32

Compares budgeting throughput on the host vs through workers. Worker tasks return compact totals (token/trim counters), not full prompt strings.
import { createPool, isMain } from "@vixeny/knitting";import { preparePrompt, preparePromptHost, type PromptInput, type PromptPlan,} from "./token_budget.ts";
function intArg(name: string, fallback: number): number { const i = process.argv.indexOf(`--${name}`); if (i !== -1 && i + 1 < process.argv.length) { const value = Number(process.argv[i + 1]); if (Number.isFinite(value)) return Math.floor(value); } return fallback;}
function strArg(name: string, fallback: string): string { const i = process.argv.indexOf(`--${name}`); if (i !== -1 && i + 1 < process.argv.length) { return String(process.argv[i + 1]); } return fallback;}
// ---- CLI configuration (parsed once at module load) ----
// THREADS: worker-pool size (min 1); REQUESTS: synthetic prompt count;
// MAX_INPUT_TOKENS: per-prompt token budget (min 64); MODE: "host" or "knitting".
const THREADS = Math.max(1, intArg("threads", 2));const REQUESTS = Math.max(1, intArg("requests", 20_000));const MAX_INPUT_TOKENS = Math.max(64, intArg("maxInputTokens", 900));const MODE = strArg("mode", "knitting");const MODEL = strArg("model", "gpt-4o-mini");
// System prefix is identical across all requests — the "static" token share.
const SYSTEM_PREFIX = [ "You are a docs assistant.", "Prefer concrete and short answers.", "If data is missing, say it directly.", "Do not invent unsupported behavior.",].join("\n");
// Topic pool used to synthesize varied histories and queries.
const TOPICS = [ "token budgeting", "prompt caching", "parallel workers", "schema validation", "rendering pipelines", "markdown output", "compression tradeoffs", "latency under load",];
function pick<T>(arr: T[], i: number): T { return arr[i % arr.length]!;}
function makeHistory(i: number): string[] { const turns = 3 + (i % 10); const history = new Array<string>(turns);
for (let t = 0; t < turns; t++) { const topic = pick(TOPICS, i + t); history[t] = `Need guidance on ${topic}. Include practical steps and one small code example.`; }
return history;}
function makeInput(i: number): PromptInput { const topicA = pick(TOPICS, i); const topicB = pick(TOPICS, i + 3); const query = [ `Please compare ${topicA} with ${topicB}.`, "I care about cost per request and response quality.", "Give a short recommendation and a migration path.", ].join(" ");
return { model: MODEL, systemPrefix: SYSTEM_PREFIX, history: makeHistory(i), query, maxInputTokens: MAX_INPUT_TOKENS, };}
/** Aggregated counters across a set of prompt plans. */
type Totals = {
  rawTokens: number;
  budgetedTokens: number;
  staticTokens: number;
  dynamicTokens: number;
  trimmedRuns: number;
  queryTrimmedRuns: number;
  turnsDropped: number;
};

/** Folds per-request plans into one Totals record. */
function summarize(plans: PromptPlan[]): Totals {
  const totals: Totals = {
    rawTokens: 0,
    budgetedTokens: 0,
    staticTokens: 0,
    dynamicTokens: 0,
    trimmedRuns: 0,
    queryTrimmedRuns: 0,
    turnsDropped: 0,
  };
  for (const p of plans) {
    totals.rawTokens += p.rawInputTokens;
    totals.budgetedTokens += p.inputTokens;
    totals.staticTokens += p.staticTokens;
    totals.dynamicTokens += p.dynamicTokens;
    totals.turnsDropped += p.trimmedTurns;
    if (p.trimmedTurns > 0) totals.trimmedRuns += 1;
    if (p.queryWasTrimmed) totals.queryTrimmedRuns += 1;
  }
  return totals;
}
function runHost(inputs: PromptInput[]): Totals { const plans = inputs.map((input) => preparePromptHost(input)); return summarize(plans);}
/**
 * Runs budgeting through a knitting worker pool, one task per request,
 * then aggregates. The pool is always shut down, even on failure.
 */
async function runWorkers(inputs: PromptInput[]): Promise<Totals> {
  const pool = createPool({ threads: THREADS })({ preparePrompt });
  try {
    const plans = await Promise.all(
      inputs.map((input) => pool.call.preparePrompt(input)),
    );
    return summarize(plans);
  } finally {
    pool.shutdown();
  }
}
function percent(saved: number, base: number): string { if (base <= 0) return "0.0%"; return `${((saved / base) * 100).toFixed(1)}%`;}
// Entry point: synthesize REQUESTS inputs, run budgeting in the selected
// mode, and print aggregate token/timing stats.
async function main() { const inputs = new Array<PromptInput>(REQUESTS); for (let i = 0; i < REQUESTS; i++) inputs[i] = makeInput(i);
// Time only the budgeting work, not the input synthesis above.
const started = performance.now(); const totals = MODE === "host" ? runHost(inputs) : await runWorkers(inputs); const finished = performance.now();
// Derived metrics; secs is floored at 1e-9 to avoid divide-by-zero.
const tookMs = finished - started; const secs = Math.max(1e-9, tookMs / 1000); const reqPerSec = REQUESTS / secs; const savedTokens = Math.max(0, totals.rawTokens - totals.budgetedTokens); const cacheableTokensEstimate = totals.staticTokens;
// Report. "cacheable estimate" is the static (system-prefix) token share.
console.log("Prompt token budgeting"); console.log("mode :", MODE); console.log("model :", MODEL); console.log("threads :", MODE === "host" ? 0 : THREADS); console.log("requests :", REQUESTS.toLocaleString()); console.log("maxInputTokens :", MAX_INPUT_TOKENS.toLocaleString()); console.log("raw tokens :", totals.rawTokens.toLocaleString()); console.log("budgeted tokens :", totals.budgetedTokens.toLocaleString()); console.log( "saved tokens :", `${savedTokens.toLocaleString()} (${ percent(savedTokens, totals.rawTokens) })`, ); console.log("trimmed runs :", totals.trimmedRuns.toLocaleString()); console.log("query trimmed runs:", totals.queryTrimmedRuns.toLocaleString()); console.log("turns dropped :", totals.turnsDropped.toLocaleString()); console.log("cacheable estimate:", cacheableTokensEstimate.toLocaleString()); console.log("took :", tookMs.toFixed(2), "ms"); console.log("throughput :", reqPerSec.toFixed(0), "req/s");}
if (isMain) { main().catch((error) => { console.error(error); process.exitCode = 1; });}import { createPool, isMain } from "@vixeny/knitting";import { bench, boxplot, run, summary } from "mitata";import { preparePromptBatchFast, preparePromptBatchFastHost, type PromptBudgetSummary, type PromptInput,} from "./token_budget.ts";
function intArg(name: string, fallback: number): number { const i = process.argv.indexOf(`--${name}`); if (i !== -1 && i + 1 < process.argv.length) { const value = Number(process.argv[i + 1]); if (Number.isFinite(value)) return Math.floor(value); } return fallback;}
function strArg(name: string, fallback: string): string { const i = process.argv.indexOf(`--${name}`); if (i !== -1 && i + 1 < process.argv.length) { return String(process.argv[i + 1]); } return fallback;}
// ---- Benchmark CLI configuration ----
// BATCH: inputs per worker task (min 1); REQUESTS here is per-iteration.
const THREADS = Math.max(1, intArg("threads", 2));const REQUESTS = Math.max(1, intArg("requests", 10));const MAX_INPUT_TOKENS = Math.max(64, intArg("maxInputTokens", 500));const BATCH = Math.max(1, intArg("batch", 32));const MODEL = strArg("model", "gpt-4o-mini");
// Shared, request-invariant system prefix (the cacheable/static tokens).
const SYSTEM_PREFIX = [ "You are a docs assistant.", "Prefer concrete and short answers.", "If data is missing, say it directly.", "Do not invent unsupported behavior.",].join("\n");
// Topic pool for synthesizing varied workloads.
const TOPICS = [ "token budgeting", "prompt caching", "parallel workers", "schema validation", "rendering pipelines", "markdown output", "compression tradeoffs", "latency under load",];
function pick<T>(arr: T[], i: number): T { return arr[i % arr.length]!;}
function makeHistory(i: number): string[] { const turns = 3 + (i % 10); const history = new Array<string>(turns);
for (let t = 0; t < turns; t++) { const topic = pick(TOPICS, i + t); history[t] = `Need guidance on ${topic}. Include practical steps and one small code example.`; }
return history;}
function makeInput(i: number): PromptInput { const topicA = pick(TOPICS, i); const topicB = pick(TOPICS, i + 3); const query = [ `Please compare ${topicA} with ${topicB}.`, "I care about cost per request and response quality.", "Give a short recommendation and a migration path.", ].join(" ");
return { model: MODEL, systemPrefix: SYSTEM_PREFIX, history: makeHistory(i), query, maxInputTokens: MAX_INPUT_TOKENS, };}
function makeBatches<T>(values: T[], batchSize: number): T[][] { const batches: T[][] = []; for (let i = 0; i < values.length; i += batchSize) { batches.push(values.slice(i, i + batchSize)); } return batches;}
function mergeSummary( a: PromptBudgetSummary, b: PromptBudgetSummary,): PromptBudgetSummary { return { rawTokens: a.rawTokens + b.rawTokens, budgetedTokens: a.budgetedTokens + b.budgetedTokens, staticTokens: a.staticTokens + b.staticTokens, dynamicTokens: a.dynamicTokens + b.dynamicTokens, trimmedRuns: a.trimmedRuns + b.trimmedRuns, queryTrimmedRuns: a.queryTrimmedRuns + b.queryTrimmedRuns, turnsDropped: a.turnsDropped + b.turnsDropped, };}
/** Budgets every batch on the host thread and folds the summaries together. */
function runHostBatches(inputBatches: PromptInput[][]): PromptBudgetSummary {
  const zero: PromptBudgetSummary = {
    rawTokens: 0,
    budgetedTokens: 0,
    staticTokens: 0,
    dynamicTokens: 0,
    trimmedRuns: 0,
    queryTrimmedRuns: 0,
    turnsDropped: 0,
  };
  return inputBatches
    .map((batch) => preparePromptBatchFastHost(batch))
    .reduce((acc, cur) => mergeSummary(acc, cur), zero);
}
/**
 * Dispatches each batch to the pool via `callBatch`, awaits all compact
 * summaries in parallel, and folds them into one total.
 */
async function runWorkerBatches(
  callBatch: (inputs: PromptInput[]) => Promise<PromptBudgetSummary>,
  inputBatches: PromptInput[][],
): Promise<PromptBudgetSummary> {
  const results = await Promise.all(
    inputBatches.map((batch) => callBatch(batch)),
  );
  const zero: PromptBudgetSummary = {
    rawTokens: 0,
    budgetedTokens: 0,
    staticTokens: 0,
    dynamicTokens: 0,
    trimmedRuns: 0,
    queryTrimmedRuns: 0,
    turnsDropped: 0,
  };
  return results.reduce((acc, cur) => mergeSummary(acc, cur), zero);
}
function sameSummary(a: PromptBudgetSummary, b: PromptBudgetSummary): boolean { return a.rawTokens === b.rawTokens && a.budgetedTokens === b.budgetedTokens && a.staticTokens === b.staticTokens && a.dynamicTokens === b.dynamicTokens && a.trimmedRuns === b.trimmedRuns && a.queryTrimmedRuns === b.queryTrimmedRuns && a.turnsDropped === b.turnsDropped;}
// Benchmark entry point: verify host and worker totals agree, then compare
// throughput with mitata (boxplot + summary).
async function main() { const inputs = new Array<PromptInput>(REQUESTS); for (let i = 0; i < REQUESTS; i++) { inputs[i] = makeInput(i); } const inputBatches = makeBatches(inputs, BATCH);
// NOTE(review): pool size is THREADS - 1 (0 when --threads 1), unlike the
// runner script which uses THREADS — presumably the inliner runs one batch
// slice on the calling thread; confirm against knitting's createPool docs.
const pool = createPool({ threads: THREADS - 1, inliner: { batchSize: 8, }, })({ preparePromptBatchFast }); let sink = 0;
// Correctness gate before benchmarking: both paths must produce identical totals.
try { const hostCheck = runHostBatches(inputBatches); const workerCheck = await runWorkerBatches( pool.call.preparePromptBatchFast, inputBatches, ); if (!sameSummary(hostCheck, workerCheck)) { throw new Error("Host and worker prompt-budget totals differ."); }
console.log("Prompt token budgeting benchmark (mitata)"); console.log("workload: build prompt + tokenize + trim to budget"); console.log("model:", MODEL); console.log("requests per iteration:", REQUESTS.toLocaleString()); console.log("max input tokens:", MAX_INPUT_TOKENS.toLocaleString()); console.log("batch size:", BATCH); console.log("threads:", THREADS);
// `sink` keeps the benchmarked work observable so it cannot be optimized away.
boxplot(() => { summary(() => { bench(`host (${REQUESTS.toLocaleString()} req, batch ${BATCH})`, () => { const totals = runHostBatches(inputBatches); sink = totals.budgetedTokens; });
bench( `knitting (${THREADS} thread${ THREADS === 1 ? "" : "s" }, ${REQUESTS.toLocaleString()} req, batch ${BATCH})`, async () => { const totals = await runWorkerBatches( pool.call.preparePromptBatchFast, inputBatches, ); sink = totals.budgetedTokens; }, ); }); });
// Always release the worker pool, even if mitata throws.
await run(); console.log("last budgeted tokens:", sink.toLocaleString()); } finally { pool.shutdown(); }}
if (isMain) { main().catch((error) => { console.error(error); process.exitCode = 1; });}import { task } from "@vixeny/knitting";import { encoding_for_model } from "tiktoken";
/** One budgeting request: model id, shared system prefix, prior turns, the user query, and the token cap. */
export type PromptInput = { model: string; systemPrefix: string; history: string[]; query: string; maxInputTokens: number;};
/**
 * Result of budgeting one prompt. `rawInputTokens` is the pre-trim count and
 * `inputTokens` the post-trim count; `staticTokens` covers the system prefix,
 * `dynamicTokens` the remainder. `trimmedTurns` and `queryWasTrimmed` record
 * what was dropped to fit the budget.
 */
export type PromptPlan = { prompt: string; rawInputTokens: number; inputTokens: number; staticTokens: number; dynamicTokens: number; trimmedTurns: number; queryWasTrimmed: boolean;};
/** PromptPlan without the final prompt string — compact worker payload. */
export type PromptPlanFast = Omit<PromptPlan, "prompt">;
/** Aggregate counters over many budgeting runs (see PromptPlan fields). */
export type PromptBudgetSummary = { rawTokens: number; budgetedTokens: number; staticTokens: number; dynamicTokens: number; trimmedRuns: number; queryTrimmedRuns: number; turnsDropped: number;};
const decoder = new TextDecoder();type Encoder = ReturnType<typeof encoding_for_model>;const MAX_ENCODER_CACHE = 4;const MAX_STATIC_TOKEN_CACHE = 512;const encoderCache = new Map<string, Encoder>();const staticTokenCache = new Map<string, number>();
function normalizeText(value: string): string { return value.replace(/\s+/g, " ").trim();}
function countTokens(enc: Encoder, text: string): number { return enc.encode(text).length;}
function touchMapEntry<V>(map: Map<string, V>, key: string, value: V): void { map.delete(key); map.set(key, value);}
function evictOldestEncoderIfNeeded(): void { if (encoderCache.size <= MAX_ENCODER_CACHE) return; const oldest = encoderCache.keys().next().value; if (oldest === undefined) return; const enc = encoderCache.get(oldest); if (enc) enc.free(); encoderCache.delete(oldest);}
function evictOldestStaticTokenIfNeeded(): void { if (staticTokenCache.size <= MAX_STATIC_TOKEN_CACHE) return; const oldest = staticTokenCache.keys().next().value; if (oldest !== undefined) staticTokenCache.delete(oldest);}
/**
 * Returns a cached tiktoken encoder for `model`, marking it most-recently
 * used. On a miss, creates and caches a new encoder, evicting the LRU entry
 * if the cache is full.
 */
function getEncoder(model: string): Encoder {
  const hit = encoderCache.get(model);
  if (hit !== undefined) {
    touchMapEntry(encoderCache, model, hit);
    return hit;
  }
  // `as never` bypasses tiktoken's model-name union; unknown models throw here.
  const created = encoding_for_model(model as never);
  encoderCache.set(model, created);
  evictOldestEncoderIfNeeded();
  return created;
}
/**
 * Cached token count of the system prefix — the "static" share of every
 * prompt. Cache key joins model and prefix with a \x1f unit separator.
 */
function getStaticTokens(
  model: string,
  systemPrefix: string,
  enc: Encoder,
): number {
  const key = `${model}\x1f${systemPrefix}`;
  const hit = staticTokenCache.get(key);
  if (hit !== undefined) {
    touchMapEntry(staticTokenCache, key, hit);
    return hit;
  }
  const counted = countTokens(enc, systemPrefix);
  staticTokenCache.set(key, counted);
  evictOldestStaticTokenIfNeeded();
  return counted;
}
export function clearPromptBudgetCaches(): void { for (const enc of encoderCache.values()) enc.free(); encoderCache.clear(); staticTokenCache.clear();}
/**
 * Clips `text` to at most `maxTokens` tokens under `enc`.
 * Returns the text unchanged when it already fits, and "" for a
 * non-positive budget.
 */
function truncateToTokenBudget(
  enc: Encoder,
  text: string,
  maxTokens: number,
): string {
  if (maxTokens <= 0) return "";
  const tokens = enc.encode(text);
  if (tokens.length <= maxTokens) return text;
  const clipped = tokens.slice(0, maxTokens);
  // enc.decode returns UTF-8 bytes; cutting on a token boundary can split a
  // multi-byte character, which TextDecoder renders as U+FFFD. Strip any
  // trailing replacement characters so the trimmed query never ends in a
  // mangled partial character.
  return decoder.decode(enc.decode(clipped)).replace(/\uFFFD+$/, "");
}
/**
 * Renders the final prompt: trimmed system prefix, blank line, the
 * "Conversation context:" turn list, blank line, then the user request.
 */
function buildPrompt(
  systemPrefix: string,
  history: string[],
  query: string,
): string {
  const turnLines = history.map((turn, i) => `- Turn ${i + 1}: ${turn}`);
  return [
    systemPrefix.trim(),
    "",
    "Conversation context:",
    ...turnLines,
    "",
    `User request: ${query}`,
  ].join("\n");
}
// Core budgeting routine: normalize inputs, count the full prompt, then trim
// oldest history turns first and finally the query until the prompt fits
// `maxInputTokens` (floored at 64).
export function preparePromptHost(input: PromptInput): PromptPlan { const model = input.model; const maxInputTokens = Math.max(64, input.maxInputTokens); const cleanHistory = input.history.map(normalizeText).filter(Boolean); let history = [...cleanHistory]; let query = normalizeText(input.query); const enc = getEncoder(model);
// System-prefix token count comes from the (model, prefix) cache.
const staticTokens = getStaticTokens(model, input.systemPrefix, enc);
// Raw count before any trimming — reported so callers can explain savings.
let prompt = buildPrompt(input.systemPrefix, history, query); const rawInputTokens = countTokens(enc, prompt); let inputTokens = rawInputTokens; let trimmedTurns = 0; let queryWasTrimmed = false;
// Pass 1: drop oldest turns one at a time, re-counting the full prompt each time.
while (inputTokens > maxInputTokens && history.length > 0) { history.shift(); trimmedTurns++; prompt = buildPrompt(input.systemPrefix, history, query); inputTokens = countTokens(enc, prompt); }
// Pass 2: if still over budget with no history left to drop, clip the query
// itself. The query always keeps at least 16 tokens, so the final prompt can
// still exceed the budget when the prefix alone is too large.
if (inputTokens > maxInputTokens) { const promptWithoutQuery = buildPrompt(input.systemPrefix, history, ""); const promptWithoutQueryTokens = countTokens(enc, promptWithoutQuery); const remainingBudget = Math.max( 16, maxInputTokens - promptWithoutQueryTokens, ); const clipped = truncateToTokenBudget(enc, query, remainingBudget); queryWasTrimmed = clipped.length < query.length; query = clipped; prompt = buildPrompt(input.systemPrefix, history, query); inputTokens = countTokens(enc, prompt); }
// dynamicTokens = everything beyond the cached system prefix (clamped at 0).
return { prompt, rawInputTokens, inputTokens, staticTokens, dynamicTokens: Math.max(0, inputTokens - staticTokens), trimmedTurns, queryWasTrimmed, };}
export const preparePrompt = task<PromptInput, PromptPlan>({ f: (input) => preparePromptHost(input),});
export function preparePromptFastHost(input: PromptInput): PromptPlanFast { const plan = preparePromptHost(input); return { rawInputTokens: plan.rawInputTokens, inputTokens: plan.inputTokens, staticTokens: plan.staticTokens, dynamicTokens: plan.dynamicTokens, trimmedTurns: plan.trimmedTurns, queryWasTrimmed: plan.queryWasTrimmed, };}
/** Budgets a whole batch and returns only aggregate counters — the compact
 * shape worker tasks send back instead of full prompt strings. */
export function preparePromptBatchFastHost(
  inputs: PromptInput[],
): PromptBudgetSummary {
  const totals: PromptBudgetSummary = {
    rawTokens: 0,
    budgetedTokens: 0,
    staticTokens: 0,
    dynamicTokens: 0,
    trimmedRuns: 0,
    queryTrimmedRuns: 0,
    turnsDropped: 0,
  };
  for (const input of inputs) {
    const plan = preparePromptFastHost(input);
    totals.rawTokens += plan.rawInputTokens;
    totals.budgetedTokens += plan.inputTokens;
    totals.staticTokens += plan.staticTokens;
    totals.dynamicTokens += plan.dynamicTokens;
    totals.turnsDropped += plan.trimmedTurns;
    if (plan.trimmedTurns > 0) totals.trimmedRuns += 1;
    if (plan.queryWasTrimmed) totals.queryTrimmedRuns += 1;
  }
  return totals;
}
export const preparePromptBatchFast = task<PromptInput[], PromptBudgetSummary>({ f: (inputs) => preparePromptBatchFastHost(inputs),});When this matters
Section titled “When this matters”Token budgeting is a preflight step that runs on every LLM request. If you’re handling high-throughput chat traffic — multiple users, long conversation histories — the tokenization and trimming work adds up. Offloading it to workers keeps your main thread focused on routing and I/O while budget calculations happen in parallel. It also gives you predictable input sizes, which helps with cost control and latency.