From 58af7eb3a822084d386b887d7b82e30a069e8be7 Mon Sep 17 00:00:00 2001
From: Yanlong Wang
Date: Mon, 15 Sep 2025 12:46:08 +0800
Subject: [PATCH] saas: llm usage gets a x3 multiplier (#124)

* pricing-change: llm consumptions x3

* fix: llm usage condition
---
 src/tools/read.ts          | 2 +-
 src/utils/token-tracker.ts | 8 +++++---
 2 files changed, 6 insertions(+), 4 deletions(-)

diff --git a/src/tools/read.ts b/src/tools/read.ts
index c950fae..50a733a 100644
--- a/src/tools/read.ts
+++ b/src/tools/read.ts
@@ -58,7 +58,7 @@ export async function readUrl(
   tokenTracker.trackUsage('read', {
     totalTokens: tokens,
     promptTokens: url.length,
-    completionTokens: tokens
+    completionTokens: 0
   });
 
   return { response: data };
diff --git a/src/utils/token-tracker.ts b/src/utils/token-tracker.ts
index 1ed2d1f..892115f 100644
--- a/src/utils/token-tracker.ts
+++ b/src/utils/token-tracker.ts
@@ -31,9 +31,11 @@ export class TokenTracker extends EventEmitter {
 
   getTotalUsage(): LanguageModelUsage {
     return this.usages.reduce((acc, { usage }) => {
-      acc.promptTokens += usage.promptTokens;
-      acc.completionTokens += usage.completionTokens;
-      acc.totalTokens += usage.totalTokens;
+      // completionTokens > 0 indicates LLM usage; apply the 3x pricing multiplier
+      const scaler = usage.completionTokens > 0 ? 3 : 1;
+      acc.promptTokens += usage.promptTokens * scaler;
+      acc.completionTokens += usage.completionTokens * scaler;
+      acc.totalTokens += usage.totalTokens * scaler;
      return acc;
    }, { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
  }