mirror of
https://github.com/jina-ai/node-DeepResearch.git
synced 2025-12-25 22:16:49 +08:00
saas: llm usage gets a x3 multiplier (#124)
* pricing-change: LLM consumption x3 * fix: LLM usage condition
This commit is contained in:
parent
06b91dbcf8
commit
58af7eb3a8
@ -58,7 +58,7 @@ export async function readUrl(
|
||||
tokenTracker.trackUsage('read', {
|
||||
totalTokens: tokens,
|
||||
promptTokens: url.length,
|
||||
completionTokens: tokens
|
||||
completionTokens: 0
|
||||
});
|
||||
|
||||
return { response: data };
|
||||
|
||||
@ -31,9 +31,11 @@ export class TokenTracker extends EventEmitter {
|
||||
|
||||
getTotalUsage(): LanguageModelUsage {
|
||||
return this.usages.reduce((acc, { usage }) => {
|
||||
acc.promptTokens += usage.promptTokens;
|
||||
acc.completionTokens += usage.completionTokens;
|
||||
acc.totalTokens += usage.totalTokens;
|
||||
// CompletionTokens > 0 means LLM usage, apply 3x multiplier
|
||||
const scaler = usage.completionTokens > 0 ? 3 : 1;
|
||||
acc.promptTokens += usage.promptTokens * scaler;
|
||||
acc.completionTokens += usage.completionTokens * scaler;
|
||||
acc.totalTokens += usage.totalTokens * scaler;
|
||||
return acc;
|
||||
}, { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
|
||||
}
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user