Mirror of https://github.com/jina-ai/node-DeepResearch.git
saas: llm usage gets a x3 multiplier (#124)
* pricing-change: llm consumptions x3
* fix: llm usage condition
parent 06b91dbcf8
commit 58af7eb3a8
@@ -58,7 +58,7 @@ export async function readUrl(
   tokenTracker.trackUsage('read', {
     totalTokens: tokens,
     promptTokens: url.length,
-    completionTokens: tokens
+    completionTokens: 0
   });
 
   return { response: data };
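With this change the read tool reports its token count only through totalTokens and promptTokens and leaves completionTokens at 0; under the scaling rule in the next hunk, a record with zero completion tokens counts as non-LLM usage and is not multiplied. A minimal sketch of such a record, with illustrative numbers only (the LanguageModelUsage shape is assumed from the fields used in the diff):

```typescript
// Assumed shape, matching the { promptTokens, completionTokens, totalTokens } fields in the diff.
interface LanguageModelUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

// Illustrative usage record emitted by the read tool after this commit:
// completionTokens is 0, so getTotalUsage() will not apply the 3x LLM multiplier to it.
const readUsage: LanguageModelUsage = {
  totalTokens: 1500,    // e.g. token count reported by the reader
  promptTokens: 42,     // url.length, as in the diff
  completionTokens: 0,  // marks this as non-LLM usage
};
```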
@@ -31,9 +31,11 @@ export class TokenTracker extends EventEmitter {
 
   getTotalUsage(): LanguageModelUsage {
     return this.usages.reduce((acc, { usage }) => {
-      acc.promptTokens += usage.promptTokens;
-      acc.completionTokens += usage.completionTokens;
-      acc.totalTokens += usage.totalTokens;
+      // CompletionTokens > 0 means LLM usage, apply 3x multiplier
+      const scaler = usage.completionTokens > 0 ? 3 : 1;
+      acc.promptTokens += usage.promptTokens * scaler;
+      acc.completionTokens += usage.completionTokens * scaler;
+      acc.totalTokens += usage.totalTokens * scaler;
       return acc;
     }, { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
   }
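To make the billing rule concrete, here is a small self-contained sketch of the reducer from getTotalUsage, pulled out of the class as a plain function; the function name and the sample figures are illustrative, not part of the repo:

```typescript
interface LanguageModelUsage {
  promptTokens: number;
  completionTokens: number;
  totalTokens: number;
}

// Same rule as the diff: completionTokens > 0 marks an LLM call, billed at 3x;
// anything else (e.g. the read tool above) is counted at face value.
function totalUsage(usages: LanguageModelUsage[]): LanguageModelUsage {
  return usages.reduce((acc, usage) => {
    const scaler = usage.completionTokens > 0 ? 3 : 1;
    acc.promptTokens += usage.promptTokens * scaler;
    acc.completionTokens += usage.completionTokens * scaler;
    acc.totalTokens += usage.totalTokens * scaler;
    return acc;
  }, { promptTokens: 0, completionTokens: 0, totalTokens: 0 });
}

// Illustrative: one LLM call (scaled 3x) plus one read call (unscaled).
const total = totalUsage([
  { promptTokens: 100, completionTokens: 50, totalTokens: 150 },  // LLM  -> 300 / 150 /  450
  { promptTokens: 42, completionTokens: 0, totalTokens: 1500 },   // read ->  42 /   0 / 1500
]);
// total = { promptTokens: 342, completionTokens: 150, totalTokens: 1950 }
```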