fix: improve JSON parsing and increase test timeouts

Co-Authored-By: Han Xiao <han.xiao@jina.ai>
This commit is contained in:
Devin AI
2025-02-05 12:41:20 +00:00
parent 42075322c5
commit 0ded17d735
7 changed files with 18 additions and 4 deletions

View File

@@ -1,5 +1,7 @@
 import { dedupQueries } from '../dedup';
+jest.setTimeout(10000);
 describe('dedupQueries', () => {
   it('should remove duplicate queries', async () => {
     const queries = ['typescript tutorial', 'typescript tutorial', 'javascript basics'];

View File

@@ -1,5 +1,7 @@
 import { analyzeSteps } from '../error-analyzer';
+jest.setTimeout(10000);
 describe('analyzeSteps', () => {
   it('should analyze error steps', async () => {
     const { response } = await analyzeSteps(['Step 1: Search failed', 'Step 2: Invalid query']);

View File

@@ -1,6 +1,8 @@
 import { evaluateAnswer } from '../evaluator';
 import { TokenTracker } from '../../utils/token-tracker';
+jest.setTimeout(10000);
 describe('evaluateAnswer', () => {
   it('should evaluate answer definitiveness', async () => {
     const tokenTracker = new TokenTracker();

View File

@@ -27,8 +27,10 @@ async function generateResponse(provider: AIProvider, prompt: string, providerTy
     }
   });
   const response = await result.response;
+  const text = response.text();
+  const jsonMatch = text.match(/```json\s*(\{[\s\S]*?\})\s*```/) || text.match(/(\{[\s\S]*\})/);
   return {
-    text: response.text(),
+    text: jsonMatch ? jsonMatch[1].trim() : text,
     tokens: response.usageMetadata?.totalTokenCount || 0
   };
 }

View File

@@ -114,8 +114,10 @@ async function generateResponse(provider: AIProvider, prompt: string, providerTy
     }
   });
   const response = await result.response;
+  const text = response.text();
+  const jsonMatch = text.match(/```json\s*(\{[\s\S]*?\})\s*```/) || text.match(/(\{[\s\S]*\})/);
   return {
-    text: response.text(),
+    text: jsonMatch ? jsonMatch[1].trim() : text,
     tokens: response.usageMetadata?.totalTokenCount || 0
   };
 }

View File

@@ -25,8 +25,10 @@ async function generateResponse(provider: AIProvider, prompt: string, providerTy
     }
   });
   const response = await result.response;
+  const text = response.text();
+  const jsonMatch = text.match(/```json\s*(\{[\s\S]*?\})\s*```/) || text.match(/(\{[\s\S]*\})/);
   return {
-    text: response.text(),
+    text: jsonMatch ? jsonMatch[1].trim() : text,
     tokens: response.usageMetadata?.totalTokenCount || 0
   };
 }

View File

@@ -105,8 +105,10 @@ async function generateResponse(provider: AIProvider, prompt: string, providerTy
     }
   });
   const response = await result.response;
+  const text = response.text();
+  const jsonMatch = text.match(/```json\s*(\{[\s\S]*?\})\s*```/) || text.match(/(\{[\s\S]*\})/);
   return {
-    text: response.text(),
+    text: jsonMatch ? jsonMatch[1].trim() : text,
     tokens: response.usageMetadata?.totalTokenCount || 0
   };
 }