diff --git a/src/__tests__/llm-client.test.ts b/src/__tests__/llm-client.test.ts
index 4da98c1..8f7db59 100644
--- a/src/__tests__/llm-client.test.ts
+++ b/src/__tests__/llm-client.test.ts
@@ -1,17 +1,4 @@
 import { GoogleAIWrapper, LocalLLMClient } from '../utils/llm-client';
-import { llmClient } from '../config';
-
-jest.mock('../config', () => {
-  let mockConfig = { llmClient: null };
-  return {
-    get llmClient() {
-      return mockConfig.llmClient;
-    },
-    __setMockConfig: (config: { llmClient: any }) => {
-      mockConfig = config;
-    }
-  };
-});
 
 describe('LLM Client', () => {
   const originalEnv = process.env;
@@ -25,23 +12,21 @@ describe('LLM Client', () => {
     process.env = originalEnv;
   });
 
-  it('should use GoogleAIWrapper by default', () => {
+  it('should use GoogleAIWrapper by default', async () => {
     process.env.LLM_PROVIDER = 'gemini';
     process.env.GEMINI_API_KEY = 'test-key';
-    jest.isolateModules(() => {
-      const { llmClient } = require('../config');
-      expect(llmClient).toBeInstanceOf(GoogleAIWrapper);
-    });
+    jest.resetModules();
+    const { llmClient } = await import('../config');
+    expect(llmClient).toBeInstanceOf(GoogleAIWrapper);
   });
 
-  it('should use LocalLLMClient when configured', () => {
+  it('should use LocalLLMClient when configured', async () => {
     process.env.LLM_PROVIDER = 'local';
     process.env.LOCAL_LLM_HOSTNAME = 'localhost';
     process.env.LOCAL_LLM_PORT = '8000';
     process.env.LOCAL_LLM_MODEL = 'test-model';
-    jest.isolateModules(() => {
-      const { llmClient } = require('../config');
-      expect(llmClient).toBeInstanceOf(LocalLLMClient);
-    });
+    jest.resetModules();
+    const { llmClient } = await import('../config');
+    expect(llmClient).toBeInstanceOf(LocalLLMClient);
   });
 });
diff --git a/src/tools/__tests__/utils/llm-mock.ts b/src/tools/__tests__/utils/llm-mock.ts
index 20fec91..5296ad9 100644
--- a/src/tools/__tests__/utils/llm-mock.ts
+++ b/src/tools/__tests__/utils/llm-mock.ts
@@ -3,7 +3,7 @@ import { LLMClient, LLMClientConfig } from '../../../utils/llm-client';
 export class MockLLMClient implements LLMClient {
   constructor(private mockResponse: string = '{"queries": ["test query"]}') {}
-  getGenerativeModel(_config: LLMClientConfig) {
+  getGenerativeModel(_: LLMClientConfig) {
     return {
       generateContent: async () => ({
         response: {