refactor: replace Google's Gemini client with OpenAI

- Remove @google/generative-ai dependency
- Add OpenAI client
- Update all tool files to use OpenAI's chat completions API
- Convert schema definitions to use Zod
- Maintain existing temperature configurations
- Add proper type safety with TypeScript

Co-Authored-By: Han Xiao <han.xiao@jina.ai>
This commit is contained in:
Devin AI
2025-02-05 12:00:57 +00:00
parent 2b84a577c8
commit 22c2244225
10 changed files with 673 additions and 261 deletions

445
package-lock.json generated
View File

@@ -9,16 +9,18 @@
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"@google/generative-ai": "^0.21.0",
"@types/cors": "^2.8.17",
"@types/express": "^5.0.0",
"@types/node-fetch": "^2.6.12",
"ai": "^4.1.19",
"axios": "^1.7.9",
"cors": "^2.8.5",
"duck-duck-scrape": "^2.2.7",
"express": "^4.21.2",
"node-fetch": "^3.3.2",
"undici": "^7.3.0"
"openai": "^4.82.0",
"undici": "^7.3.0",
"zod": "^3.24.1"
},
"devDependencies": {
"@types/jest": "^29.5.14",
@@ -33,6 +35,90 @@
"typescript": "^5.7.3"
}
},
"node_modules/@ai-sdk/provider": {
"version": "1.0.7",
"resolved": "https://registry.npmjs.org/@ai-sdk/provider/-/provider-1.0.7.tgz",
"integrity": "sha512-q1PJEZ0qD9rVR+8JFEd01/QM++csMT5UVwYXSN2u54BrVw/D8TZLTeg2FEfKK00DgAx0UtWd8XOhhwITP9BT5g==",
"license": "Apache-2.0",
"dependencies": {
"json-schema": "^0.4.0"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@ai-sdk/provider-utils": {
"version": "2.1.6",
"resolved": "https://registry.npmjs.org/@ai-sdk/provider-utils/-/provider-utils-2.1.6.tgz",
"integrity": "sha512-Pfyaj0QZS22qyVn5Iz7IXcJ8nKIKlu2MeSAdKJzTwkAks7zdLaKVB+396Rqcp1bfQnxl7vaduQVMQiXUrgK8Gw==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider": "1.0.7",
"eventsource-parser": "^3.0.0",
"nanoid": "^3.3.8",
"secure-json-parse": "^2.7.0"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"zod": "^3.0.0"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
},
"node_modules/@ai-sdk/react": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@ai-sdk/react/-/react-1.1.9.tgz",
"integrity": "sha512-2si293+NYs3WbPfHXSZ4/71NtYV0zxYhhHSL4H1EPyHU9Gf/H81rhjsslvt45mguPecPkMG19/VIXDjJ4uTwsw==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider-utils": "2.1.6",
"@ai-sdk/ui-utils": "1.1.9",
"swr": "^2.2.5",
"throttleit": "2.1.0"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"react": "^18 || ^19 || ^19.0.0-rc",
"zod": "^3.0.0"
},
"peerDependenciesMeta": {
"react": {
"optional": true
},
"zod": {
"optional": true
}
}
},
"node_modules/@ai-sdk/ui-utils": {
"version": "1.1.9",
"resolved": "https://registry.npmjs.org/@ai-sdk/ui-utils/-/ui-utils-1.1.9.tgz",
"integrity": "sha512-o0tDopdtHqgr9FAx0qSkdwPUDSdX+4l42YOn70zvs6+O+PILeTpf2YYV5Xr32TbNfSUq1DWLLhU1O7/3Dsxm1Q==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider": "1.0.7",
"@ai-sdk/provider-utils": "2.1.6",
"zod-to-json-schema": "^3.24.1"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"zod": "^3.0.0"
},
"peerDependenciesMeta": {
"zod": {
"optional": true
}
}
},
"node_modules/@ampproject/remapping": {
"version": "2.3.0",
"resolved": "https://registry.npmjs.org/@ampproject/remapping/-/remapping-2.3.0.tgz",
@@ -685,15 +771,6 @@
"node": "^12.22.0 || ^14.17.0 || >=16.0.0"
}
},
"node_modules/@google/generative-ai": {
"version": "0.21.0",
"resolved": "https://registry.npmjs.org/@google/generative-ai/-/generative-ai-0.21.0.tgz",
"integrity": "sha512-7XhUbtnlkSEZK15kN3t+tzIMxsbKm/dSkKBFalj+20NvPKe1kBY7mR2P7vuijEn+f06z5+A8bVGKO0v39cr6Wg==",
"license": "Apache-2.0",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/@humanwhocodes/config-array": {
"version": "0.13.0",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.13.0.tgz",
@@ -1300,6 +1377,15 @@
"node": ">= 8"
}
},
"node_modules/@opentelemetry/api": {
"version": "1.9.0",
"resolved": "https://registry.npmjs.org/@opentelemetry/api/-/api-1.9.0.tgz",
"integrity": "sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==",
"license": "Apache-2.0",
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/@sinclair/typebox": {
"version": "0.27.8",
"resolved": "https://registry.npmjs.org/@sinclair/typebox/-/typebox-0.27.8.tgz",
@@ -1428,6 +1514,12 @@
"@types/node": "*"
}
},
"node_modules/@types/diff-match-patch": {
"version": "1.0.36",
"resolved": "https://registry.npmjs.org/@types/diff-match-patch/-/diff-match-patch-1.0.36.tgz",
"integrity": "sha512-xFdR6tkm0MWvBfO8xXCSsinYxHcqkQUlcHeSpMC2ukzOb6lwQAfDmW+Qt0AvlGd8HpsS28qKsB+oPeJn9I39jg==",
"license": "MIT"
},
"node_modules/@types/express": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/@types/express/-/express-5.0.0.tgz",
@@ -1788,6 +1880,18 @@
"dev": true,
"license": "ISC"
},
"node_modules/abort-controller": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
"integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
"license": "MIT",
"dependencies": {
"event-target-shim": "^5.0.0"
},
"engines": {
"node": ">=6.5"
}
},
"node_modules/accepts": {
"version": "1.3.8",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-1.3.8.tgz",
@@ -1837,6 +1941,47 @@
"node": ">=0.4.0"
}
},
"node_modules/agentkeepalive": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.6.0.tgz",
"integrity": "sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==",
"license": "MIT",
"dependencies": {
"humanize-ms": "^1.2.1"
},
"engines": {
"node": ">= 8.0.0"
}
},
"node_modules/ai": {
"version": "4.1.19",
"resolved": "https://registry.npmjs.org/ai/-/ai-4.1.19.tgz",
"integrity": "sha512-Xx498vbFVN4Y3F4kWF59ojLyn/d++NbSZwENq1zuSFW4OjwzTf79jtMxD+BYeMiDH+mgIrmROY/ONtqMOchZGw==",
"license": "Apache-2.0",
"dependencies": {
"@ai-sdk/provider": "1.0.7",
"@ai-sdk/provider-utils": "2.1.6",
"@ai-sdk/react": "1.1.9",
"@ai-sdk/ui-utils": "1.1.9",
"@opentelemetry/api": "1.9.0",
"jsondiffpatch": "0.6.0"
},
"engines": {
"node": ">=18"
},
"peerDependencies": {
"react": "^18 || ^19 || ^19.0.0-rc",
"zod": "^3.0.0"
},
"peerDependenciesMeta": {
"react": {
"optional": true
},
"zod": {
"optional": true
}
}
},
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -2625,6 +2770,15 @@
"node": ">= 0.8"
}
},
"node_modules/dequal": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/dequal/-/dequal-2.0.3.tgz",
"integrity": "sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==",
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/destroy": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/destroy/-/destroy-1.2.0.tgz",
@@ -2655,6 +2809,12 @@
"node": ">=0.3.1"
}
},
"node_modules/diff-match-patch": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/diff-match-patch/-/diff-match-patch-1.0.5.tgz",
"integrity": "sha512-IayShXAgj/QMXgB0IWmKx+rOPuGMhqm5w6jvFxmVenXKIzRqTAAsbBPT3kWQeGANj3jGgvcvv4yK6SxqYmikgw==",
"license": "Apache-2.0"
},
"node_modules/diff-sequences": {
"version": "29.6.3",
"resolved": "https://registry.npmjs.org/diff-sequences/-/diff-sequences-29.6.3.tgz",
@@ -3056,6 +3216,24 @@
"node": ">= 0.6"
}
},
"node_modules/event-target-shim": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
"integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
"license": "MIT",
"engines": {
"node": ">=6"
}
},
"node_modules/eventsource-parser": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-3.0.0.tgz",
"integrity": "sha512-T1C0XCUimhxVQzW4zFipdx0SficT651NnkR0ZSH3yQwh+mFMdLfgjABVi4YtMTtaL4s168593DaoaRLMqryavA==",
"license": "MIT",
"engines": {
"node": ">=18.0.0"
}
},
"node_modules/execa": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/execa/-/execa-5.1.1.tgz",
@@ -3416,6 +3594,34 @@
"node": ">= 6"
}
},
"node_modules/form-data-encoder": {
"version": "1.7.2",
"resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
"integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==",
"license": "MIT"
},
"node_modules/formdata-node": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
"integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
"license": "MIT",
"dependencies": {
"node-domexception": "1.0.0",
"web-streams-polyfill": "4.0.0-beta.3"
},
"engines": {
"node": ">= 12.20"
}
},
"node_modules/formdata-node/node_modules/web-streams-polyfill": {
"version": "4.0.0-beta.3",
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
"integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
"license": "MIT",
"engines": {
"node": ">= 14"
}
},
"node_modules/formdata-polyfill": {
"version": "4.0.10",
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
@@ -3762,6 +3968,15 @@
"node": ">=10.17.0"
}
},
"node_modules/humanize-ms": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
"integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
"license": "MIT",
"dependencies": {
"ms": "^2.0.0"
}
},
"node_modules/iconv-lite": {
"version": "0.6.3",
"resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz",
@@ -4708,6 +4923,12 @@
"dev": true,
"license": "MIT"
},
"node_modules/json-schema": {
"version": "0.4.0",
"resolved": "https://registry.npmjs.org/json-schema/-/json-schema-0.4.0.tgz",
"integrity": "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA==",
"license": "(AFL-2.1 OR BSD-3-Clause)"
},
"node_modules/json-schema-traverse": {
"version": "0.4.1",
"resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz",
@@ -4735,6 +4956,35 @@
"node": ">=6"
}
},
"node_modules/jsondiffpatch": {
"version": "0.6.0",
"resolved": "https://registry.npmjs.org/jsondiffpatch/-/jsondiffpatch-0.6.0.tgz",
"integrity": "sha512-3QItJOXp2AP1uv7waBkao5nCvhEv+QmJAd38Ybq7wNI74Q+BBmnLn4EDKz6yI9xGAIQoUF87qHt+kc1IVxB4zQ==",
"license": "MIT",
"dependencies": {
"@types/diff-match-patch": "^1.0.36",
"chalk": "^5.3.0",
"diff-match-patch": "^1.0.5"
},
"bin": {
"jsondiffpatch": "bin/jsondiffpatch.js"
},
"engines": {
"node": "^18.0.0 || >=20.0.0"
}
},
"node_modules/jsondiffpatch/node_modules/chalk": {
"version": "5.4.1",
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.4.1.tgz",
"integrity": "sha512-zgVZuo2WcZgfUEmsn6eO3kINexW8RAE4maiQ8QNs8CtpPCSyMiYsULR3HQYkm3w8FIA3SberyMJMSldGsW+U3w==",
"license": "MIT",
"engines": {
"node": "^12.17.0 || ^14.13 || >=16.0.0"
},
"funding": {
"url": "https://github.com/chalk/chalk?sponsor=1"
}
},
"node_modules/keyv": {
"version": "4.5.4",
"resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
@@ -4991,6 +5241,24 @@
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==",
"license": "MIT"
},
"node_modules/nanoid": {
"version": "3.3.8",
"resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.8.tgz",
"integrity": "sha512-WNLf5Sd8oZxOm+TzppcYk8gVOgP+l58xNy58D0nbUnOxOWRWvlcCV4kUF7ltmI6PsrLl/BgKEyS4mqsGChFN0w==",
"funding": [
{
"type": "github",
"url": "https://github.com/sponsors/ai"
}
],
"license": "MIT",
"bin": {
"nanoid": "bin/nanoid.cjs"
},
"engines": {
"node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1"
}
},
"node_modules/natural-compare": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz",
@@ -5156,6 +5424,71 @@
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/openai": {
"version": "4.82.0",
"resolved": "https://registry.npmjs.org/openai/-/openai-4.82.0.tgz",
"integrity": "sha512-1bTxOVGZuVGsKKUWbh3BEwX1QxIXUftJv+9COhhGGVDTFwiaOd4gWsMynF2ewj1mg6by3/O+U8+EEHpWRdPaJg==",
"license": "Apache-2.0",
"dependencies": {
"@types/node": "^18.11.18",
"@types/node-fetch": "^2.6.4",
"abort-controller": "^3.0.0",
"agentkeepalive": "^4.2.1",
"form-data-encoder": "1.7.2",
"formdata-node": "^4.3.2",
"node-fetch": "^2.6.7"
},
"bin": {
"openai": "bin/cli"
},
"peerDependencies": {
"ws": "^8.18.0",
"zod": "^3.23.8"
},
"peerDependenciesMeta": {
"ws": {
"optional": true
},
"zod": {
"optional": true
}
}
},
"node_modules/openai/node_modules/@types/node": {
"version": "18.19.75",
"resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.75.tgz",
"integrity": "sha512-UIksWtThob6ZVSyxcOqCLOUNg/dyO1Qvx4McgeuhrEtHTLFTf7BBhEazaE4K806FGTPtzd/2sE90qn4fVr7cyw==",
"license": "MIT",
"dependencies": {
"undici-types": "~5.26.4"
}
},
"node_modules/openai/node_modules/node-fetch": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
"integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
"license": "MIT",
"dependencies": {
"whatwg-url": "^5.0.0"
},
"engines": {
"node": "4.x || >=6.0.0"
},
"peerDependencies": {
"encoding": "^0.1.0"
},
"peerDependenciesMeta": {
"encoding": {
"optional": true
}
}
},
"node_modules/openai/node_modules/undici-types": {
"version": "5.26.5",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
"integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
"license": "MIT"
},
"node_modules/optionator": {
"version": "0.9.4",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz",
@@ -5579,6 +5912,16 @@
"node": ">=0.10.0"
}
},
"node_modules/react": {
"version": "19.0.0",
"resolved": "https://registry.npmjs.org/react/-/react-19.0.0.tgz",
"integrity": "sha512-V8AVnmPIICiWpGfm6GLzCR/W5FXLchHop40W4nXBmdlEceh16rCN8O8LNWm5bh5XUX91fh7KpA+W0TgMKmgTpQ==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/react-is": {
"version": "18.3.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
@@ -5744,6 +6087,12 @@
"integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==",
"license": "ISC"
},
"node_modules/secure-json-parse": {
"version": "2.7.0",
"resolved": "https://registry.npmjs.org/secure-json-parse/-/secure-json-parse-2.7.0.tgz",
"integrity": "sha512-6aU+Rwsezw7VR8/nyvKTx8QpWH9FrcYiXXlqC4z5d5XQBDRqtbfsRjnwGyqbi3gddNtWHuEk9OANUotL26qKUw==",
"license": "BSD-3-Clause"
},
"node_modules/semver": {
"version": "7.7.0",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.0.tgz",
@@ -6106,6 +6455,19 @@
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/swr": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/swr/-/swr-2.3.1.tgz",
"integrity": "sha512-ALcpdX8Q2WGkuSKrxb1SSGCzoRb3xfkq0SH+AhtF9OXIWIXGSA+uJzGT682UJjqSTC2uN0myJJikFz43ApUPAw==",
"license": "MIT",
"dependencies": {
"dequal": "^2.0.3",
"use-sync-external-store": "^1.4.0"
},
"peerDependencies": {
"react": "^16.11.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/test-exclude": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/test-exclude/-/test-exclude-6.0.0.tgz",
@@ -6152,6 +6514,18 @@
"dev": true,
"license": "MIT"
},
"node_modules/throttleit": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/throttleit/-/throttleit-2.1.0.tgz",
"integrity": "sha512-nt6AMGKW1p/70DF/hGBdJB57B8Tspmbp5gfJ8ilhLnt7kkr2ye7hzD6NVG8GGErk2HWF34igrL2CXmNIkzKqKw==",
"license": "MIT",
"engines": {
"node": ">=18"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/tmpl": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/tmpl/-/tmpl-1.0.5.tgz",
@@ -6181,6 +6555,12 @@
"node": ">=0.6"
}
},
"node_modules/tr46": {
"version": "0.0.3",
"resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
"integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==",
"license": "MIT"
},
"node_modules/ts-api-utils": {
"version": "1.4.3",
"resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.4.3.tgz",
@@ -6415,6 +6795,15 @@
"punycode": "^2.1.0"
}
},
"node_modules/use-sync-external-store": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.4.0.tgz",
"integrity": "sha512-9WXSPC5fMv61vaupRkCKCxsPxBocVnwakBEkMIHHpkTTg6icbJtg6jzgtLDm4bl3cSHAca52rYWih0k4K3PfHw==",
"license": "MIT",
"peerDependencies": {
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
}
},
"node_modules/utils-merge": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz",
@@ -6485,6 +6874,22 @@
"node": ">= 8"
}
},
"node_modules/webidl-conversions": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
"integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==",
"license": "BSD-2-Clause"
},
"node_modules/whatwg-url": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
"integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
"license": "MIT",
"dependencies": {
"tr46": "~0.0.3",
"webidl-conversions": "^3.0.0"
}
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
@@ -6618,6 +7023,24 @@
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/zod": {
"version": "3.24.1",
"resolved": "https://registry.npmjs.org/zod/-/zod-3.24.1.tgz",
"integrity": "sha512-muH7gBL9sI1nciMZV67X5fTKKBLtwpZ5VBp1vsOQzj1MhrBZ4wlVCm3gedKZWLp0Oyel8sIGfeiz54Su+OVT+A==",
"license": "MIT",
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-to-json-schema": {
"version": "3.24.1",
"resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.24.1.tgz",
"integrity": "sha512-3h08nf3Vw3Wl3PK+q3ow/lIil81IT2Oa7YpQyUUDsEWbXveMesdfK1xBd2RhCkynwZndAxixji/7SYJJowr62w==",
"license": "ISC",
"peerDependencies": {
"zod": "^3.24.1"
}
}
}
}

View File

@@ -18,16 +18,18 @@
"license": "ISC",
"description": "",
"dependencies": {
"@google/generative-ai": "^0.21.0",
"@types/cors": "^2.8.17",
"@types/express": "^5.0.0",
"@types/node-fetch": "^2.6.12",
"ai": "^4.1.19",
"axios": "^1.7.9",
"cors": "^2.8.5",
"duck-duck-scrape": "^2.2.7",
"express": "^4.21.2",
"node-fetch": "^3.3.2",
"undici": "^7.3.0"
"openai": "^4.82.0",
"undici": "^7.3.0",
"zod": "^3.24.1"
},
"devDependencies": {
"@types/jest": "^29.5.14",

View File

@@ -1,4 +1,4 @@
import {GoogleGenerativeAI, SchemaType} from "@google/generative-ai";
import OpenAI from 'openai';
import {readUrl} from "./tools/read";
import fs from 'fs/promises';
import {SafeSearchType, search as duckSearch} from "duck-duck-scrape";
@@ -7,100 +7,71 @@ import {rewriteQuery} from "./tools/query-rewriter";
import {dedupQueries} from "./tools/dedup";
import {evaluateAnswer} from "./tools/evaluator";
import {analyzeSteps} from "./tools/error-analyzer";
import {GEMINI_API_KEY, SEARCH_PROVIDER, STEP_SLEEP, modelConfigs} from "./config";
import {OPENAI_API_KEY, SEARCH_PROVIDER, STEP_SLEEP, modelConfigs} from "./config";
import { z } from 'zod';
import {TokenTracker} from "./utils/token-tracker";
import {ActionTracker} from "./utils/action-tracker";
import {StepAction, SchemaProperty, ResponseSchema, AnswerAction} from "./types";
import {TrackerContext} from "./types";
import {jinaSearch} from "./tools/jinaSearch";
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
async function sleep(ms: number) {
const seconds = Math.ceil(ms / 1000);
console.log(`Waiting ${seconds}s...`);
return new Promise(resolve => setTimeout(resolve, ms));
}
function getSchema(allowReflect: boolean, allowRead: boolean, allowAnswer: boolean, allowSearch: boolean): ResponseSchema {
function getSchema(allowReflect: boolean, allowRead: boolean, allowAnswer: boolean, allowSearch: boolean) {
const actions: string[] = [];
const properties: Record<string, SchemaProperty> = {
action: {
type: SchemaType.STRING,
enum: actions,
description: "Must match exactly one action type"
},
think: {
type: SchemaType.STRING,
description: "Explain why choose this action, what's the thought process behind choosing this action"
}
};
let schema = z.object({
action: z.enum([]).describe("Must match exactly one action type"),
think: z.string().describe("Explain why choose this action, what's the thought process behind choosing this action")
});
if (allowSearch) {
actions.push("search");
properties.searchQuery = {
type: SchemaType.STRING,
description: "Only required when choosing 'search' action, must be a short, keyword-based query that BM25, tf-idf based search engines can understand."
};
schema = schema.extend({
searchQuery: z.string().describe("Only required when choosing 'search' action, must be a short, keyword-based query that BM25, tf-idf based search engines can understand.")
});
}
if (allowAnswer) {
actions.push("answer");
properties.answer = {
type: SchemaType.STRING,
description: "Only required when choosing 'answer' action, must be the final answer in natural language"
};
properties.references = {
type: SchemaType.ARRAY,
items: {
type: SchemaType.OBJECT,
properties: {
exactQuote: {
type: SchemaType.STRING,
description: "Exact relevant quote from the document"
},
url: {
type: SchemaType.STRING,
description: "URL of the document; must be directly from the context"
}
},
required: ["exactQuote", "url"]
},
description: "Must be an array of references that support the answer, each reference must contain an exact quote and the URL of the document"
};
schema = schema.extend({
answer: z.string().describe("Only required when choosing 'answer' action, must be the final answer in natural language"),
references: z.array(
z.object({
exactQuote: z.string().describe("Exact relevant quote from the document"),
url: z.string().describe("URL of the document; must be directly from the context")
})
).describe("Must be an array of references that support the answer, each reference must contain an exact quote and the URL of the document")
});
}
if (allowReflect) {
actions.push("reflect");
properties.questionsToAnswer = {
type: SchemaType.ARRAY,
items: {
type: SchemaType.STRING,
description: "each question must be a single line, concise and clear. not composite or compound, less than 20 words."
},
description: "List of most important questions to fill the knowledge gaps of finding the answer to the original question",
maxItems: 2
};
schema = schema.extend({
questionsToAnswer: z.array(
z.string().describe("each question must be a single line, concise and clear. not composite or compound, less than 20 words.")
).max(2).describe("List of most important questions to fill the knowledge gaps of finding the answer to the original question")
});
}
if (allowRead) {
actions.push("visit");
properties.URLTargets = {
type: SchemaType.ARRAY,
items: {
type: SchemaType.STRING
},
maxItems: 2,
description: "Must be an array of URLs, choose up the most relevant 2 URLs to visit"
};
schema = schema.extend({
URLTargets: z.array(z.string())
.max(2)
.describe("Must be an array of URLs, choose up the most relevant 2 URLs to visit")
});
}
// Update the enum values after collecting all actions
properties.action.enum = actions;
return {
type: SchemaType.OBJECT,
properties,
required: ["action", "think"]
};
return schema.extend({
action: z.enum(actions as [string, ...string[]]).describe("Must match exactly one action type")
}).shape;
}
function getPrompt(
@@ -356,22 +327,24 @@ export async function getResponse(question: string, tokenBudget: number = 1_000_
false
);
const model = genAI.getGenerativeModel({
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.agent.model,
generationConfig: {
temperature: modelConfigs.agent.temperature,
responseMimeType: "application/json",
responseSchema: getSchema(allowReflect, allowRead, allowAnswer, allowSearch)
}
temperature: modelConfigs.agent.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: getSchema(allowReflect, allowRead, allowAnswer, allowSearch)
}],
function_call: { name: 'generate' }
});
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
context.tokenTracker.trackUsage('agent', usage?.totalTokenCount || 0);
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as StepAction : null;
if (!responseData) throw new Error('No valid response generated');
thisStep = JSON.parse(response.text());
context.tokenTracker.trackUsage('agent', result.usage.total_tokens);
thisStep = responseData;
// print allowed and chose action
const actionsStr = [allowSearch, allowRead, allowAnswer, allowReflect].map((a, i) => a ? ['search', 'read', 'answer', 'reflect'][i] : null).filter(a => a).join(', ');
console.log(`${thisStep.action} <- [${actionsStr}]`);
@@ -699,22 +672,25 @@ You decided to think out of the box or cut from a completely different angle.`);
true
);
const model = genAI.getGenerativeModel({
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.agentBeastMode.model,
generationConfig: {
temperature: modelConfigs.agentBeastMode.temperature,
responseMimeType: "application/json",
responseSchema: getSchema(false, false, allowAnswer, false)
}
temperature: modelConfigs.agentBeastMode.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: getSchema(false, false, allowAnswer, false)
}],
function_call: { name: 'generate' }
});
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
context.tokenTracker.trackUsage('agent', usage?.totalTokenCount || 0);
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as StepAction : null;
if (!responseData) throw new Error('No valid response generated');
context.tokenTracker.trackUsage('agent', result.usage.total_tokens);
await storeContext(prompt, [allContext, allKeywords, allQuestions, allKnowledge], totalStep);
thisStep = JSON.parse(response.text());
thisStep = responseData;
console.log(thisStep)
return {result: thisStep, context};
}
@@ -733,7 +709,7 @@ async function storeContext(prompt: string, memory: any[][], step: number) {
}
}
const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
// OpenAI client is initialized at the top of the file
export async function main() {

View File

@@ -29,12 +29,12 @@ if (process.env.https_proxy) {
}
}
export const GEMINI_API_KEY = process.env.GEMINI_API_KEY as string;
export const OPENAI_API_KEY = process.env.OPENAI_API_KEY as string;
export const JINA_API_KEY = process.env.JINA_API_KEY as string;
export const BRAVE_API_KEY = process.env.BRAVE_API_KEY as string;
export const SEARCH_PROVIDER: 'brave' | 'jina' | 'duck' = 'jina'
const DEFAULT_MODEL = 'gemini-1.5-flash';
const DEFAULT_MODEL = 'gpt-4';
const defaultConfig: ModelConfig = {
model: DEFAULT_MODEL,
@@ -68,5 +68,5 @@ export const modelConfigs: ToolConfigs = {
export const STEP_SLEEP = 1000;
if (!GEMINI_API_KEY) throw new Error("GEMINI_API_KEY not found");
if (!OPENAI_API_KEY) throw new Error("OPENAI_API_KEY not found");
if (!JINA_API_KEY) throw new Error("JINA_API_KEY not found");

View File

@@ -1,36 +1,16 @@
import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
import { GEMINI_API_KEY, modelConfigs } from "../config";
import OpenAI from 'openai';
import { OPENAI_API_KEY, modelConfigs } from "../config";
import { TokenTracker } from "../utils/token-tracker";
import { DedupResponse } from '../types';
import { z } from 'zod';
const responseSchema = {
type: SchemaType.OBJECT,
properties: {
think: {
type: SchemaType.STRING,
description: "Strategic reasoning about the overall deduplication approach"
},
unique_queries: {
type: SchemaType.ARRAY,
items: {
type: SchemaType.STRING,
description: "Unique query that passed the deduplication process, must be less than 30 characters"
},
description: "Array of semantically unique queries"
}
},
required: ["think", "unique_queries"]
};
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
const model = genAI.getGenerativeModel({
model: modelConfigs.dedup.model,
generationConfig: {
temperature: modelConfigs.dedup.temperature,
responseMimeType: "application/json",
responseSchema: responseSchema
}
const responseSchema = z.object({
think: z.string().describe("Strategic reasoning about the overall deduplication approach"),
unique_queries: z.array(
z.string().describe("Unique query that passed the deduplication process, must be less than 30 characters")
).describe("Array of semantically unique queries")
});
function getPrompt(newQueries: string[], existingQueries: string[]): string {
@@ -88,14 +68,26 @@ SetB: ${JSON.stringify(existingQueries)}`;
export async function dedupQueries(newQueries: string[], existingQueries: string[], tracker?: TokenTracker): Promise<{ unique_queries: string[], tokens: number }> {
try {
const prompt = getPrompt(newQueries, existingQueries);
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
const json = JSON.parse(response.text()) as DedupResponse;
console.log('Dedup:', json.unique_queries);
const tokens = usage?.totalTokenCount || 0;
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.dedup.model,
temperature: modelConfigs.dedup.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: responseSchema.shape
}],
function_call: { name: 'generate' }
});
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as DedupResponse : null;
if (!responseData) throw new Error('No valid response generated');
console.log('Dedup:', responseData.unique_queries);
const tokens = result.usage.total_tokens;
(tracker || new TokenTracker()).trackUsage('dedup', tokens);
return { unique_queries: json.unique_queries, tokens };
return { unique_queries: responseData.unique_queries, tokens };
} catch (error) {
console.error('Error in deduplication analysis:', error);
throw error;

View File

@@ -1,36 +1,15 @@
import {GoogleGenerativeAI, SchemaType} from "@google/generative-ai";
import { GEMINI_API_KEY, modelConfigs } from "../config";
import OpenAI from 'openai';
import { OPENAI_API_KEY, modelConfigs } from "../config";
import { TokenTracker } from "../utils/token-tracker";
import { ErrorAnalysisResponse } from '../types';
import { z } from 'zod';
const responseSchema = {
type: SchemaType.OBJECT,
properties: {
recap: {
type: SchemaType.STRING,
description: "Recap of the actions taken and the steps conducted"
},
blame: {
type: SchemaType.STRING,
description: "Which action or the step was the root cause of the answer rejection"
},
improvement: {
type: SchemaType.STRING,
description: "Suggested key improvement for the next iteration, do not use bullet points, be concise and hot-take vibe."
}
},
required: ["recap", "blame", "improvement"]
};
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
const model = genAI.getGenerativeModel({
model: modelConfigs.errorAnalyzer.model,
generationConfig: {
temperature: modelConfigs.errorAnalyzer.temperature,
responseMimeType: "application/json",
responseSchema: responseSchema
}
// Zod schema for the error-analyzer's structured output (recap / blame / improvement).
// NOTE(review): this object's `.shape` (a map of Zod type instances) is passed as
// OpenAI `functions[].parameters`, which expects plain JSON Schema — it likely
// needs a zod-to-json-schema conversion. TODO confirm.
const responseSchema = z.object({
  recap: z.string().describe("Recap of the actions taken and the steps conducted"),
  blame: z.string().describe("Which action or the step was the root cause of the answer rejection"),
  improvement: z.string().describe("Suggested key improvement for the next iteration, do not use bullet points, be concise and hot-take vibe.")
});
function getPrompt(diaryContext: string[]): string {
@@ -124,17 +103,30 @@ ${diaryContext.join('\n')}
export async function analyzeSteps(diaryContext: string[], tracker?: TokenTracker): Promise<{ response: ErrorAnalysisResponse, tokens: number }> {
try {
const prompt = getPrompt(diaryContext);
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
const json = JSON.parse(response.text()) as ErrorAnalysisResponse;
console.log('Error analysis:', {
is_valid: !json.blame,
reason: json.blame || 'No issues found'
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.errorAnalyzer.model,
temperature: modelConfigs.errorAnalyzer.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: responseSchema.shape
}],
function_call: { name: 'generate' }
});
const tokens = usage?.totalTokenCount || 0;
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as ErrorAnalysisResponse : null;
if (!responseData) throw new Error('No valid response generated');
console.log('Error analysis:', {
is_valid: !responseData.blame,
reason: responseData.blame || 'No issues found'
});
const tokens = result.usage.total_tokens;
(tracker || new TokenTracker()).trackUsage('error-analyzer', tokens);
return { response: json, tokens };
return { response: responseData, tokens };
} catch (error) {
console.error('Error in answer evaluation:', error);
throw error;

View File

@@ -1,32 +1,14 @@
import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
import { GEMINI_API_KEY, modelConfigs } from "../config";
import OpenAI from 'openai';
import { OPENAI_API_KEY, modelConfigs } from "../config";
import { TokenTracker } from "../utils/token-tracker";
import { EvaluationResponse } from '../types';
import { z } from 'zod';
const responseSchema = {
type: SchemaType.OBJECT,
properties: {
is_definitive: {
type: SchemaType.BOOLEAN,
description: "Whether the answer provides a definitive response without uncertainty or 'I don't know' type statements"
},
reasoning: {
type: SchemaType.STRING,
description: "Explanation of why the answer is or isn't definitive"
}
},
required: ["is_definitive", "reasoning"]
};
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
const model = genAI.getGenerativeModel({
model: modelConfigs.evaluator.model,
generationConfig: {
temperature: modelConfigs.evaluator.temperature,
responseMimeType: "application/json",
responseSchema: responseSchema
}
// Zod schema for the evaluator's structured verdict on an answer.
// NOTE(review): this object's `.shape` is passed as OpenAI `functions[].parameters`,
// which expects plain JSON Schema rather than Zod instances — likely needs a
// zod-to-json-schema conversion. TODO confirm.
const responseSchema = z.object({
  is_definitive: z.boolean().describe("Whether the answer provides a definitive response without uncertainty or 'I don't know' type statements"),
  reasoning: z.string().describe("Explanation of why the answer is or isn't definitive")
});
function getPrompt(question: string, answer: string): string {
@@ -66,17 +48,30 @@ Answer: ${JSON.stringify(answer)}`;
export async function evaluateAnswer(question: string, answer: string, tracker?: TokenTracker): Promise<{ response: EvaluationResponse, tokens: number }> {
try {
const prompt = getPrompt(question, answer);
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
const json = JSON.parse(response.text()) as EvaluationResponse;
console.log('Evaluation:', {
definitive: json.is_definitive,
reason: json.reasoning
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.evaluator.model,
temperature: modelConfigs.evaluator.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: responseSchema.shape
}],
function_call: { name: 'generate' }
});
const tokens = usage?.totalTokenCount || 0;
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as EvaluationResponse : null;
if (!responseData) throw new Error('No valid response generated');
console.log('Evaluation:', {
definitive: responseData.is_definitive,
reason: responseData.reasoning
});
const tokens = result.usage.total_tokens;
(tracker || new TokenTracker()).trackUsage('evaluator', tokens);
return { response: json, tokens };
return { response: responseData, tokens };
} catch (error) {
console.error('Error in answer evaluation:', error);
throw error;

View File

@@ -1,39 +1,18 @@
import { GoogleGenerativeAI, SchemaType } from "@google/generative-ai";
import { GEMINI_API_KEY, modelConfigs } from "../config";
import OpenAI from 'openai';
import { OPENAI_API_KEY, modelConfigs } from "../config";
import { TokenTracker } from "../utils/token-tracker";
import { SearchAction } from "../types";
import { SearchAction, KeywordsResponse } from "../types";
import { z } from 'zod';
import { KeywordsResponse } from '../types';
const openai = new OpenAI({ apiKey: OPENAI_API_KEY });
const responseSchema = {
type: SchemaType.OBJECT,
properties: {
think: {
type: SchemaType.STRING,
description: "Strategic reasoning about query complexity and search approach"
},
queries: {
type: SchemaType.ARRAY,
items: {
type: SchemaType.STRING,
description: "Search query, must be less than 30 characters"
},
description: "Array of search queries, orthogonal to each other",
minItems: 1,
maxItems: 3
}
},
required: ["think", "queries"]
};
const genAI = new GoogleGenerativeAI(GEMINI_API_KEY);
const model = genAI.getGenerativeModel({
model: modelConfigs.queryRewriter.model,
generationConfig: {
temperature: modelConfigs.queryRewriter.temperature,
responseMimeType: "application/json",
responseSchema: responseSchema
}
// Zod schema for the query-rewriter's output: strategic reasoning plus 1-3
// short, mutually-orthogonal search queries.
// NOTE(review): this object's `.shape` is passed as OpenAI `functions[].parameters`,
// which expects plain JSON Schema rather than Zod instances — likely needs a
// zod-to-json-schema conversion. TODO confirm.
const responseSchema = z.object({
  think: z.string().describe("Strategic reasoning about query complexity and search approach"),
  queries: z.array(
    z.string().describe("Search query, must be less than 30 characters")
  ).describe("Array of search queries, orthogonal to each other")
  .min(1)
  .max(3)
});
function getPrompt(action: SearchAction): string {
@@ -115,18 +94,29 @@ Intention: ${action.think}
export async function rewriteQuery(action: SearchAction, tracker?: TokenTracker): Promise<{ queries: string[], tokens: number }> {
try {
const prompt = getPrompt(action);
const result = await model.generateContent(prompt);
const response = await result.response;
const usage = response.usageMetadata;
const json = JSON.parse(response.text()) as KeywordsResponse;
const result = await openai.chat.completions.create({
messages: [{ role: 'user', content: prompt }],
model: modelConfigs.queryRewriter.model,
temperature: modelConfigs.queryRewriter.temperature,
max_tokens: 1000,
functions: [{
name: 'generate',
parameters: responseSchema.shape
}],
function_call: { name: 'generate' }
});
console.log('Query rewriter:', json.queries);
const tokens = usage?.totalTokenCount || 0;
const functionCall = result.choices[0].message.function_call;
const responseData = functionCall ? JSON.parse(functionCall.arguments) as KeywordsResponse : null;
if (!responseData) throw new Error('No valid response generated');
console.log('Query rewriter:', responseData.queries);
const tokens = result.usage.total_tokens;
(tracker || new TokenTracker()).trackUsage('query-rewriter', tokens);
return { queries: json.queries, tokens };
return { queries: responseData.queries, tokens };
} catch (error) {
console.error('Error in query rewriting:', error);
throw error;
}
}
}

View File

@@ -1,4 +1,9 @@
import { SchemaType } from "@google/generative-ai";
// Local stand-in for the SchemaType enum that previously came from
// "@google/generative-ai"; kept so existing SchemaProperty definitions
// continue to compile after the Gemini client was removed.
export enum SchemaType {
  STRING = 'STRING',
  BOOLEAN = 'BOOLEAN',
  ARRAY = 'ARRAY',
  OBJECT = 'OBJECT'
}
// Action Types
type BaseAction = {
@@ -122,14 +127,9 @@ export type KeywordsResponse = {
// Schema Types
export type SchemaProperty = {
type: SchemaType;
description: string;
description?: string;
enum?: string[];
items?: {
type: SchemaType;
description?: string;
properties?: Record<string, SchemaProperty>;
required?: string[];
};
items?: SchemaProperty;
properties?: Record<string, SchemaProperty>;
required?: string[];
maxItems?: number;

42
src/utils/schema.ts Normal file
View File

@@ -0,0 +1,42 @@
import { z } from 'zod';
import { SchemaType } from '../types';
import type { SchemaProperty, ResponseSchema } from '../types';
/**
 * Convert one SchemaProperty node (the Gemini-style schema shape) into the
 * equivalent Zod type, recursing into array `items` and object `properties`.
 *
 * @param prop - schema node to convert
 * @returns the corresponding Zod type
 * @throws Error when an ARRAY node lacks `items`, an OBJECT node lacks
 *   `properties`, or the node's `type` is not a supported SchemaType
 */
export function convertToZodType(prop: SchemaProperty): z.ZodTypeAny {
  let base: z.ZodTypeAny;
  switch (prop.type) {
    case SchemaType.STRING:
      base = z.string();
      break;
    case SchemaType.BOOLEAN:
      base = z.boolean();
      break;
    case SchemaType.ARRAY:
      if (!prop.items) throw new Error('Array schema must have items defined');
      base = z.array(convertToZodType(prop.items));
      break;
    case SchemaType.OBJECT: {
      // Braces scope the lexical declaration to this case (no-case-declarations).
      if (!prop.properties) throw new Error('Object schema must have properties defined');
      const shape: Record<string, z.ZodTypeAny> = {};
      for (const [key, value] of Object.entries(prop.properties)) {
        shape[key] = convertToZodType(value);
      }
      base = z.object(shape);
      break;
    }
    default:
      throw new Error(`Unsupported schema type: ${prop.type}`);
  }
  // Only attach a description when one exists; the previous code attached an
  // empty string for undescribed nodes, polluting the generated metadata.
  return prop.description ? base.describe(prop.description) : base;
}
/**
 * Build a z.object(...) from a ResponseSchema by converting each declared
 * property through convertToZodType.
 *
 * @param schema - response schema whose `properties` map is converted
 * @returns a Zod object whose keys mirror `schema.properties`
 */
export function createZodSchema(schema: ResponseSchema): z.ZodObject<any> {
  const shapeEntries = Object.entries(schema.properties).map(
    ([name, property]) => [name, convertToZodType(property)] as const
  );
  return z.object(Object.fromEntries(shapeEntries));
}
/**
 * Shared chat-completion tuning knobs: the caller supplies the temperature
 * (default 0); the remaining sampling/penalty settings are fixed constants.
 *
 * @param temperature - sampling temperature to embed in the config
 * @returns a plain config object suitable for a chat-completion request
 */
export function createPromptConfig(temperature: number = 0) {
  const fixedSettings = {
    max_tokens: 1000,
    top_p: 1,
    frequency_penalty: 0,
    presence_penalty: 0
  };
  return { temperature, ...fixedSettings };
}