This commit is contained in:
commit
03f2c49b7e
3
.env.development
Normal file
3
.env.development
Normal file
@ -0,0 +1,3 @@
|
||||
VITE_BASE_URL=/api
|
||||
|
||||
VITE_PROXY_URL=https://dev.eigent.ai
|
||||
11
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
11
.github/ISSUE_TEMPLATE/bug_report.md
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
---
|
||||
|
||||
name: 🐞 Bug report
|
||||
about: Create a report to help us improve
|
||||
title: "[Bug] the title of bug report"
|
||||
labels: bug
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
#### Describe the bug
|
||||
10
.github/ISSUE_TEMPLATE/help_wanted.md
vendored
Normal file
10
.github/ISSUE_TEMPLATE/help_wanted.md
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
---
|
||||
name: 🥺 Help wanted
|
||||
about: Confused about the use of electron-vue-vite
|
||||
title: "[Help] the title of help wanted report"
|
||||
labels: help wanted
|
||||
assignees: ''
|
||||
|
||||
---
|
||||
|
||||
#### Describe the problem you are confused about
|
||||
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
12
.github/PULL_REQUEST_TEMPLATE.md
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
<!-- Thank you for contributing! -->
|
||||
|
||||
### Description
|
||||
|
||||
<!-- Please insert your description here and provide especially info about the "what" this PR is solving -->
|
||||
|
||||
### What is the purpose of this pull request? <!-- (put an "X" next to an item) -->
|
||||
|
||||
- [ ] Bug fix
|
||||
- [ ] New Feature
|
||||
- [ ] Documentation update
|
||||
- [ ] Other
|
||||
11
.github/dependabot.yml
vendored
Normal file
11
.github/dependabot.yml
vendored
Normal file
@ -0,0 +1,11 @@
|
||||
# To get started with Dependabot version updates, you'll need to specify which
|
||||
# package ecosystems to update and where the package manifests are located.
|
||||
# Please see the documentation for all configuration options:
|
||||
# https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates
|
||||
|
||||
version: 2
|
||||
updates:
|
||||
- package-ecosystem: "npm" # See documentation for possible values
|
||||
directory: "/" # Location of package manifests
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
64
.github/workflows/build.yml
vendored
Normal file
64
.github/workflows/build.yml
vendored
Normal file
@ -0,0 +1,64 @@
|
||||
name: Build
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "v*"
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.spec.js"
|
||||
- ".idea"
|
||||
- ".vscode"
|
||||
- ".dockerignore"
|
||||
- "Dockerfile"
|
||||
- ".gitignore"
|
||||
- ".github/**"
|
||||
- "!.github/workflows/build.yml"
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ matrix.os }}
|
||||
|
||||
strategy:
|
||||
matrix:
|
||||
os: [macos-latest, macos-13, windows-latest]
|
||||
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@v4
|
||||
with:
|
||||
node-version: 18
|
||||
|
||||
- name: Install Dependencies
|
||||
run: npm install
|
||||
|
||||
- name: Build Release Files
|
||||
run: npm run build
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Upload Artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: release_on_${{ matrix.os }}
|
||||
path: release/
|
||||
retention-days: 5
|
||||
|
||||
- name: Create Release
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: softprops/action-gh-release@v1
|
||||
with:
|
||||
files: |
|
||||
release/*.exe
|
||||
release/*.dmg
|
||||
release/*.zip
|
||||
draft: false
|
||||
prerelease: false
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
81
.github/workflows/ci.yml
vendored
Normal file
81
.github/workflows/ci.yml
vendored
Normal file
@ -0,0 +1,81 @@
|
||||
name: CI
|
||||
|
||||
on:
|
||||
pull_request_target:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
pull-requests: write
|
||||
|
||||
jobs:
|
||||
job1:
|
||||
name: Check Not Allowed File Changes
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
markdown_change: ${{ steps.filter_markdown.outputs.change }}
|
||||
markdown_files: ${{ steps.filter_markdown.outputs.change_files }}
|
||||
steps:
|
||||
|
||||
- name: Check Not Allowed File Changes
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter_not_allowed
|
||||
with:
|
||||
list-files: json
|
||||
filters: |
|
||||
change:
|
||||
- 'package-lock.json'
|
||||
- 'yarn.lock'
|
||||
- 'pnpm-lock.yaml'
|
||||
|
||||
# ref: https://github.com/github/docs/blob/main/.github/workflows/triage-unallowed-contributions.yml
|
||||
- name: Comment About Changes We Can't Accept
|
||||
if: ${{ steps.filter_not_allowed.outputs.change == 'true' }}
|
||||
uses: actions/github-script@v6
|
||||
with:
|
||||
script: |
|
||||
let workflowFailMessage = "It looks like you've modified some files that we can't accept as contributions."
|
||||
try {
|
||||
const badFilesArr = [
|
||||
'package-lock.json',
|
||||
'yarn.lock',
|
||||
'pnpm-lock.yaml',
|
||||
]
|
||||
const badFiles = badFilesArr.join('\n- ')
|
||||
const reviewMessage = `👋 Hey there spelunker. It looks like you've modified some files that we can't accept as contributions. The complete list of files we can't accept are:\n- ${badFiles}\n\nYou'll need to revert all of the files you changed in that list using [GitHub Desktop](https://docs.github.com/en/free-pro-team@latest/desktop/contributing-and-collaborating-using-github-desktop/managing-commits/reverting-a-commit) or \`git checkout origin/main <file name>\`. Once you get those files reverted, we can continue with the review process. :octocat:\n\nMore discussion:\n- https://github.com/electron-vite/electron-vite-vue/issues/192`
|
||||
createdComment = await github.rest.issues.createComment({
|
||||
owner: context.repo.owner,
|
||||
repo: context.repo.repo,
|
||||
issue_number: context.payload.number,
|
||||
body: reviewMessage,
|
||||
})
|
||||
workflowFailMessage = `${workflowFailMessage} Please see ${createdComment.data.html_url} for details.`
|
||||
} catch(err) {
|
||||
console.log("Error creating comment.", err)
|
||||
}
|
||||
core.setFailed(workflowFailMessage)
|
||||
|
||||
- name: Check Not Linted Markdown
|
||||
if: ${{ always() }}
|
||||
uses: dorny/paths-filter@v2
|
||||
id: filter_markdown
|
||||
with:
|
||||
list-files: shell
|
||||
filters: |
|
||||
change:
|
||||
- added|modified: '*.md'
|
||||
|
||||
|
||||
job2:
|
||||
name: Lint Markdown
|
||||
runs-on: ubuntu-latest
|
||||
needs: job1
|
||||
if: ${{ always() && needs.job1.outputs.markdown_change == 'true' }}
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
ref: ${{ github.event.pull_request.head.sha }}
|
||||
|
||||
- name: Lint markdown
|
||||
run: npx markdownlint-cli ${{ needs.job1.outputs.markdown_files }} --ignore node_modules
|
||||
27
.github/workflows/remove-old-artifacts.yml
vendored
Normal file
27
.github/workflows/remove-old-artifacts.yml
vendored
Normal file
@ -0,0 +1,27 @@
|
||||
name: Remove old artifacts
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Every day at 1am
|
||||
- cron: '0 1 * * *'
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
remove-old-artifacts:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 10
|
||||
|
||||
# For private repos
|
||||
permissions:
|
||||
actions: write
|
||||
|
||||
steps:
|
||||
- name: Remove old artifacts
|
||||
uses: c-hive/gha-remove-artifacts@v1
|
||||
with:
|
||||
age: '2 days' # '<number> <unit>', e.g. 5 days, 2 years, 90 seconds, parsed by Moment.js
|
||||
# Optional inputs
|
||||
# skip-tags: true
|
||||
# skip-recent: 5
|
||||
43
.gitignore
vendored
Normal file
43
.gitignore
vendored
Normal file
@ -0,0 +1,43 @@
|
||||
# Logs
|
||||
logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
pnpm-debug.log*
|
||||
lerna-debug.log*
|
||||
|
||||
node_modules
|
||||
dist
|
||||
!package/**/dist
|
||||
dist-ssr
|
||||
dist-electron
|
||||
release
|
||||
*.local
|
||||
|
||||
# Editor directories and files
|
||||
.vscode/.debug.env
|
||||
.idea
|
||||
.DS_Store
|
||||
*.suo
|
||||
*.ntvs*
|
||||
*.njsproj
|
||||
*.sln
|
||||
*.sw?
|
||||
|
||||
#lockfile
|
||||
package-lock.json
|
||||
pnpm-lock.yaml
|
||||
yarn.lock
|
||||
/test-results/
|
||||
/playwright-report/
|
||||
/playwright/.cache/
|
||||
|
||||
.env
|
||||
.env.local
|
||||
.env.production
|
||||
|
||||
.cursor
|
||||
|
||||
# Public directory (large media files)
|
||||
public/
|
||||
6
.npmrc
Normal file
6
.npmrc
Normal file
@ -0,0 +1,6 @@
|
||||
# For electron-builder
|
||||
# https://github.com/electron-userland/electron-builder/issues/6289#issuecomment-1042620422
|
||||
shamefully-hoist=true
|
||||
|
||||
# For China 🇨🇳 developers
|
||||
# electron_mirror=https://npmmirror.com/mirrors/electron/
|
||||
23
.vscode/.debug.script.mjs
vendored
Normal file
23
.vscode/.debug.script.mjs
vendored
Normal file
@ -0,0 +1,23 @@
|
||||
import fs from 'node:fs'
|
||||
import path from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
import { createRequire } from 'node:module'
|
||||
import { spawn } from 'node:child_process'
|
||||
|
||||
const pkg = createRequire(import.meta.url)('../package.json')
|
||||
const __dirname = path.dirname(fileURLToPath(import.meta.url))
|
||||
|
||||
// write .debug.env
|
||||
const envContent = Object.entries(pkg.debug.env).map(([key, val]) => `${key}=${val}`)
|
||||
fs.writeFileSync(path.join(__dirname, '.debug.env'), envContent.join('\n'))
|
||||
|
||||
// bootstrap
|
||||
spawn(
|
||||
// TODO: terminate `npm run dev` when Debug exits.
|
||||
process.platform === 'win32' ? 'npm.cmd' : 'npm',
|
||||
['run', 'dev'],
|
||||
{
|
||||
stdio: 'inherit',
|
||||
env: Object.assign(process.env, { VSCODE_DEBUG: 'true' }),
|
||||
},
|
||||
)
|
||||
7
.vscode/extensions.json
vendored
Normal file
7
.vscode/extensions.json
vendored
Normal file
@ -0,0 +1,7 @@
|
||||
{
|
||||
// See http://go.microsoft.com/fwlink/?LinkId=827846
|
||||
// for the documentation about the extensions.json format
|
||||
"recommendations": [
|
||||
"mrmlnc.vscode-json5"
|
||||
]
|
||||
}
|
||||
54
.vscode/launch.json
vendored
Normal file
54
.vscode/launch.json
vendored
Normal file
@ -0,0 +1,54 @@
|
||||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"compounds": [
|
||||
{
|
||||
"name": "Debug App",
|
||||
"preLaunchTask": "Before Debug",
|
||||
"configurations": [
|
||||
"Debug Main Process",
|
||||
"Debug Renderer Process"
|
||||
],
|
||||
"presentation": {
|
||||
"hidden": false,
|
||||
"group": "",
|
||||
"order": 1
|
||||
},
|
||||
"stopAll": true
|
||||
}
|
||||
],
|
||||
"configurations": [
|
||||
{
|
||||
"name": "Debug Main Process",
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/electron",
|
||||
"windows": {
|
||||
"runtimeExecutable": "${workspaceRoot}/node_modules/.bin/electron.cmd"
|
||||
},
|
||||
"runtimeArgs": [
|
||||
"--no-sandbox",
|
||||
"--remote-debugging-port=9229",
|
||||
"."
|
||||
],
|
||||
"envFile": "${workspaceFolder}/.vscode/.debug.env",
|
||||
"console": "integratedTerminal"
|
||||
},
|
||||
{
|
||||
"name": "Debug Renderer Process",
|
||||
"port": 9229,
|
||||
"request": "attach",
|
||||
"type": "chrome",
|
||||
"timeout": 60000,
|
||||
"skipFiles": [
|
||||
"<node_internals>/**",
|
||||
"${workspaceRoot}/node_modules/**",
|
||||
"${workspaceRoot}/dist-electron/**",
|
||||
// Skip files in host(VITE_DEV_SERVER_URL)
|
||||
"http://127.0.0.1:7777/**"
|
||||
]
|
||||
},
|
||||
]
|
||||
}
|
||||
16
.vscode/settings.json
vendored
Normal file
16
.vscode/settings.json
vendored
Normal file
@ -0,0 +1,16 @@
|
||||
{
|
||||
"typescript.tsdk": "node_modules/typescript/lib",
|
||||
"typescript.tsc.autoDetect": "off",
|
||||
"json.schemas": [
|
||||
{
|
||||
"fileMatch": [
|
||||
"/*electron-builder.json5",
|
||||
"/*electron-builder.json"
|
||||
],
|
||||
"url": "https://json.schemastore.org/electron-builder"
|
||||
}
|
||||
],
|
||||
"cSpell.words": [
|
||||
"Eigent"
|
||||
]
|
||||
}
|
||||
31
.vscode/tasks.json
vendored
Normal file
31
.vscode/tasks.json
vendored
Normal file
@ -0,0 +1,31 @@
|
||||
{
|
||||
// See https://go.microsoft.com/fwlink/?LinkId=733558
|
||||
// for the documentation about the tasks.json format
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"label": "Before Debug",
|
||||
"type": "shell",
|
||||
"command": "node .vscode/.debug.script.mjs",
|
||||
"isBackground": true,
|
||||
"problemMatcher": {
|
||||
"owner": "typescript",
|
||||
"fileLocation": "relative",
|
||||
"pattern": {
|
||||
// TODO: correct "regexp"
|
||||
"regexp": "^([a-zA-Z]\\:\/?([\\w\\-]\/?)+\\.\\w+):(\\d+):(\\d+): (ERROR|WARNING)\\: (.*)$",
|
||||
"file": 1,
|
||||
"line": 3,
|
||||
"column": 4,
|
||||
"code": 5,
|
||||
"message": 6
|
||||
},
|
||||
"background": {
|
||||
"activeOnStart": true,
|
||||
"beginsPattern": "^.*VITE v.* ready in \\d* ms.*$",
|
||||
"endsPattern": "^.*\\[startup\\] Electron App.*$"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
200
CONTRIBUTING.md
Normal file
200
CONTRIBUTING.md
Normal file
@ -0,0 +1,200 @@
|
||||
🐫 **Welcome to Eigent!** 🐫
|
||||
|
||||
Thank you for your interest in contributing to the Eigent project! 🎉 We're excited to have your support. As an open-source product built on CAMEL in a rapidly evolving and open-ended field, we wholeheartedly welcome contributions of all kinds. Whether you want to introduce new features, enhance the infrastructure, improve documentation, raise issues, or fix bugs, we appreciate your enthusiasm and efforts. 🙌 You are welcome to join our [discord](https://discord.camel-ai.org/) for more efficient communication. 💬
|
||||
|
||||
## Join Our Community 🌍
|
||||
|
||||
### Developer Meeting Time & Link 💻
|
||||
- English speakers: Mondays at 8 PM PDT. Join via Google Meet: [Meeting Link](https://meet.google.com/sez-aomy-ebm?authuser=0&hs=122&ijlm=1753634732982)
|
||||
- Chinese speakers: Mondays at 9 PM UTC+8. Join via Tencent Meeting: [Meeting Link](https://meeting.tencent.com/dm/057wap1eeCSY)
|
||||
|
||||
### Our Communication Channels 💬
|
||||
- **Discord:** [Join here](https://discord.camel-ai.org/)
|
||||
- **WeChat:** Scan the QR code [here](https://ghli.org/camel/wechat.png)
|
||||
- **Slack:** [Join here](https://join.slack.com/t/camel-ai/shared_invite/zt-2g7xc41gy-_7rcrNNAArIP6sLQqldkqQ)
|
||||
|
||||
## Guidelines 📝
|
||||
|
||||
### Contributing to the Code 👨💻👩💻
|
||||
|
||||
If you're eager to contribute to this project, that's fantastic! We're thrilled to have your support.
|
||||
|
||||
- If you are a contributor from the community:
|
||||
- Follow the [Fork-and-Pull-Request](https://docs.github.com/en/get-started/quickstart/contributing-to-projects) workflow when opening your pull requests.
|
||||
- If you are a member of [CAMEL-AI.org](https://github.com/camel-ai):
|
||||
- Follow the [Checkout-and-Pull-Request](https://dev.to/ceceliacreates/how-to-create-a-pull-request-on-github-16h1) workflow when opening your pull request; this will allow the PR to pass all tests that require [GitHub Secrets](https://docs.github.com/en/actions/security-guides/encrypted-secrets).
|
||||
|
||||
Make sure to mention any related issues and tag the relevant maintainers too. 💪
|
||||
|
||||
Before your pull request can be merged, it must pass the formatting, linting, and testing checks. You can find instructions on running these checks locally under the **Common Actions** section below. 🔍
|
||||
|
||||
Ensuring excellent documentation and thorough testing is absolutely crucial. Here are some guidelines to follow based on the type of contribution you're making:
|
||||
|
||||
- If you fix a bug:
|
||||
- Add a relevant unit test when possible. These can be found in the `test` directory.
|
||||
- If you make an improvement:
|
||||
- Update any affected example console scripts in the `examples` directory, Gradio demos in the `apps` directory, and documentation in the `docs` directory.
|
||||
- Update unit tests when relevant.
|
||||
- If you add a feature:
|
||||
- Include unit tests in the `test` directory.
|
||||
- Add a demo script in the `examples` directory.
|
||||
|
||||
We're a small team focused on building great things. If you have something in mind that you'd like to add or modify, opening a pull request is the ideal way to catch our attention. 🚀
|
||||
|
||||
### Contributing to Code Reviews 🔍
|
||||
This part outlines the guidelines and best practices for conducting code reviews in Eigent. The aim is to ensure that all contributions are of high quality, align with the project's goals, and are consistent with our coding standards.
|
||||
|
||||
#### Purpose of Code Reviews
|
||||
- Maintain Code Quality: Ensure that the codebase remains clean, readable, and maintainable.
|
||||
- Knowledge Sharing: Facilitate knowledge sharing among contributors and help new contributors learn best practices.
|
||||
- Bug Prevention: Catch potential bugs and issues before they are merged into the main branch.
|
||||
- Consistency: Ensure consistency in style, design patterns, and architecture across the project.
|
||||
|
||||
#### Review Process Overview
|
||||
- Reviewers should check the code for functionality, readability, consistency, and compliance with the project’s coding standards.
|
||||
- If changes are necessary, the reviewer should leave constructive feedback.
|
||||
- The contributor addresses feedback and updates the PR.
|
||||
- The reviewer re-reviews the updated code.
|
||||
- Once the code is approved by at least two reviewers, it can be merged into the main branch.
|
||||
- Merging should be done by a maintainer or an authorized contributor.
|
||||
|
||||
#### Code Review Checklist
|
||||
- Functionality
|
||||
- Correctness: Does the code perform the intended task? Are edge cases handled?
|
||||
- Testing: Is there sufficient test coverage? Do all tests pass?
|
||||
- Security: Are there any security vulnerabilities introduced by the change?
|
||||
- Performance: Does the code introduce any performance regressions?
|
||||
|
||||
- Code Quality
|
||||
- Readability: Is the code easy to read and understand? Is it well-commented where necessary?
|
||||
- Maintainability: Is the code structured in a way that makes future changes easy?
|
||||
- Style: Does the code follow the project’s style guidelines?
|
||||
Currently we use Ruff for format check and take the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html) as reference.
|
||||
- Documentation: Are public methods, classes, and any complex logic well-documented?
|
||||
- Design
|
||||
- Consistency: Does the code follow established design patterns and project architecture?
|
||||
- Modularity: Are the changes modular and self-contained? Does the code avoid unnecessary duplication?
|
||||
- Dependencies: Are dependencies minimized and used appropriately?
|
||||
|
||||
#### Reviewer Responsibilities
|
||||
- Timely Reviews: Reviewers should strive to review PRs promptly to keep the project moving.
|
||||
- Constructive Feedback: Provide feedback that is clear, constructive, and aimed at helping the contributor improve.
|
||||
- Collaboration: Work with the contributor to address any issues and ensure the final code meets the project’s standards.
|
||||
- Approvals: Only approve code that you are confident meets all the necessary criteria.
|
||||
|
||||
#### Common Pitfalls
|
||||
- Large PRs: Avoid submitting PRs that are too large. Break down your changes into smaller, manageable PRs if possible.
|
||||
- Ignoring Feedback: Address all feedback provided by reviewers, even if you don’t agree with it—discuss it instead of ignoring it.
|
||||
- Rushed Reviews: Avoid rushing through reviews. Taking the time to thoroughly review code is critical to maintaining quality.
|
||||
|
||||
Code reviews are an essential part of maintaining the quality and integrity of our open source project. By following these guidelines, we can ensure that Eigent remains robust, secure, and easy to maintain, while also fostering a collaborative and welcoming community.
|
||||
|
||||
### Guideline for Writing Docstrings
|
||||
|
||||
This guideline will help you write clear, concise, and structured docstrings for contributing to `Eigent`.
|
||||
|
||||
#### 1. Use the Triple-Quoted String with `r"""` (Raw String)
|
||||
Begin the docstring with `r"""` to indicate a raw docstring. This prevents any issues with special characters and ensures consistent formatting.
|
||||
|
||||
#### 2. Provide a Brief Class or Method Description
|
||||
- Start with a concise summary of the purpose and functionality.
|
||||
- Keep each line under `79` characters.
|
||||
- The summary should start on the first line without a linebreak.
|
||||
|
||||
Example:
|
||||
```python
|
||||
r"""Class for managing conversations of CAMEL Chat Agents.
|
||||
"""
|
||||
```
|
||||
|
||||
#### 3. Document Parameters in the Args Section
|
||||
- Use an `Args`: section for documenting constructor or function parameters.
|
||||
- Maintain the `79`-character limit for each line, and indent continuation lines by 4 spaces.
|
||||
- Follow this structure:
|
||||
- Parameter Name: Match the function signature.
|
||||
- Type: Include the type (e.g., `int`, `str`, custom types like `BaseModelBackend`).
|
||||
- Description: Provide a brief explanation of the parameter's role.
|
||||
- Default Value: Use (`default: :obj:<default_value>`) to indicate default values.
|
||||
|
||||
Example:
|
||||
```markdown
|
||||
Args:
|
||||
system_message (BaseMessage): The system message for initializing
|
||||
the agent's conversation context.
|
||||
model (BaseModelBackend, optional): The model backend to use for
|
||||
response generation. Defaults to :obj:`OpenAIModel` with
|
||||
`GPT_4O_MINI`. (default: :obj:`OpenAIModel` with `GPT_4O_MINI`)
|
||||
```
|
||||
|
||||
### Principles 🛡️
|
||||
|
||||
#### Naming Principle: Avoid Abbreviations in Naming
|
||||
|
||||
- Abbreviations can lead to ambiguity, especially since variable names and code in CAMEL are directly used by agents.
|
||||
- Use clear, descriptive names that convey meaning without requiring additional explanation. This improves both human readability and the agent's ability to interpret the code.
|
||||
|
||||
Examples:
|
||||
|
||||
- Bad: msg_win_sz
|
||||
- Good: message_window_size
|
||||
|
||||
By adhering to this principle, we ensure that CAMEL remains accessible and unambiguous for both developers and AI agents.
|
||||
|
||||
### Board Item Create Workflow 🛠️
|
||||
At Eigent, we manage our project through a structured workflow that ensures efficiency and clarity in our development process. Our workflow includes stages for issue creation and pull requests (PRs), sprint planning, and reviews.
|
||||
|
||||
#### Issue Item Stage:
|
||||
Our [issues](https://github.com/eigent-ai/Eigent-desktop/issues) page on GitHub is regularly updated with bugs, improvements, and feature requests. We have a handy set of labels to help you sort through and find issues that interest you. Feel free to use these labels to keep things organized.
|
||||
|
||||
When you start working on an issue, please assign it to yourself so that others know it's being taken care of.
|
||||
|
||||
When creating a new issue, it's best to keep it focused on a specific bug, improvement, or feature. If two issues are related or blocking each other, it's better to link them instead of merging them into one.
|
||||
|
||||
We do our best to keep these issues up to date, but considering the fast-paced nature of this field, some may become outdated. If you come across any such issues, please give us a heads-up so we can address them promptly. 👀
|
||||
|
||||
Here’s how to engage with our issues effectively:
|
||||
- Go to [GitHub Issues](https://github.com/eigent-ai/Eigent-desktop/issues), create a new issue, choose the category, and fill in the required information.
|
||||
- Ensure the issue has a proper title and update the Assignees, Labels, Projects (select Backlog status), Development, and Milestones.
|
||||
- Discuss the issue during team meetings, then move it to the Analysis Done column.
|
||||
- At the beginning of each sprint, share the analyzed issue and move it to the Sprint Planned column if you are going to work on this issue in the sprint.
|
||||
|
||||
#### Pull Request Item Stage:
|
||||
|
||||
- Go to [GitHub Pulls](https://github.com/eigent-ai/Eigent-desktop/pulls), create a new PR, choose the branch, and fill in the information, linking the related issue.
|
||||
- Ensure the PR has a proper title and update the Reviewers (convert to draft), Assignees, Labels, Projects (select Developing status), Development, and Milestones.
|
||||
- If the PR is related to a roadmap, link the roadmap to the PR.
|
||||
- Move the PR item through the stages: Developing, Stuck, Reviewing (click ready for review), Merged. The linked issue will close automatically when the PR is merged.
|
||||
|
||||
**Labeling PRs:**
|
||||
- **feat**: For new features (e.g., `feat: Add new AI model`)
|
||||
- **fix**: For bug fixes (e.g., `fix: Resolve memory leak issue`)
|
||||
- **docs**: For documentation updates (e.g., `docs: Update contribution guidelines`)
|
||||
- **style**: For code style changes (e.g., `style: Refactor code formatting`)
|
||||
- **refactor**: For code refactoring (e.g., `refactor: Optimize data processing`)
|
||||
- **test**: For adding or updating tests (e.g., `test: Add unit tests for new feature`)
|
||||
- **chore**: For maintenance tasks (e.g., `chore: Update dependencies`)
|
||||
|
||||
### Getting Help 🆘
|
||||
|
||||
Our aim is to make the developer setup as straightforward as possible. If you encounter any challenges during the setup process, don't hesitate to reach out to a maintainer. We're here to assist you and ensure that the experience is smooth not just for you but also for future contributors. 😊
|
||||
|
||||
## Quick Start 🚀
|
||||
|
||||
```bash
|
||||
git clone https://github.com/eigent-ai/Eigent-desktop.git
|
||||
cd Eigent-desktop
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
## Common Actions 🔄
|
||||
|
||||
### Update dependencies
|
||||
|
||||
Whenever you add, update, or delete any dependencies in `pyproject.toml`, please run `uv lock` to synchronize the dependencies with the lock file.
|
||||
|
||||
## Giving Credit 🎉
|
||||
|
||||
If your contribution has been included in a release, we'd love to give you credit on Twitter, but only if you're comfortable with it!
|
||||
|
||||
If you have a Twitter account that you would like us to mention, please let us know either in the pull request or through another communication method. We want to make sure you receive proper recognition for your valuable contributions. 😄
|
||||
46
LICENSE
Normal file
46
LICENSE
Normal file
@ -0,0 +1,46 @@
|
||||
# Eigent Open Source License
|
||||
|
||||
Eigent is licensed under a modified version of the Apache License 2.0, with the following additional conditions:
|
||||
|
||||
1. Eigent may be utilized commercially, including as a backend service for other applications or as an application development platform for enterprises. However, if any of the following conditions apply, you must obtain a valid commercial license from Eigent.AI:
|
||||
|
||||
a. Commercial Self-Hosted Deployment: You may not use this software or any of its components in a production environment for commercial purposes without an active, valid commercial license from Eigent AI.
|
||||
Definitions:
|
||||
- Software: Eigent source code, binaries, and related components provided under this license.
|
||||
- Production Environment: Any environment not solely used for development, testing, or personal non-commercial evaluation purposes.
|
||||
- Commercial Purposes: Activities intended or directed towards commercial advantage or monetary compensation, including, without limitation, supporting internal business operations or providing services to third parties.
|
||||
|
||||
b. Multi-tenant SaaS service: Unless explicitly authorized by Eigent.AI in writing, you may not use the Eigent source code to operate a multi-tenant Software-as-a-Service platform or any online service similar to Eigent’s official cloud service.
|
||||
- Tenant Definition: Within the context of Eigent, one tenant corresponds to one workspace. The workspace provides a separated area for each tenant's data and configurations.
|
||||
|
||||
c. Branding and Attribution: You must not remove, hide, or alter the Eigent name, logos, or copyright notices displayed in the Eigent user interface (including desktop applications and web consoles). This restriction is inapplicable to uses of Eigent that do not involve its frontend.
|
||||
- Frontend Definition: For the purposes of this license, the "frontend" of Eigent includes all components located in the `electron/` directory when running Eigent from the raw source code, or the "electron" image when running Eigent with Docker.
|
||||
|
||||
Please contact us at info@eigent.ai for licensing inquiries.
|
||||
|
||||
2. As a contributor, you should agree that:
|
||||
|
||||
a. Eigent AI can adjust the open-source agreement to be more restrictive or permissive as deemed necessary.
|
||||
|
||||
b. Your contributed code may be used by Eigent.AI for commercial purposes, including but not limited to cloud-hosted and self-hosted services operated by Eigent AI.
|
||||
|
||||
Apart from the specific conditions mentioned above, all other rights and restrictions follow the Apache License 2.0. Detailed information about the Apache License 2.0 can be found at http://www.apache.org/licenses/LICENSE-2.0.
|
||||
|
||||
The interactive design of this product, as well as Eigent’s names and logos, are protected by intellectual property laws.
|
||||
|
||||
© 2023-2025 Eigent AI LTD
|
||||
|
||||
|
||||
----------
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing,
|
||||
software distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
423
README.md
Normal file
423
README.md
Normal file
@ -0,0 +1,423 @@
|
||||
<div align="center"><a name="readme-top"></a>
|
||||
|
||||
[![][image-head]][eigent-site]
|
||||
|
||||
[![][image-seperator]][eigent-site]
|
||||
|
||||
### Eigent: The World's First Multi-agent Workforce to Unlock Your Exceptional Productivity
|
||||
|
||||
<!-- SHIELD GROUP -->
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
[![][github-star]][eigent-github]
|
||||
[![][social-x-shield]][social-x-link]
|
||||
[![][discord-image]][discord-url]<br>
|
||||
[![Reddit][reddit-image]][reddit-url]
|
||||
[![Wechat][wechat-image]][wechat-url]
|
||||
[![][sponsor-shield]][sponsor-link]
|
||||
[![][built-with-camel]][camel-github]
|
||||
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
<div align="center">
|
||||
|
||||
**English** · [简体中文](./README_CN.md) · [Official Site][eigent-site] · [Documents][docs-site] · [Feedback][github-issue-link]
|
||||
|
||||
</div>
|
||||
<br/>
|
||||
|
||||
**Eigent** is the world’s first **Multi-agent Workforce** desktop application, empowering you to build, manage, and deploy a custom AI workforce that can turn your most complex workflows into automated tasks.
|
||||
|
||||
Built on [CAMEL-AI][camel-site]'s acclaimed open-source project, our system introduces a **Multi-Agent Workforce** that **boosts productivity** through parallel execution, customization, and privacy protection.
|
||||
|
||||
### ⭐ 100% Open Source - 🥇 Local Deployment - 🏆 MCP Integration
|
||||
|
||||
- ✅ **Zero Setup** - No technical configuration required
|
||||
- ✅ **Multi-Agent Coordination** - Handle complex multi-agent workflows
|
||||
- ✅ **Enterprise Feature** - SSO/Access control
|
||||
- ✅ **Local Deployment**
|
||||
- ✅ **Open Source**
|
||||
- ✅ **Custom Model Support**
|
||||
- ✅ **MCP Integration**
|
||||
|
||||
<br/>
|
||||
|
||||
<details>
|
||||
<summary><kbd>Table of contents</kbd></summary>
|
||||
|
||||
#### TOC
|
||||
|
||||
- [🚀 Getting Started](#-getting-started)
|
||||
- [☁️ Cloud Version](#️-cloud-version)
|
||||
- [🏠 Self-Hosting (Community Edition)](#-self-hosting-community-edition)
|
||||
- [🏢 Enterprise](#-enterprise)
|
||||
- [✨ Key features](#-key-features)
|
||||
- [🏭 Workforce](#-workforce)
|
||||
- [🧠 Comprehensive Model Support](#-comprehensive-model-support)
|
||||
- [🔌 MCP Tools Integration (MCP)](#-mcp-tools-integration-mcp)
|
||||
- [✋ Human-in-the-Loop](#-human-in-the-loop)
|
||||
- [👐 100% Open Source](#-100-open-source)
|
||||
- [🧩 Use Cases](#-use-cases)
|
||||
- [🛠️ Tech Stack](#-tech-stack)
|
||||
- [Backend](#backend)
|
||||
- [Frontend](#frontend)
|
||||
- [🌟 Staying ahead](#-staying-ahead)
|
||||
- [🗺️ Roadmap](#-roadmap)
|
||||
- [📖 Contributing](#-contributing)
|
||||
- [Main Contributors](#main-contributors)
|
||||
- [Distinguished ambassador](#distinguished-amabssador)
|
||||
- [Ecosystem](#ecosystem)
|
||||
- [📄 Open Source License](#-open-source-license)
|
||||
- [🌐 Community & contact](#-community--contact)
|
||||
|
||||
####
|
||||
|
||||
<br/>
|
||||
|
||||
</details>
|
||||
|
||||
## **🚀 Getting Started**
|
||||
|
||||
There are three ways to get started with Eigent:
|
||||
|
||||
### ☁️ Cloud Version
|
||||
|
||||
The fastest way to experience Eigent's multi-agent AI capabilities is through our cloud platform, perfect for teams and individuals who want immediate access without setup complexity. We'll host the models, APIs, and cloud storage, ensuring Eigent runs flawlessly.
|
||||
|
||||
- **Instant Access** - Start building multi-agent workflows in minutes.
|
||||
- **Managed Infrastructure** - We handle scaling, updates, and maintenance.
|
||||
- **Premium Support** - Subscribe and get priority assistance from our engineering team.
|
||||
|
||||
<br/>
|
||||
|
||||
[![image-public-beta]][eigent-download]
|
||||
|
||||
<div align="right">
|
||||
<a href="https://www.eigent.ai/download">Get started at Eigent.ai →</a>
|
||||
</div>
|
||||
|
||||
### 🏠 Self-Hosting (Community Edition)
|
||||
|
||||
For users who prefer local control, data privacy, or customization, this option is ideal for organizations requiring:
|
||||
|
||||
- **Data Privacy** - Keep sensitive data within your infrastructure.
|
||||
- **Customization** - Modify and extend the platform to fit your needs.
|
||||
- **Cost Control** - Avoid recurring cloud fees for large-scale deployments.
|
||||
|
||||
#### 1. Prerequisites
|
||||
|
||||
- Node.js and npm
|
||||
|
||||
#### 2. Quick Start
|
||||
|
||||
```bash
|
||||
git clone https://github.com/eigent-ai/eigent.git
|
||||
cd eigent
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
### 🏢 Enterprise
|
||||
|
||||
For organizations requiring maximum security, customization, and control:
|
||||
|
||||
- **Commercial License** - [Check our license →](LICENSE)
|
||||
- **Exclusive Features** (like SSO & custom development)
|
||||
- **Scalable Enterprise Deployment**
|
||||
- **Negotiated SLAs** & implementation services
|
||||
|
||||
📧 For further details, please contact us at [info@eigent.ai](mailto:info@eigent.ai).
|
||||
|
||||
## **✨ Key features**
|
||||
Unlock the full potential of exceptional productivity with Eigent’s powerful features—built for seamless integration, smarter task execution, and boundless automation.
|
||||
|
||||
### 🏭 Workforce
|
||||
Employs a team of specialized AI agents that collaborate to solve complex tasks. Eigent dynamically breaks down tasks and activates multiple agents to work **in parallel.**
|
||||
|
||||
Eigent pre-defined the following agent workers:
|
||||
|
||||
- **Developer Agent:** Writes and executes code, runs terminal commands.
|
||||
- **Search Agent:** Searches the web and extracts content.
|
||||
- **Document Agent:** Creates and manages documents.
|
||||
- **Multi-Modal Agent:** Processes images and audio.
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 🧠 Comprehensive Model Support
|
||||
Deploy Eigent locally with your preferred models.
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 🔌 MCP Tools Integration (MCP)
|
||||
Eigent comes with massive built-in **Model Context Protocol (MCP)** tools (for web browsing, code execution, Notion, Google suite, Slack etc.), and also lets you **install your own tools**. Equip agents with exactly the right tools for your scenarios – even integrate internal APIs or custom functions – to enhance their capabilities.
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### ✋ Human-in-the-Loop
|
||||
If a task gets stuck or encounters uncertainty, Eigent will automatically request human input.
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 👐 100% Open Source
|
||||
Eigent is completely open-sourced. You can download, inspect, and modify the code, ensuring transparency and fostering a community-driven ecosystem for multi-agent innovation.
|
||||
|
||||
![Opensource][image-opensource]
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
## 🧩 Use Cases
|
||||
|
||||
### 1. Palm Springs Tennis Trip Itinerary with Slack Summary [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTM0MzUxNTEzMzctNzExMyI.aIeysw.MUeG6ZcBxI1GqvPDvn4dcv-CDWw__1753435151337-7113)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>We are two tennis fans and want to go see the tennis tournament ... </kbd></summary>
|
||||
<br>
|
||||
We are two tennis fans and want to go see the tennis tournament in Palm Springs 2026. I live in SF - please prepare a detailed itinerary with flights, hotels, things to do for 3 days - around the time semifinal/finals are happening. We like hiking, vegan food and spas. Our budget is $5K. The itinerary should be a detailed timeline of time, activity, cost, other details and if applicable a link to buy tickets/make reservations etc. for the item. Some preferences: Spa access would be nice but not necessary. When you finish this task, please generate a html report about this trip; write a summary of this plan and send text summary and report html link to slack #tennis-trip-sf channel.
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/palm_springs_tennis_trip_itinerary_with_slack_summary.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 2. Generate Q2 Report from CSV Bank Data [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTM1MjY4OTE4MDgtODczOSI.aIjJmQ.WTdoX9mATwrcBr_w53BmGEHPo8U__1753526891808-8739)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>Please help me prepare a Q2 financial statement based on my bank ... </kbd></summary>
|
||||
<br>
|
||||
Please help me prepare a Q2 financial statement based on my bank transfer record file bank_transacation.csv in my desktop to a html report with chart to investors how much we have spent.
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/generate_q2_report_from_csv_bank_data.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 3. UK Healthcare Market Research Report Automation [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTMzOTM1NTg3OTctODcwNyI.aIey-Q.Jh9QXzYrRYarY0kz_qsgoj3ewX0__1753393558797-8707)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>Analyze the UK healthcare industry to support the planning ... </kbd></summary>
|
||||
<br>
|
||||
Analyze the UK healthcare industry to support the planning of my next company. Provide a comprehensive market overview, including current trends, growth projections, and relevant regulations. Identify the top 5–10 major opportunities, gaps, or underserved segments within the market. Present all findings in a well-structured, professional HTML report. Then send a message to slack #eigentr-product-test channel when this task is done to align the report content with my teammates.
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/uk_healthcare_market_research_report_automation.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 4. German Electric Skateboard Market Feasibility [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTM2NTI4MjY3ODctNjk2Ig.aIjGiA.t-qIXxk_BZ4ENqa-yVIm0wMVyXU__1753652826787-696)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>We are a company that produces high-end electric skateboards ... </kbd></summary>
|
||||
<br>
|
||||
We are a company that produces high-end electric skateboards, and we are considering entering the German market. Please prepare a detailed market entry feasibility report for me. The report needs to cover the following aspects:
|
||||
1. Market Size & Regulations: Research the market size, annual growth rate, key players, and market share for Personal Light Electric Vehicles (PLEVs) in Germany. Simultaneously, provide a detailed breakdown and summary of German laws and regulations concerning the use of electric skateboards on public roads, including certification requirements (such as ABE certification) and insurance policies.
|
||||
2. Consumer Profile: Analyze the profile of potential German consumers, including their age, income level, primary usage scenarios (commuting, recreation), key purchasing decision drivers (price, performance, brand, design), and the channels they typically use to gather information (forums, social media, offline retail stores).
|
||||
3. Channels & Distribution: Investigate Germany’s mainstream online electronics sales platforms (e.g., Amazon.de, MediaMarkt.de) and high-end sporting goods offline retail chains. List the top 5 potential online and offline distribution partners and find the contact information for their purchasing departments, if possible.
|
||||
4. Costing & Pricing: Based on the product cost structure in my Product_Cost.csv file on my desktop, and taking into account German customs duties, Value Added Tax (VAT), logistics and warehousing costs, and potential marketing expenses, estimate a Manufacturer’s Suggested Retail Price (MSRP) and analyze its competitiveness in the market.
|
||||
5. Comprehensive Report & Presentation: Summarize all research findings into an HTML report file. The content should include data charts, key findings, and a final market entry strategy recommendation (Recommended / Not Recommended / Recommended with Conditions).
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/german_electric_skateboard_market_feasibility.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 5. SEO Audit for Workforce Multiagent Launch [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTM2OTk5NzExNDQtNTY5NiI.aIex0w.jc_NIPmfIf9e3zGt-oG9fbMi3K4__1753699971144-5696)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>To support the launch of our new Workforce Multiagent product ... </kbd></summary>
|
||||
<br>
|
||||
To support the launch of our new Workforce Multiagent product, please run a thorough SEO audit on our official website (https://www.camel-ai.org/) and deliver a detailed optimization report with actionable recommendations.
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/seo_audit_for_workforce_multiagent_launch.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 6. Identify Duplicate Files in Downloads [Replay ▶️](https://www.eigent.ai/download?share_token=IjE3NTM3NjAzODgxNzEtMjQ4Ig.aIhKLQ.epOG--0Nj0o4Bqjtdqm9OZdaqRQ__1753760388171-248)
|
||||
|
||||
<details>
|
||||
<summary><strong>Prompt:</strong> <kbd>I have a folder named mydocs inside my Documents directory ... </kbd></summary>
|
||||
<br>
|
||||
I have a folder named mydocs inside my Documents directory. Please scan it and identify all files that are exact or near duplicates — including those with identical content, file size, or format (even if file names or extensions differ). List them clearly, grouped by similarity.
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/identify_duplicate_files_in_downloads.mp4" type="video/mp4">
|
||||
Your browser does not support the video tag.
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
## 🛠️ Tech Stack
|
||||
|
||||
### Backend
|
||||
- **Framework:** FastAPI
|
||||
- **Package Manager:** uv
|
||||
- **Async Server:** Uvicorn
|
||||
- **Authentication:** OAuth 2.0, Passlib.
|
||||
- **Multi-agent framework:** CAMEL
|
||||
|
||||
### Frontend
|
||||
|
||||
- **Framework:** React
|
||||
- **Desktop App Framework:** Electron
|
||||
- **Language:** TypeScript
|
||||
- **UI:** Tailwind CSS, Radix UI, Lucide React, Framer Motion
|
||||
- **State Management:** Zustand
|
||||
- **Flow Editor:** React Flow
|
||||
|
||||
## 🌟 Staying ahead
|
||||
|
||||
> [!IMPORTANT]
|
||||
>
|
||||
> **Star Eigent**, You will receive all release notifications from GitHub without any delay \~ ⭐️
|
||||
|
||||
![][image-star-us]
|
||||
|
||||
## 🗺️ Roadmap
|
||||
|
||||
| Topics | Issues | Discord Channel |
|
||||
| ------------------------ | -- |-- |
|
||||
| **Context Engineering** | - Prompt caching<br> - System prompt optimize<br> - Toolkit docstring optimize<br> - Context compression | [**Join Discord →**](https://discord.gg/D2e3rBWD) |
|
||||
| **Multi-modal Enhancement** | - More accurate image understanding when using browser<br> - Advanced video generation | [**Join Discord →**](https://discord.gg/kyapNCeJ) |
|
||||
| **Multi-agent system** | - Workforce support fixed workflow<br> - Workforce support multi-round conversation | [**Join Discord →**](https://discord.gg/bFRmPuDB) |
|
||||
| **Browser Toolkit** | - BrowseCamp integration<br> - Benchmark improvement<br> - Forbid repeated page visiting<br> - Automatic cache button clicking | [**Join Discord →**](https://discord.gg/NF73ze5v) |
|
||||
| **Document Toolkit** | - Support dynamic file editing | [**Join Discord →**](https://discord.gg/4yAWJxYr) |
|
||||
| **Terminal Toolkit** | - Benchmark improvement<br> - Terminal-Bench integration | [**Join Discord →**](https://discord.gg/FjQfnsrV) |
|
||||
| **Environment & RL** | - Environment design<br> - Data-generation<br> - RL framework integration (VERL, TRL, OpenRLHF) | [**Join Discord →**](https://discord.gg/MaVZXEn8) |
|
||||
|
||||
|
||||
## [🤝 Contributing][contribution-link]
|
||||
|
||||
We believe in building trust and embracing all forms of open-source collaborations. Your creative contributions help drive the innovation of `Eigent`. Explore our GitHub issues and projects to dive in and show us what you’ve got 🤝❤️ [Contribution Guideline][contribution-link]
|
||||
|
||||
## [❤️ Sponsor][sponsor-link]
|
||||
|
||||
Eigent is built on top of [CAMEL-AI.org][camel-ai-org-github]'s research and infrastructures. [Sponsoring CAMEL-AI.org][sponsor-link] will make `Eigent` better.
|
||||
|
||||
## **📄 Open Source License**
|
||||
|
||||
This repository is licensed under the [**Eigent Open Source License**](LICENSE), based on Apache 2.0 with additional conditions.
|
||||
|
||||
## 🌐 Community & Contact
|
||||
For more information please contact info@eigent.ai
|
||||
|
||||
- **GitHub Issues:** Report bugs, request features, and track development. [Submit an issue][github-issue-link]
|
||||
|
||||
- **Discord:** Get real-time support, chat with the community, and stay updated. [Join us](https://discord.camel-ai.org/)
|
||||
|
||||
- **X (Twitter):** Follow for updates, AI insights, and key announcements. [Follow us][social-x-link]
|
||||
|
||||
- **WeChat Community:** Scan the QR code below to join our WeChat community.
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
<!-- LINK GROUP -->
|
||||
<!-- Social -->
|
||||
[discord-url]: https://discord.camel-ai.org/
|
||||
[discord-image]: https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb
|
||||
|
||||
[built-with-camel]:https://img.shields.io/badge/-Built--with--CAMEL-4C19E8.svg?logo=data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjQ4IiBoZWlnaHQ9IjI3MiIgdmlld0JveD0iMCAwIDI0OCAyNzIiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxwYXRoIGQ9Ik04LjgzMTE3IDE4LjU4NjVMMCAzMC44MjY3QzUuNDY2OTIgMzUuMDQzMiAxNS4xMzkxIDM4LjgyNTggMjQuODExNCAzNi4yOTU5QzMwLjY5ODggNDAuOTM0MSAzOS42NzAyIDQwLjIzMTMgNDQuMTU1OSA0MC4wOTA4QzQzLjQ1NSA0Ny4zOTk0IDQyLjQ3MzcgNzAuOTU1OCA0NC4xNTU5IDEwNi43MTJDNDUuODM4IDE0Mi40NjggNzEuNzcwOCAxNjYuODY4IDg0LjUyNjkgMTc0LjU5OEw3Ni4wMDAyIDIyMEw4NC41MjY5IDI3MkgxMDguOTE4TDk4LjAwMDIgMjIwTDEwOC45MTggMTc0LjU5OEwxMjkuOTQ0IDI3MkgxNTQuNzU2TDEzNC4xNSAxNzQuNTk4SDE4Ny4xMzdMMTY2LjUzMSAyNzJIMTkxLjc2M0wyMTIuMzY5IDE3NC41OThMMjI2IDIyMEwyMTIuMzY5IDI3MkgyMzcuNjAxTDI0OC4wMDEgMjIwTDIzNy4xOCAxNzQuNTk4QzIzOS4yODMgMTY5LjExNyAyNDAuNDAxIDE2Ni45NzYgMjQxLjgwNiAxNjEuMTA1QzI0OS4zNzUgMTI5LjQ4MSAyMzUuMDc3IDEwMy45MDEgMjI2LjY2NyA5NC40ODRMMjA2LjQ4MSA3My44MjNDMTk3LjY1IDY0Ljk2ODMgMTgyLjUxMSA2NC41NDY3IDE3Mi44MzkgNzIuNTU4MUMxNjUuNzI4IDc4LjQ0NzcgMTYxLjcwMSA3OC43NzI3IDE1NC43NTYgNzIuNTU4MUMxNTEuODEyIDcwLjAyODEgMTQ0LjUzNSA2MS40ODg5IDEzNC45OTEgNTMuNTgzN0MxMjUuMzE5IDQ1LjU3MjMgMTA4LjQ5NyA0OC45NDU1IDEwMi4xODkgNTUuNjkxOUw3My41OTMxIDg0LjM2NDRWNy42MjM0OUw3OS4xMjczIDBDNjAuOTA0MiAzLjY1NDMzIDIzLjgwMjEgOS41NjMwOSAxOS43NjUgMTAuNTc1MUMxNS43Mjc5IDExLjU4NyAxMC43OTM3IDE2LjMzNzcgOC44MzExNyAxOC41ODY1WiIgZmlsbD0id2hpdGUiLz4KPHBhdGggZD0iTTQzLjIwMzggMTguNzE4N0w0OS4wOTEyIDEzLjA0OTNMNTQuOTc4NyAxOC43MTg3TDQ5LjA5MTIgMjQuODI0Mkw0My4yMDM4IDE4LjcxODdaIiBmaWxsPSIjNEMxOUU4Ii8+Cjwvc3ZnPgo=
|
||||
|
||||
[eigent-github]: https://github.com/eigent-ai/eigent
|
||||
[github-star]: https://img.shields.io/github/stars/eigent-ai?color=F5F4F0&labelColor=gray&style=plastic&logo=github
|
||||
[camel-ai-org-github]: https://github.com/camel-ai
|
||||
|
||||
[camel-github]: https://github.com/camel-ai/camel
|
||||
[eigent-github]: https://github.com/eigent-ai/eigent
|
||||
[contribution-link]: https://github.com/eigent-ai/eigent/blob/master/CONTRIBUTING.md
|
||||
|
||||
[social-x-link]: https://x.com/Eigent_AI
|
||||
[social-x-shield]: https://img.shields.io/badge/-%40Eigent_AI-white?labelColor=gray&logo=x&logoColor=white&style=plastic
|
||||
|
||||
[reddit-url]: https://www.reddit.com/r/CamelAI/
|
||||
[reddit-image]: https://img.shields.io/reddit/subreddit-subscribers/CamelAI?style=plastic&logo=reddit&label=r%2FCAMEL&labelColor=white
|
||||
|
||||
[wechat-url]: https://ghli.org/camel/wechat.png
|
||||
[wechat-image]: https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white
|
||||
|
||||
[sponsor-link]: https://github.com/sponsors/camel-ai
|
||||
[sponsor-shield]: https://img.shields.io/badge/-Sponsor%20CAMEL--AI-1d1d1d?logo=github&logoColor=white&style=plastic
|
||||
|
||||
[eigent-download]: https://www.eigent.ai/download
|
||||
[download-shield]: https://img.shields.io/badge/Download%20Eigent-363AF5?style=plastic
|
||||
|
||||
<!-- camel & eigent -->
|
||||
[camel-site]: https://www.camel-ai.org
|
||||
[eigent-site]: https://www.eigent.ai
|
||||
[docs-site]: https://docs.eigent.ai
|
||||
[github-issue-link]: https://github.com/eigent-ai/eigent/issues
|
||||
|
||||
<!-- marketing -->
|
||||
[image-seperator]: https://eigent-ai.github.io/.github/assets/seperator.png
|
||||
[image-head]: https://eigent-ai.github.io/.github/assets/head.png
|
||||
[image-public-beta]: https://eigent-ai.github.io/.github/assets/banner.png
|
||||
[image-star-us]: https://eigent-ai.github.io/.github/assets/star-us.gif
|
||||
[image-opensource]: https://eigent-ai.github.io/.github/assets/opensource.png
|
||||
[image-wechat]: https://eigent-ai.github.io/.github/assets/wechat.png
|
||||
|
||||
<!-- feature -->
|
||||
[image-workforce]: https://eigent-ai.github.io/.github/assets/feature_dynamic_workforce.gif
|
||||
[image-human-in-the-loop]: https://eigent-ai.github.io/.github/assets/feature_human_in_the_loop.gif
|
||||
[image-customise-workers]: https://eigent-ai.github.io/.github/assets/feature_customise_workers.gif
|
||||
[image-add-mcps]: https://eigent-ai.github.io/.github/assets/feature_add_mcps.gif
|
||||
[image-local-model]: https://eigent-ai.github.io/.github/assets/feature_local_model.gif
|
||||
416
README_CN.md
Normal file
416
README_CN.md
Normal file
@ -0,0 +1,416 @@
|
||||
<div align="center"><a name="readme-top"></a>
|
||||
|
||||
[![][image-head]][eigent-site]
|
||||
|
||||
[![][image-seperator]][eigent-site]
|
||||
|
||||
### Eigent:全球首个多智能体工作流,释放卓越生产力
|
||||
|
||||
<!-- SHIELD GROUP -->
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
[![][github-star]][eigent-github]
|
||||
[![][social-x-shield]][social-x-link]
|
||||
[![][discord-image]][discord-url]<br>
|
||||
[![Reddit][reddit-image]][reddit-url]
|
||||
[![Wechat][wechat-image]][wechat-url]
|
||||
[![][sponsor-shield]][sponsor-link]
|
||||
[![][built-with-camel]][camel-github]
|
||||
|
||||
</div>
|
||||
|
||||
<hr/>
|
||||
<div align="center">
|
||||
|
||||
[English](./README.md) · **简体中文** · [官方网站][eigent-site] · [文档][docs-site] · [反馈][github-issue-link]
|
||||
|
||||
</div>
|
||||
<br/>
|
||||
|
||||
**Eigent** 是全球首个 **多智能体工作流** 桌面应用程序,帮助您构建、管理和部署定制化的 AI 工作团队,将最复杂的工作流程转化为自动化任务。
|
||||
|
||||
基于 [CAMEL-AI][camel-site] 广受赞誉的开源项目,我们的系统引入了 **多智能体工作流**,通过并行执行、定制化和隐私保护 **提升生产力**。
|
||||
|
||||
### ⭐ 100% 开源 - 🥇 本地部署 - 🏆 MCP 集成
|
||||
|
||||
- ✅ **零配置** - 无需技术设置
|
||||
- ✅ **多智能体协作** - 处理复杂的多智能体工作流
|
||||
- ✅ **企业级功能** - SSO/访问控制
|
||||
- ✅ **本地部署**
|
||||
- ✅ **开源**
|
||||
- ✅ **支持自定义模型**
|
||||
- ✅ **MCP 集成**
|
||||
|
||||
<br/>
|
||||
|
||||
<details>
|
||||
<summary><kbd>目录</kbd></summary>
|
||||
|
||||
#### 目录
|
||||
|
||||
- [🚀 快速开始](#-快速开始)
|
||||
- [☁️ 云版本](#️-云版本)
|
||||
- [🏠 自托管(社区版)](#-自托管社区版)
|
||||
- [🏢 企业版](#-企业版)
|
||||
- [✨ 核心功能](#-核心功能)
|
||||
- [🏭 工作流](#-工作流)
|
||||
- [🧠 全面模型支持](#-全面模型支持)
|
||||
- [🔌 MCP 工具集成](#-mcp-工具集成)
|
||||
- [✋ 人工介入](#-人工介入)
|
||||
- [👐 100% 开源](#-100-开源)
|
||||
- [🧩 使用案例](#-使用案例)
|
||||
- [🛠️ 技术栈](#️-技术栈)
|
||||
- [后端](#后端)
|
||||
- [前端](#前端)
|
||||
- [🌟 保持领先](#-保持领先)
|
||||
- [🗺️ 路线图](#️-路线图)
|
||||
- [📖 贡献](#-贡献)
|
||||
- [核心贡献者](#核心贡献者)
|
||||
- [杰出大使](#杰出大使)
|
||||
- [生态系统](#生态系统)
|
||||
- [📄 开源许可证](#-开源许可证)
|
||||
- [🌐 社区与联系](#-社区与联系)
|
||||
|
||||
####
|
||||
|
||||
<br/>
|
||||
|
||||
</details>
|
||||
|
||||
## **🚀 快速开始**
|
||||
|
||||
有三种方式开始使用 Eigent:
|
||||
|
||||
### ☁️ 云版本
|
||||
|
||||
最快体验 Eigent 多智能体 AI 能力的方式是通过我们的云平台,适合希望无需复杂设置即可立即使用的团队和个人。我们将托管模型、API 和云存储,确保 Eigent 流畅运行。
|
||||
|
||||
- **即时访问** - 几分钟内开始构建多智能体工作流。
|
||||
- **托管基础设施** - 我们负责扩展、更新和维护。
|
||||
- **优先支持** - 订阅后获得工程团队的优先协助。
|
||||
|
||||
<br/>
|
||||
|
||||
[![image-public-beta]][eigent-download]
|
||||
|
||||
<div align="right">
|
||||
<a href="https://www.eigent.ai">Get started at Eigent.ai →</a>
|
||||
</div>
|
||||
|
||||
### 🏠 自托管(社区版)
|
||||
|
||||
适合偏好本地控制、数据隐私或定制的用户,此选项适用于需要以下功能的组织:
|
||||
|
||||
- **数据隐私** - 敏感数据保留在您的基础设施内。
|
||||
- **定制化** - 修改和扩展平台以满足需求。
|
||||
- **成本控制** - 避免大规模部署的持续云费用。
|
||||
|
||||
#### 1. 前提条件
|
||||
|
||||
- Node.js 和 npm
|
||||
|
||||
#### 2. 快速开始
|
||||
|
||||
```bash
|
||||
git clone https://github.com/eigent-ai/eigent.git
|
||||
cd eigent
|
||||
npm install
|
||||
npm run dev
|
||||
```
|
||||
|
||||
### 🏢 企业版
|
||||
|
||||
适合需要最高安全性、定制化和控制的组织:
|
||||
|
||||
- **商业许可证** - [查看许可证 →](LICENSE)
|
||||
- **独家功能**(如 SSO 和定制开发)
|
||||
- **可扩展的企业部署**
|
||||
- **协商的 SLA** 和实施服务
|
||||
|
||||
📧 更多详情,请联系 [info@eigent.ai](mailto:info@eigent.ai)。
|
||||
|
||||
## **✨ 核心功能**
|
||||
通过 Eigent 的强大功能释放卓越生产力的全部潜力——专为无缝集成、智能任务执行和无边界自动化而设计。
|
||||
|
||||
### 🏭 工作流
|
||||
部署一支专业 AI 智能体团队,协作解决复杂任务。Eigent 动态分解任务并激活多个智能体 **并行工作**。
|
||||
|
||||
Eigent 预定义了以下智能体工作者:
|
||||
|
||||
- **开发智能体**:编写和执行代码,运行终端命令。
|
||||
- **搜索智能体**:搜索网络并提取内容。
|
||||
- **文档智能体**:创建和管理文档。
|
||||
- **多模态智能体**:处理图像和音频。
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 🧠 全面模型支持
|
||||
使用您偏好的模型本地部署 Eigent。
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 🔌 MCP 工具集成
|
||||
Eigent 内置大量 **模型上下文协议(MCP)** 工具(用于网页浏览、代码执行、Notion、Google 套件、Slack 等),并允许您 **安装自己的工具**。为智能体配备适合您场景的工具——甚至集成内部 API 或自定义功能——以增强其能力。
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### ✋ 人工介入
|
||||
如果任务卡住或遇到不确定性,Eigent 会自动请求人工输入。
|
||||
|
||||

|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
### 👐 100% 开源
|
||||
Eigent 完全开源。您可以下载、检查和修改代码,确保透明度并促进多智能体创新的社区驱动生态系统。
|
||||
|
||||
![Opensource][image-opensource]
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
<br/>
|
||||
|
||||
## 🧩 使用案例
|
||||
|
||||
### 1. 棕榈泉网球旅行行程与 Slack 摘要 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTM0MzUxNTEzMzctNzExMyI.aIeysw.MUeG6ZcBxI1GqvPDvn4dcv-CDWw__1753435151337-7113)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>我们是两个网球爱好者,想去观看 2026 年棕榈泉的网球比赛... </kbd></summary>
|
||||
<br>
|
||||
我们是两个网球爱好者,想去观看 2026 年棕榈泉的网球比赛。我住在旧金山——请准备一个详细的行程,包括航班、酒店、为期 3 天的活动安排——围绕半决赛/决赛的时间。我们喜欢徒步、素食和 Spa。预算为 5,000 美元。行程应是一个详细的时间表,包括时间、活动、费用、其他细节,以及购买门票/预订的链接(如适用)。完成后,请生成一份关于此次旅行的 HTML 报告;编写此计划的摘要,并将文本摘要和报告 HTML 链接发送到 Slack #tennis-trip-sf 频道。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/palm_springs_tennis_trip_itinerary_with_slack_summary.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 2. 从 CSV 银行数据生成 Q2 报告 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTM1MjY4OTE4MDgtODczOSI.aIjJmQ.WTdoX9mATwrcBr_w53BmGEHPo8U__1753526891808-8739)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>请根据我桌面上的银行转账记录文件 bank_transacation.csv... </kbd></summary>
|
||||
<br>
|
||||
请根据我桌面上的银行转账记录文件 bank_transacation.csv,帮我准备一份 Q2 财务报表,生成带图表的 HTML 报告,向投资者展示我们的支出情况。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/generate_q2_report_from_csv_bank_data.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 3. 英国医疗市场调研报告自动化 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTMzOTM1NTg3OTctODcwNyI.aIey-Q.Jh9QXzYrRYarY0kz_qsgoj3ewX0__1753393558797-8707)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>分析英国医疗保健行业以支持我下一家公司的规划... </kbd></summary>
|
||||
<br>
|
||||
分析英国医疗保健行业以支持我下一家公司的规划。提供全面的市场概览,包括当前趋势、增长预测和相关法规。识别市场中5-10个主要机会、缺口或服务不足的细分领域。将所有发现整理成结构清晰、专业的HTML报告。完成后,向Slack的#eigentr-product-test频道发送消息,以便与团队成员对齐报告内容。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/uk_healthcare_market_research_report_automation.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 4. 德国电动滑板市场可行性 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTM2NTI4MjY3ODctNjk2Ig.aIjGiA.t-qIXxk_BZ4ENqa-yVIm0wMVyXU__1753652826787-696)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>我们是一家生产高端电动滑板的公司... </kbd></summary>
|
||||
<br>
|
||||
我们是一家生产高端电动滑板的公司,正在考虑进入德国市场。请为我准备一份详细的市场进入可行性报告。报告需涵盖以下方面:1. 市场规模与法规;2. 消费者画像;3. 渠道与分销;4. 成本与定价;5. 综合报告与演示。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/german_electric_skateboard_market_feasibility.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 5. 多智能体产品发布的 SEO 审计 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTM2OTk5NzExNDQtNTY5NiI.aIex0w.jc_NIPmfIf9e3zGt-oG9fbMi3K4__1753699971144-5696)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>为了支持我们新的多智能体产品发布... </kbd></summary>
|
||||
<br>
|
||||
为了支持我们新的多智能体产品发布,请对我们的官方网站 (https://www.camel-ai.org/) 进行全面的 SEO 审计,并提供带有可操作建议的详细优化报告。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/seo_audit_for_workforce_multiagent_launch.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
### 6. 识别下载文件夹中的重复文件 [回放 ▶️](https://www.eigent.ai/download?share_token=IjE3NTM3NjAzODgxNzEtMjQ4Ig.aIhKLQ.epOG--0Nj0o4Bqjtdqm9OZdaqRQ__1753760388171-248)
|
||||
|
||||
<details>
|
||||
<summary><strong>提示:</strong> <kbd>我的 Documents 目录中有一个名为 mydocs 的文件夹... </kbd></summary>
|
||||
<br>
|
||||
我的 Documents 目录中有一个名为 mydocs 的文件夹。请扫描并识别所有完全或近似重复的文件——包括内容相同、文件大小或格式相同的文件(即使文件名或扩展名不同)。清晰列出它们,按相似性分组。
|
||||
</details>
|
||||
|
||||
<br>
|
||||
|
||||
<video width="100%" controls>
|
||||
<source src="public/images/identify_duplicate_files_in_downloads.mp4" type="video/mp4">
|
||||
您的浏览器不支持视频标签。
|
||||
</video>
|
||||
|
||||
<br>
|
||||
|
||||
[![][download-shield]][eigent-download]
|
||||
|
||||
## 🛠️ 技术栈
|
||||
|
||||
### 后端
|
||||
- **框架:** FastAPI
|
||||
- **包管理器:** uv
|
||||
- **异步服务器:** Uvicorn
|
||||
- **认证:** OAuth 2.0, Passlib
|
||||
- **多智能体框架:** CAMEL
|
||||
|
||||
### 前端
|
||||
- **框架:** React
|
||||
- **桌面应用框架:** Electron
|
||||
- **语言:** TypeScript
|
||||
- **UI:** Tailwind CSS, Radix UI, Lucide React, Framer Motion
|
||||
- **状态管理:** Zustand
|
||||
- **流程编辑器:** React Flow
|
||||
|
||||
## 🌟 保持领先
|
||||
|
||||
> [!IMPORTANT]
|
||||
>
|
||||
> **给 Eigent 加星标**,您将通过 GitHub 及时收到所有发布通知 ⭐️
|
||||
|
||||
![][image-star-us]
|
||||
|
||||
## 🗺️ 路线图
|
||||
|
||||
| 主题 | 问题 | Discord 频道 |
|
||||
| ------------------------ | -- |-- |
|
||||
| **上下文工程** | - 提示缓存<br> - 系统提示优化<br> - 工具包文档优化<br> - 上下文压缩 | [**加入 Discord →**](https://discord.gg/D2e3rBWD) |
|
||||
| **多模态增强** | - 使用浏览器时更准确的图像理解<br> - 高级视频生成 | [**加入 Discord →**](https://discord.gg/kyapNCeJ) |
|
||||
| **多智能体系统** | - 工作流支持固定流程<br> - 工作流支持多轮对话 | [**加入 Discord →**](https://discord.gg/bFRmPuDB) |
|
||||
| **浏览器工具包** | - BrowseCamp 集成<br> - 基准测试改进<br> - 禁止重复访问页面<br> - 自动缓存按钮点击 | [**加入 Discord →**](https://discord.gg/NF73ze5v) |
|
||||
| **文档工具包** | - 支持动态文件编辑 | [**加入 Discord →**](https://discord.gg/4yAWJxYr) |
|
||||
| **终端工具包** | - 基准测试改进<br> - Terminal-Bench 集成 | [**加入 Discord →**](https://discord.gg/FjQfnsrV) |
|
||||
| **环境与强化学习** | - 环境设计<br> - 数据生成<br> - 强化学习框架集成(VERL, TRL, OpenRLHF) | [**加入 Discord →**](https://discord.gg/MaVZXEn8) |
|
||||
|
||||
## [🤝 贡献][contribution-link]
|
||||
|
||||
我们相信通过开源协作建立信任。您的创意贡献将推动 `Eigent` 的创新。探索我们的 GitHub 问题与项目,加入我们 🤝❤️ [贡献指南][contribution-link]
|
||||
|
||||
## [❤️ 赞助][sponsor-link]
|
||||
|
||||
Eigent 基于 [CAMEL-AI.org][camel-ai-org-github] 的研究和基础设施构建。[赞助 CAMEL-AI.org][sponsor-link] 将使 `Eigent` 变得更好。
|
||||
|
||||
## **📄 开源许可证**
|
||||
|
||||
本仓库采用 [**Eigent 开源许可证**](LICENSE),基于 Apache 2.0 并附加额外条款。
|
||||
|
||||
## 🌐 社区与联系
|
||||
更多信息请联系 info@eigent.ai
|
||||
|
||||
- **GitHub Issues:** 报告错误、请求功能并跟踪开发进度。[提交问题][github-issue-link]
|
||||
|
||||
- **Discord:** 获取实时支持、与社区交流并保持更新。[加入我们](https://discord.camel-ai.org/)
|
||||
|
||||
- **X (Twitter):** 关注更新、AI 见解和重要公告。[关注我们][social-x-link]
|
||||
|
||||
- **微信社区:** 扫描下方二维码加入我们的微信社区。
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
<!-- LINK GROUP -->
|
||||
<!-- Social -->
|
||||
[discord-url]: https://discord.camel-ai.org/
|
||||
[discord-image]: https://img.shields.io/discord/1082486657678311454?logo=discord&labelColor=%20%235462eb&logoColor=%20%23f5f5f5&color=%20%235462eb
|
||||
|
||||
[built-with-camel]:https://img.shields.io/badge/-Built--with--CAMEL-4C19E8.svg?logo=data:image/svg+xml;base64,PHN2ZyB3aWR0aD0iMjQ4IiBoZWlnaHQ9IjI3MiIgdmlld0JveD0iMCAwIDI0OCAyNzIiIGZpbGw9Im5vbmUiIHhtbG5zPSJodHRwOi8vd3d3LnczLm9yZy8yMDAwL3N2ZyI+CjxwYXRoIGQ9Ik04LjgzMTE3IDE4LjU4NjVMMCAzMC44MjY3QzUuNDY2OTIgMzUuMDQzMiAxNS4xMzkxIDM4LjgyNTggMjQuODExNCAzNi4yOTU5QzMwLjY5ODggNDAuOTM0MSAzOS42NzAyIDQwLjIzMTMgNDQuMTU1OSA0MC4wOTA4QzQzLjQ1NSA0Ny4zOTk0IDQyLjQ3MzcgNzAuOTU1OCA0NC4xNTU5IDEwNi43MTJDNDUuODM4IDE0Mi40NjggNzEuNzcwOCAxNjYuODY4IDg0LjUyNjkgMTc0LjU5OEw3Ni4wMDAyIDIyMEw4NC41MjY5IDI3MkgxMDguOTE4TDk4LjAwMDIgMjIwTDEwOC45MTggMTc0LjU5OEwxMjkuOTQ0IDI3MkgxNTQuNzU2TDEzNC4xNSAxNzQuNTk4SDE4Ny4xMzdMMTY2LjUzMSAyNzJIMTkxLjc2M0wyMTIuMzY5IDE3NC41OThMMjI2IDIyMEwyMTIuMzY5IDI3MkgyMzcuNjAxTDI0OC4wMDEgMjIwTDIzNy4xOCAxNzQuNTk4QzIzOS4yODMgMTY5LjExNyAyNDAuNDAxIDE2Ni45NzYgMjQxLjgwNiAxNjEuMTA1QzI0OS4zNzUgMTI5LjQ4MSAyMzUuMDc3IDEwMy45MDEgMjI2LjY2NyA5NC40ODRMMjA2LjQ4MSA3My44MjNDMTk3LjY1IDY0Ljk2ODMgMTgyLjUxMSA2NC41NDY3IDE3Mi44MzkgNzIuNTU4MUMxNjUuNzI4IDc4LjQ0NzcgMTYxLjcwMSA3OC43NzI3IDE1NC43NTYgNzIuNTU4MUMxNTEuODEyIDcwLjAyODEgMTQ0LjUzNSA2MS40ODg5IDEzNC45OTEgNTMuNTgzN0MxMjUuMzE5IDQ1LjU3MjMgMTA4LjQ5NyA0OC45NDU1IDEwMi4xODkgNTUuNjkxOUw3My41OTMxIDg0LjM2NDRWNy42MjM0OUw3OS4xMjczIDBDNjAuOTA0MiAzLjY1NDMzIDIzLjgwMjEgOS41NjMwOSAxOS43NjUgMTAuNTc1MUMxNS43Mjc5IDExLjU4NyAxMC43OTM3IDE2LjMzNzcgOC44MzExNyAxOC41ODY1WiIgZmlsbD0id2hpdGUiLz4KPHBhdGggZD0iTTQzLjIwMzggMTguNzE4N0w0OS4wOTEyIDEzLjA0OTNMNTQuOTc4NyAxOC43MTg3TDQ5LjA5MTIgMjQuODI0Mkw0My4yMDM4IDE4LjcxODdaIiBmaWxsPSIjNEMxOUU4Ii8+Cjwvc3ZnPgo=
|
||||
|
||||
[eigent-github]: https://github.com/eigent-ai/eigent
|
||||
[github-star]: https://img.shields.io/github/stars/eigent-ai?color=F5F4F0&labelColor=gray&style=plastic&logo=github
|
||||
[camel-ai-org-github]: https://github.com/camel-ai
|
||||
|
||||
[camel-github]: https://github.com/camel-ai/camel
|
||||
[eigent-github]: https://github.com/eigent-ai/eigent
|
||||
[contribution-link]: https://github.com/eigent-ai/eigent/blob/master/CONTRIBUTING.md
|
||||
|
||||
[social-x-link]: https://x.com/Eigent_AI
|
||||
[social-x-shield]: https://img.shields.io/badge/-%40Eigent_AI-white?labelColor=gray&logo=x&logoColor=white&style=plastic
|
||||
|
||||
[reddit-url]: https://www.reddit.com/r/CamelAI/
|
||||
[reddit-image]: https://img.shields.io/reddit/subreddit-subscribers/CamelAI?style=plastic&logo=reddit&label=r%2FCAMEL&labelColor=white
|
||||
|
||||
[wechat-url]: https://ghli.org/camel/wechat.png
|
||||
[wechat-image]: https://img.shields.io/badge/WeChat-CamelAIOrg-brightgreen?logo=wechat&logoColor=white
|
||||
|
||||
[sponsor-link]: https://github.com/sponsors/camel-ai
|
||||
[sponsor-shield]: https://img.shields.io/badge/-Sponsor%20CAMEL--AI-1d1d1d?logo=github&logoColor=white&style=plastic
|
||||
|
||||
[eigent-download]: https://www.eigent.ai/download
|
||||
[download-shield]: https://img.shields.io/badge/Download%20Eigent-363AF5?style=plastic
|
||||
|
||||
<!-- camel & eigent -->
|
||||
[camel-site]: https://www.camel-ai.org
|
||||
[eigent-site]: https://www.eigent.ai
|
||||
[docs-site]: https://docs.eigent.ai
|
||||
[github-issue-link]: https://github.com/eigent-ai/eigent/issues
|
||||
|
||||
<!-- marketing -->
|
||||
[image-seperator]: https://eigent-ai.github.io/.github/assets/seperator.png
|
||||
[image-head]: https://eigent-ai.github.io/.github/assets/head.png
|
||||
[image-public-beta]: https://eigent-ai.github.io/.github/assets/banner.png
|
||||
[image-star-us]: https://eigent-ai.github.io/.github/assets/star-us.gif
|
||||
[image-opensource]: https://eigent-ai.github.io/.github/assets/opensource.png
|
||||
[image-wechat]: https://eigent-ai.github.io/.github/assets/wechat.png
|
||||
|
||||
<!-- feature -->
|
||||
[image-workforce]: https://eigent-ai.github.io/.github/assets/feature_dynamic_workforce.gif
|
||||
[image-human-in-the-loop]: https://eigent-ai.github.io/.github/assets/feature_human_in_the_loop.gif
|
||||
[image-customise-workers]: https://eigent-ai.github.io/.github/assets/feature_customise_workers.gif
|
||||
[image-add-mcps]: https://eigent-ai.github.io/.github/assets/feature_add_mcps.gif
|
||||
[image-local-model]: https://eigent-ai.github.io/.github/assets/feature_local_model.gif
|
||||
21
backend/.gitignore
vendored
Normal file
21
backend/.gitignore
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
# Python-generated files
|
||||
__pycache__/
|
||||
*.py[oc]
|
||||
build/
|
||||
dist/
|
||||
wheels/
|
||||
*.egg-info
|
||||
*.mo
|
||||
test.py
|
||||
|
||||
# Virtual environments
|
||||
.venv
|
||||
.env
|
||||
|
||||
runtime
|
||||
tmp
|
||||
img
|
||||
|
||||
|
||||
uv_installing.lock
|
||||
uv_installed.lock
|
||||
1
backend/.python-version
Normal file
1
backend/.python-version
Normal file
@ -0,0 +1 @@
|
||||
3.10.16
|
||||
10
backend/.vscode/settings.json
vendored
Normal file
10
backend/.vscode/settings.json
vendored
Normal file
@ -0,0 +1,10 @@
|
||||
{
|
||||
"cSpell.words": [
|
||||
"astep",
|
||||
"dotenv",
|
||||
"duckduckgo",
|
||||
"eigent",
|
||||
"extendleft",
|
||||
"toolkits"
|
||||
]
|
||||
}
|
||||
23
backend/README.md
Normal file
23
backend/README.md
Normal file
@ -0,0 +1,23 @@
|
||||
```bash
|
||||
uv run uvicorn main:api --port 5001
|
||||
```
|
||||
|
||||
i18n operation process: https://github.com/Anbarryprojects/fastapi-babel
|
||||
|
||||
```bash
|
||||
|
||||
pybabel extract -F babel.cfg -o messages.pot . --ignore-pot-creation-date # Extract multilingual strings from code to messages.pot file
|
||||
pybabel init -i messages.pot -d lang -l zh_CN # Generate Chinese language pack, can only be generated initially, subsequent execution will cause overwrite
|
||||
pybabel compile -d lang -l zh_CN # Compile language pack
|
||||
|
||||
|
||||
pybabel update -i messages.pot -d lang
|
||||
# -i messages.pot: Specify the input file as the generated .pot file
|
||||
# -d translations: Specify the translation directory, which typically contains .po files for each language
|
||||
# -l zh: Specify the language code
|
||||
```
|
||||
|
||||
```bash
|
||||
# regular search
|
||||
\berror\b(?!\])
|
||||
```
|
||||
8
backend/app/__init__.py
Normal file
8
backend/app/__init__.py
Normal file
@ -0,0 +1,8 @@
|
||||
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware


# Application-wide FastAPI instance; controllers register their routers on it.
api = FastAPI()
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is a
# maximally permissive CORS policy. That may be intentional for a localhost
# desktop backend, but confirm before exposing this service on a network.
api.add_middleware(
    CORSMiddleware, allow_origins=["*"], allow_credentials=True, allow_methods=["*"], allow_headers=["*"]
)
|
||||
5
backend/app/command/__init__.py
Normal file
5
backend/app/command/__init__.py
Normal file
@ -0,0 +1,5 @@
|
||||
import click


# Root click command group for the backend CLI; presumably subcommands are
# registered on it from other modules — TODO confirm against callers.
@click.group()
def cli(): ...
|
||||
10
backend/app/component/babel.py
Normal file
10
backend/app/component/babel.py
Normal file
@ -0,0 +1,10 @@
|
||||
from fastapi_babel import BabelConfigs, Babel
from pathlib import Path


# i18n configuration: translation catalogues live in the app-level "lang"
# directory (one level above this component package), defaulting to US English.
babel_configs = BabelConfigs(
    ROOT_DIR=Path(__file__).parent.parent,
    BABEL_DEFAULT_LOCALE="en_US",
    BABEL_TRANSLATION_DIRECTORY="lang",
)

# Shared Babel instance built from the configuration above.
babel = Babel(configs=babel_configs)
|
||||
16
backend/app/component/code.py
Normal file
16
backend/app/component/code.py
Normal file
@ -0,0 +1,16 @@
|
||||
# Application-level response/status codes shared across the backend.

success = 0  # success
error = 1  # common error
not_found = 4  # can't find route or resource

password = 10  # account password error
token_need = 11  # token required
token_expired = 12  # token expired
token_invalid = 13  # token invalid
token_blocked = 14  # token is in the block list


form_error = 100  # form validation error

no_permission_error = 300  # no permission

program_error = 500  # program error
|
||||
9
backend/app/component/command.py
Normal file
9
backend/app/component/command.py
Normal file
@ -0,0 +1,9 @@
|
||||
import os
|
||||
|
||||
|
||||
def bun():
    """Return the absolute path to the user-local bun binary (~/.eigent/bin/bun)."""
    binary_location = "~/.eigent/bin/bun"
    return os.path.expanduser(binary_location)
|
||||
|
||||
|
||||
def uv():
    """Return the absolute path to the user-local uv binary (~/.eigent/bin/uv)."""
    binary_location = "~/.eigent/bin/uv"
    return os.path.expanduser(binary_location)
|
||||
15
backend/app/component/debug.py
Normal file
15
backend/app/component/debug.py
Normal file
@ -0,0 +1,15 @@
|
||||
import inspect
|
||||
|
||||
|
||||
def dump_class(obj, max_val_len=1000):
    """Print a debug dump of an object: its class name and every instance attribute.

    Attribute reprs longer than ``max_val_len`` characters are truncated with
    a "... [truncated]" marker so huge values don't flood the console.
    """
    klass = obj.__class__
    print(f"Class: {klass.__name__}")
    print("Attributes:")
    for attr_name, attr_value in vars(obj).items():
        rendered = repr(attr_value)
        if len(rendered) > max_val_len:
            rendered = rendered[:max_val_len] + "... [truncated]"
        print(f"  {attr_name} = {rendered}")
|
||||
11
backend/app/component/encrypt.py
Normal file
11
backend/app/component/encrypt.py
Normal file
@ -0,0 +1,11 @@
|
||||
from passlib.context import CryptContext


# Shared bcrypt hashing context. "deprecated=auto" marks hashes produced by
# any scheme other than the active one as needing an upgrade on verify.
password = CryptContext(schemes=["bcrypt"], deprecated="auto")


def password_hash(password_value: str) -> str:
    # Hash a plaintext password with the shared bcrypt context.
    return password.hash(password_value)


def password_verify(password_value: str, password_hash: str) -> bool:
    # Verify a plaintext password against a stored bcrypt hash.
    return password.verify(password_value, password_hash)
|
||||
98
backend/app/component/environment.py
Normal file
98
backend/app/component/environment.py
Normal file
@ -0,0 +1,98 @@
|
||||
import importlib.util
|
||||
import os
|
||||
from pathlib import Path
|
||||
from fastapi import APIRouter, FastAPI
|
||||
from dotenv import load_dotenv
|
||||
import importlib
|
||||
from typing import Any, overload
|
||||
|
||||
|
||||
env_path = os.path.join(os.path.expanduser("~"), ".eigent", ".env")
|
||||
load_dotenv(dotenv_path=env_path)
|
||||
|
||||
|
||||
@overload
|
||||
def env(key: str) -> str | None: ...
|
||||
|
||||
|
||||
@overload
|
||||
def env(key: str, default: str) -> str: ...
|
||||
|
||||
|
||||
@overload
|
||||
def env(key: str, default: Any) -> Any: ...
|
||||
|
||||
|
||||
def env(key: str, default=None):
|
||||
return os.getenv(key, default)
|
||||
|
||||
|
||||
def env_or_fail(key: str):
    """Return the environment value for ``key``; raise if the variable is unset.

    Args:
        key: Environment variable name.

    Returns:
        The variable's value (may be an empty string — see env_not_empty).

    Raises:
        Exception: if ``key`` is not present in the environment.
    """
    value = env(key)
    if value is None:
        # Include the key name so the failure is diagnosable from the message.
        raise Exception(f"can't get env config value: {key}")
    return value
|
||||
|
||||
|
||||
def env_not_empty(key: str):
    """Return the environment value for ``key``; raise if it is unset or falsy.

    Args:
        key: Environment variable name.

    Returns:
        The non-empty value of the variable.

    Raises:
        Exception: if ``key`` is missing or its value is empty.
    """
    value = env(key)
    if not value:
        # Include the key name so the failure is diagnosable from the message.
        raise Exception(f"env config value can't be empty: {key}")
    return value
|
||||
|
||||
|
||||
def base_path():
    """Return the backend project root: three levels up from this module's file."""
    here = Path(__file__)
    return here.parent.parent.parent
|
||||
|
||||
|
||||
def to_path(path: str):
    """Resolve ``path`` relative to the project root and return it as a Path."""
    return base_path().joinpath(path)
|
||||
|
||||
|
||||
def auto_import(package: str):
    """Import every non-dunder .py module directly inside ``package``.

    The dotted package name is mapped to a folder relative to the current
    working directory, so this only works when run from the project root.
    """
    folder = package.replace(".", "/")
    for entry in os.listdir(folder):
        is_plain_module = entry.endswith(".py") and not entry.startswith("__")
        if not is_plain_module:
            continue
        # Strip the ".py" suffix to get the module name, then import it.
        importlib.import_module(f"{package}.{entry[:-3]}")
|
||||
|
||||
|
||||
def auto_include_routers(api: FastAPI, prefix: str, directory: str):
    """Scan ``directory`` recursively and register every controller router.

    Modules named ``*_controller.py`` are loaded from file; if a module
    exposes a module-level ``router`` that is an APIRouter, it is attached
    to ``api`` under ``prefix``.

    :param api: FastAPI instance to attach routers to
    :param prefix: URL prefix applied to every discovered router
    :param directory: directory tree to scan
    """
    scan_root = Path(directory).resolve()

    for current_dir, _subdirs, file_names in os.walk(scan_root):
        for file_name in file_names:
            if not file_name.endswith("_controller.py") or file_name.startswith("__"):
                continue

            module_path = Path(current_dir) / file_name

            # Load the controller module directly from its file path.
            spec = importlib.util.spec_from_file_location(module_path.stem, module_path)
            if spec is None or spec.loader is None:
                continue
            module = importlib.util.module_from_spec(spec)
            spec.loader.exec_module(module)

            # Only register a genuine APIRouter exposed as "router".
            candidate = getattr(module, "router", None)
            if isinstance(candidate, APIRouter):
                api.include_router(candidate, prefix=prefix)
|
||||
41
backend/app/component/model_validation.py
Normal file
41
backend/app/component/model_validation.py
Normal file
@ -0,0 +1,41 @@
|
||||
from camel.agents import ChatAgent
|
||||
from camel.models import ModelFactory
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
|
||||
|
||||
def get_website_content(url: str) -> str:
    r"""Gets the content of a website.

    Args:
        url (str): The URL of the website.

    Returns:
        str: The content of the website.
    """
    # NOTE(review): stub tool used for model validation — the url argument is
    # intentionally ignored and a fixed greeting is returned.
    return "Welcome to CAMEL AI!"
|
||||
|
||||
|
||||
def create_agent(
    model_platform: str,
    model_type: str,
    api_key: str | None = None,
    url: str | None = None,
    model_config_dict: dict | None = None,
    **kwargs,
) -> ChatAgent:
    """Create a ChatAgent used to validate a model configuration.

    The agent is equipped with a single stub tool (``get_website_content``) so
    a test prompt can check that the configured model supports tool calling.

    Args:
        model_platform (str): Platform identifier accepted by ModelFactory.
        model_type (str): Model type identifier accepted by ModelFactory.
        api_key (str | None): Optional API key for the platform.
        url (str | None): Optional custom endpoint URL.
        model_config_dict (dict | None): Optional model configuration.
        **kwargs: Extra keyword arguments forwarded to ModelFactory.create.

    Returns:
        ChatAgent: Agent wired to the created model and the stub tool.

    Raises:
        ValueError: If ``model_platform`` or ``model_type`` is None.
    """
    # Fail fast with a clear message instead of a deep ModelFactory error.
    if model_type is None:
        raise ValueError(f"Invalid model_type: {model_type}")
    if model_platform is None:
        raise ValueError(f"Invalid model_platform: {model_platform}")

    model = ModelFactory.create(
        model_platform=model_platform,
        model_type=model_type,
        api_key=api_key,
        url=url,
        timeout=10,  # keep validation responsive; a healthy endpoint answers fast
        model_config_dict=model_config_dict,
        **kwargs,
    )
    return ChatAgent(
        system_message="You are a helpful assistant that must use the tool get_website_content to get the content of a website.",
        model=model,
        tools=[get_website_content],
    )
|
||||
58
backend/app/component/pydantic/i18n.py
Normal file
58
backend/app/component/pydantic/i18n.py
Normal file
@ -0,0 +1,58 @@
|
||||
from pathlib import Path
|
||||
from app.component.babel import babel_configs, babel
|
||||
import re, os
|
||||
from fastapi_babel.middleware import Babel, LANGUAGES_PATTERN
|
||||
from pydantic_i18n import JsonLoader, PydanticI18n
|
||||
|
||||
|
||||
def get_language(lang_code: str | None = None):
    """Ported from fastapi_babel.middleware.BabelMiddleware.get_language
    Applies an available language.

    To apply an available language it will be searched in the language folder for an available one
    and will also prioritize the one with the highest quality value. The fallback language will be
    taken from the BABEL_DEFAULT_LOCALE var.

    Args:
        babel (Babel): Request scoped Babel instance
        lang_code (str): The Value of the Accept-Language Header.

    Returns:
        str: The language that should be used.
    """

    # No Accept-Language header at all: use the configured default locale.
    if not lang_code:
        return babel.config.BABEL_DEFAULT_LOCALE

    # Parse entries like "en-US;q=0.8" into ("en_US", "0.8") pairs.
    matches = re.finditer(LANGUAGES_PATTERN, lang_code)
    languages = [
        (f"{m.group(1)}{f'_{m.group(2)}' if m.group(2) else ''}", m.group(3) or "")
        for m in matches
    ]
    languages = sorted(
        languages, key=lambda x: x[1], reverse=True
    )  # sort the priority, no priority comes last
    # Available languages are whatever directories/files exist in the
    # translation folder (e.g. en_US, zh_CN).
    translation_directory = Path(babel.config.BABEL_TRANSLATION_DIRECTORY)
    translation_files = [i.name for i in translation_directory.iterdir()]
    explicit_priority = None

    for lang, quality in languages:
        if lang in translation_files:
            if (
                not quality
            ):  # languages without quality value having the highest priority 1
                return lang

            elif (
                not explicit_priority
            ):  # set language with explicit priority <= priority 1
                explicit_priority = lang

    # Return language with explicit priority or default value
    return (
        explicit_priority if explicit_priority else babel_configs.BABEL_DEFAULT_LOCALE
    )
|
||||
|
||||
|
||||
# JSON-backed catalogue of translated pydantic validation messages, loaded
# from the sibling "translations" directory (en_US.json, zh_CN.json).
loader = JsonLoader(os.path.dirname(__file__) + "/translations")
trans = PydanticI18n(loader)
|
||||
98
backend/app/component/pydantic/translations/en_US.json
Normal file
98
backend/app/component/pydantic/translations/en_US.json
Normal file
@ -0,0 +1,98 @@
|
||||
{
|
||||
"Object has no attribute '{}'": "Object has no attribute '{}'",
|
||||
"Invalid JSON: {}": "Invalid JSON: {}",
|
||||
"JSON input should be string, bytes or bytearray": "JSON input should be string, bytes or bytearray",
|
||||
"Cannot check `{}` when validating from json, use a JsonOrPython validator instead": "Cannot check `{}` when validating from json, use a JsonOrPython validator instead",
|
||||
"Recursion error - cyclic reference detected": "Recursion error - cyclic reference detected",
|
||||
"Field required": "Field required",
|
||||
"Field is frozen": "Field is frozen",
|
||||
"Instance is frozen": "Instance is frozen",
|
||||
"Extra inputs are not permitted": "Extra inputs are not permitted",
|
||||
"Keys should be strings": "Keys should be strings",
|
||||
"Error extracting attribute: {}": "Error extracting attribute: {}",
|
||||
"Input should be a valid dictionary or instance of {}": "Input should be a valid dictionary or instance of {}",
|
||||
"Input should be a valid dictionary or object to extract fields from": "Input should be a valid dictionary or object to extract fields from",
|
||||
"Input should be a dictionary or an instance of {}": "Input should be a dictionary or an instance of {}",
|
||||
"Input should be an instance of {}": "Input should be an instance of {}",
|
||||
"Input should be None": "Input should be None",
|
||||
"Input should be greater than {}": "Input should be greater than {}",
|
||||
"Input should be greater than or equal to {}": "Input should be greater than or equal to {}",
|
||||
"Input should be less than {}": "Input should be less than {}",
|
||||
"Input should be less than or equal to {}": "Input should be less than or equal to {}",
|
||||
"Input should be a multiple of {}": "Input should be a multiple of {}",
|
||||
"Input should be a finite number": "Input should be a finite number",
|
||||
"Input should be iterable": "Input should be iterable",
|
||||
"Error iterating over object, error: {}": "Error iterating over object, error: {}",
|
||||
"Input should be a valid string": "Input should be a valid string",
|
||||
"Input should be a string, not an instance of a subclass of str": "Input should be a string, not an instance of a subclass of str",
|
||||
"Input should be a valid string, unable to parse raw data as a unicode string": "Input should be a valid string, unable to parse raw data as a unicode string",
|
||||
"String should have at least {}": "String should have at least {}",
|
||||
"String should have at most {}": "String should have at most {}",
|
||||
"String should match pattern '{}'": "String should match pattern '{}'",
|
||||
"Input should be {}": "Input should be {}",
|
||||
"Input should be a valid dictionary": "Input should be a valid dictionary",
|
||||
"Input should be a valid mapping, error: {}": "Input should be a valid mapping, error: {}",
|
||||
"Input should be a valid list": "Input should be a valid list",
|
||||
"Input should be a valid tuple": "Input should be a valid tuple",
|
||||
"Input should be a valid set": "Input should be a valid set",
|
||||
"Input should be a valid boolean": "Input should be a valid boolean",
|
||||
"Input should be a valid boolean, unable to interpret input": "Input should be a valid boolean, unable to interpret input",
|
||||
"Input should be a valid integer": "Input should be a valid integer",
|
||||
"Input should be a valid integer, unable to parse string as an integer": "Input should be a valid integer, unable to parse string as an integer",
|
||||
"Unable to parse input string as an integer, exceeded maximum size": "Unable to parse input string as an integer, exceeded maximum size",
|
||||
"Input should be a valid integer, got a number with a fractional part": "Input should be a valid integer, got a number with a fractional part",
|
||||
"Input should be a valid number": "Input should be a valid number",
|
||||
"Input should be a valid number, unable to parse string as a number": "Input should be a valid number, unable to parse string as a number",
|
||||
"Input should be a valid bytes": "Input should be a valid bytes",
|
||||
"Data should have at least {}": "Data should have at least {}",
|
||||
"Data should have at most {}": "Data should have at most {}",
|
||||
"Data should be valid {}": "Data should be valid {}",
|
||||
"Value error, {}": "Value error, {}",
|
||||
"Assertion failed, {}": "Assertion failed, {}",
|
||||
"Input should be a valid date": "Input should be a valid date",
|
||||
"Input should be a valid date in the format YYYY-MM-DD, {}": "Input should be a valid date in the format YYYY-MM-DD, {}",
|
||||
"Input should be a valid date or datetime, {}": "Input should be a valid date or datetime, {}",
|
||||
"Datetimes provided to dates should have zero time - e.g. be exact dates": "Datetimes provided to dates should have zero time - e.g. be exact dates",
|
||||
"Date should be in the past": "Date should be in the past",
|
||||
"Date should be in the future": "Date should be in the future",
|
||||
"Input should be a valid time": "Input should be a valid time",
|
||||
"Input should be in a valid time format, {}": "Input should be in a valid time format, {}",
|
||||
"Input should be a valid datetime": "Input should be a valid datetime",
|
||||
"Input should be a valid datetime, {}": "Input should be a valid datetime, {}",
|
||||
"Invalid datetime object, got {}": "Invalid datetime object, got {}",
|
||||
"Input should be a valid datetime or date, {}": "Input should be a valid datetime or date, {}",
|
||||
"Input should be in the past": "Input should be in the past",
|
||||
"Input should be in the future": "Input should be in the future",
|
||||
"Input should not have timezone info": "Input should not have timezone info",
|
||||
"Input should have timezone info": "Input should have timezone info",
|
||||
"Timezone offset of {}": "Timezone offset of {}",
|
||||
"Input should be a valid timedelta": "Input should be a valid timedelta",
|
||||
"Input should be a valid timedelta, {}": "Input should be a valid timedelta, {}",
|
||||
"Input should be a valid frozenset": "Input should be a valid frozenset",
|
||||
"Input should be a subclass of {}": "Input should be a subclass of {}",
|
||||
"Input should be callable": "Input should be callable",
|
||||
"Input tag '{}": "Input tag '{}",
|
||||
"Unable to extract tag using discriminator {}": "Unable to extract tag using discriminator {}",
|
||||
"Arguments must be a tuple, list or a dictionary": "Arguments must be a tuple, list or a dictionary",
|
||||
"Missing required argument": "Missing required argument",
|
||||
"Unexpected keyword argument": "Unexpected keyword argument",
|
||||
"Missing required keyword only argument": "Missing required keyword only argument",
|
||||
"Unexpected positional argument": "Unexpected positional argument",
|
||||
"Missing required positional only argument": "Missing required positional only argument",
|
||||
"Got multiple values for argument": "Got multiple values for argument",
|
||||
"URL input should be a string or URL": "URL input should be a string or URL",
|
||||
"Input should be a valid URL, {}": "Input should be a valid URL, {}",
|
||||
"Input violated strict URL syntax rules, {}": "Input violated strict URL syntax rules, {}",
|
||||
"URL should have at most {}": "URL should have at most {}",
|
||||
"URL scheme should be {}": "URL scheme should be {}",
|
||||
"UUID input should be a string, bytes or UUID object": "UUID input should be a string, bytes or UUID object",
|
||||
"Input should be a valid UUID, {}": "Input should be a valid UUID, {}",
|
||||
"UUID version {} expected": "UUID version {} expected",
|
||||
"Decimal input should be an integer, float, string or Decimal object": "Decimal input should be an integer, float, string or Decimal object",
|
||||
"Input should be a valid decimal": "Input should be a valid decimal",
|
||||
"Decimal input should have no more than {} in total": "Decimal input should have no more than {} in total",
|
||||
"Decimal input should have no more than {}": "Decimal input should have no more than {}",
|
||||
"Decimal input should have no more than {} before the decimal point": "Decimal input should have no more than {} before the decimal point",
|
||||
"Input should be a valid python complex object, a number, or a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex": "Input should be a valid python complex object, a number, or a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex",
|
||||
"Input should be a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex": "Input should be a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex"
|
||||
}
|
||||
98
backend/app/component/pydantic/translations/zh_CN.json
Normal file
98
backend/app/component/pydantic/translations/zh_CN.json
Normal file
@ -0,0 +1,98 @@
|
||||
{
|
||||
"Object has no attribute '{}'": "对象没有属性'{}'",
|
||||
"Invalid JSON: {}": "无效的 JSON:{}",
|
||||
"JSON input should be string, bytes or bytearray": "JSON 输入应为字符串、字节或字节数组",
|
||||
"Cannot check `{}` when validating from json, use a JsonOrPython validator instead": "在从 JSON 验证时,无法检查`{}`,请改用 JsonOrPython 验证器",
|
||||
"Recursion error - cyclic reference detected": "递归错误 - 检测到循环引用",
|
||||
"Field required": "字段必填",
|
||||
"Field is frozen": "字段已冻结",
|
||||
"Instance is frozen": "实例已冻结",
|
||||
"Extra inputs are not permitted": "不允许额外输入",
|
||||
"Keys should be strings": "键应为字符串",
|
||||
"Error extracting attribute: {}": "提取属性时出错:{}",
|
||||
"Input should be a valid dictionary or instance of {}": "输入应为有效的字典或{}的实例",
|
||||
"Input should be a valid dictionary or object to extract fields from": "输入应为有效的字典或可用于提取字段的对象",
|
||||
"Input should be a dictionary or an instance of {}": "输入应为字典或{}的实例",
|
||||
"Input should be an instance of {}": "输入应为{}的实例",
|
||||
"Input should be None": "输入应为 None",
|
||||
"Input should be greater than {}": "输入应大于{}",
|
||||
"Input should be greater than or equal to {}": "输入应大于或等于{}",
|
||||
"Input should be less than {}": "输入应小于{}",
|
||||
"Input should be less than or equal to {}": "输入应小于或等于{}",
|
||||
"Input should be a multiple of {}": "输入应为{}的倍数",
|
||||
"Input should be a finite number": "输入应为有限数字",
|
||||
"Input should be iterable": "输入应为可迭代对象",
|
||||
"Error iterating over object, error: {}": "迭代对象时出错,错误:{}",
|
||||
"Input should be a valid string": "输入应为有效字符串",
|
||||
"Input should be a string, not an instance of a subclass of str": "输入应为字符串,而不是 str 的子类实例",
|
||||
"Input should be a valid string, unable to parse raw data as a unicode string": "输入应为有效字符串,无法将原始数据解析为 Unicode 字符串",
|
||||
"String should have at least {}": "字符串应至少有{}",
|
||||
"String should have at most {}": "字符串应最多有{}",
|
||||
"String should match pattern '{}'": "字符串应匹配模式'{}'",
|
||||
"Input should be {}": "输入应为{}",
|
||||
"Input should be a valid dictionary": "输入应为有效的字典",
|
||||
"Input should be a valid mapping, error: {}": "输入应为有效的映射,错误:{}",
|
||||
"Input should be a valid list": "输入应为有效的列表",
|
||||
"Input should be a valid tuple": "输入应为有效的元组",
|
||||
"Input should be a valid set": "输入应为有效的集合",
|
||||
"Input should be a valid boolean": "输入应为有效的布尔值",
|
||||
"Input should be a valid boolean, unable to interpret input": "输入应为有效的布尔值,无法解析输入",
|
||||
"Input should be a valid integer": "输入应为有效的整数",
|
||||
"Input should be a valid integer, unable to parse string as an integer": "输入应为有效的整数,无法将字符串解析为整数",
|
||||
"Unable to parse input string as an integer, exceeded maximum size": "无法将输入字符串解析为整数,超出最大尺寸",
|
||||
"Input should be a valid integer, got a number with a fractional part": "输入应为有效的整数,但输入的数字有小数部分",
|
||||
"Input should be a valid number": "输入应为有效的数字",
|
||||
"Input should be a valid number, unable to parse string as a number": "输入应为有效的数字,无法将字符串解析为数字",
|
||||
"Input should be a valid bytes": "输入应为有效的字节",
|
||||
"Data should have at least {}": "数据应至少有{}",
|
||||
"Data should have at most {}": "数据应最多有{}",
|
||||
"Data should be valid {}": "数据应为有效的{}",
|
||||
"Value error, {}": "值错误,{}",
|
||||
"Assertion failed, {}": "断言失败,{}",
|
||||
"Input should be a valid date": "输入应为有效的日期",
|
||||
"Input should be a valid date in the format YYYY-MM-DD, {}": "输入应为有效的日期,格式为 YYYY-MM-DD,{}",
|
||||
"Input should be a valid date or datetime, {}": "输入应为有效的日期或日期时间,{}",
|
||||
"Datetimes provided to dates should have zero time - e.g. be exact dates": "提供给日期的日期时间应为零时间,即为精确日期",
|
||||
"Date should be in the past": "日期应为过去的日期",
|
||||
"Date should be in the future": "日期应为未来的日期",
|
||||
"Input should be a valid time": "输入应为有效的时间",
|
||||
"Input should be in a valid time format, {}": "输入应为有效的时间格式,{}",
|
||||
"Input should be a valid datetime": "输入应为有效的日期时间",
|
||||
"Input should be a valid datetime, {}": "输入应为有效的日期时间,{}",
|
||||
"Invalid datetime object, got {}": "无效的日期时间对象,得到{}",
|
||||
"Input should be a valid datetime or date, {}": "输入应为有效的日期时间或日期,{}",
|
||||
"Input should be in the past": "输入应为过去的日期/时间",
|
||||
"Input should be in the future": "输入应为未来的日期/时间",
|
||||
"Input should not have timezone info": "输入不应包含时区信息",
|
||||
"Input should have timezone info": "输入应包含时区信息",
|
||||
"Timezone offset of {}": "时区偏移量为{}",
|
||||
"Input should be a valid timedelta": "输入应为有效的 timedelta",
|
||||
"Input should be a valid timedelta, {}": "输入应为有效的 timedelta,{}",
|
||||
"Input should be a valid frozenset": "输入应为有效的 frozenset",
|
||||
"Input should be a subclass of {}": "输入应为{}的子类",
|
||||
"Input should be callable": "输入应为可调用对象",
|
||||
"Input tag '{}": "输入标签'{}'",
|
||||
"Unable to extract tag using discriminator {}": "无法使用区分符{}提取标签",
|
||||
"Arguments must be a tuple, list or a dictionary": "参数必须是元组、列表或字典",
|
||||
"Missing required argument": "缺少必需的参数",
|
||||
"Unexpected keyword argument": "意外的关键字参数",
|
||||
"Missing required keyword only argument": "缺少必需的关键字参数",
|
||||
"Unexpected positional argument": "意外的位置参数",
|
||||
"Missing required positional only argument": "缺少必需的位置参数",
|
||||
"Got multiple values for argument": "为参数提供了多个值",
|
||||
"URL input should be a string or URL": "URL 输入应为字符串或 URL",
|
||||
"Input should be a valid URL, {}": "输入应为有效的 URL,{}",
|
||||
"Input violated strict URL syntax rules, {}": "输入违反了严格的 URL 语法规则,{}",
|
||||
"URL should have at most {}": "URL 应最多有{}",
|
||||
"URL scheme should be {}": "URL 的协议应为{}",
|
||||
"UUID input should be a string, bytes or UUID object": "UUID 输入应为字符串、字节或 UUID 对象",
|
||||
"Input should be a valid UUID, {}": "输入应为有效的 UUID,{}",
|
||||
"UUID version {} expected": "期望的 UUID 版本为{}",
|
||||
"Decimal input should be an integer, float, string or Decimal object": "十进制输入应为整数、浮点数、字符串或 Decimal 对象",
|
||||
"Input should be a valid decimal": "输入应为有效的十进制数",
|
||||
"Decimal input should have no more than {} in total": "十进制输入的总位数不应超过{}",
|
||||
"Decimal input should have no more than {}": "十进制输入不应超过{}",
|
||||
"Decimal input should have no more than {} before the decimal point": "十进制输入的小数点前不应超过{}位",
|
||||
"Input should be a valid python complex object, a number, or a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex": "输入应为有效的 Python 复杂对象、数字,或遵循 https://docs.python.org/3/library/functions.html#complex 规则的有效复杂字符串",
|
||||
"Input should be a valid complex string following the rules at https://docs.python.org/3/library/functions.html#complex": "输入应为遵循 https://docs.python.org/3/library/functions.html#complex 规则的有效复杂字符串"
|
||||
}
|
||||
0
backend/app/controller/__init__.py
Normal file
0
backend/app/controller/__init__.py
Normal file
86
backend/app/controller/chat_controller.py
Normal file
86
backend/app/controller/chat_controller.py
Normal file
@ -0,0 +1,86 @@
|
||||
import asyncio
|
||||
import os
|
||||
import re
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import APIRouter, Request, Response
|
||||
from fastapi.responses import StreamingResponse
|
||||
from loguru import logger
|
||||
from app.component import code
|
||||
from app.exception.exception import UserException
|
||||
from app.model.chat import Chat, HumanReply, McpServers, Status, SupplementChat
|
||||
from app.service.chat_service import step_solve
|
||||
from app.service.task import (
|
||||
Action,
|
||||
ActionImproveData,
|
||||
ActionInstallMcpData,
|
||||
ActionStopData,
|
||||
ActionSupplementData,
|
||||
get_task_lock,
|
||||
)
|
||||
|
||||
|
||||
router = APIRouter(tags=["chat"])
|
||||
|
||||
|
||||
@router.post("/chat", name="start chat")
def post(data: Chat, request: Request):
    """Start a chat task and stream its SSE events.

    Configures process-wide environment variables from the request payload,
    prepares the camel log directory, then streams events produced by
    step_solve until the task ends or the client disconnects.
    """
    # Load user-supplied environment overrides (API keys etc.) if provided.
    load_dotenv(dotenv_path=data.env_path)

    logger.debug(f"start chat: {data.model_dump_json()}")

    # NOTE(review): these env vars are process-global; concurrent chats with
    # different credentials would overwrite each other — confirm the
    # one-task-per-process assumption.
    os.environ["file_save_path"] = data.file_save_path()
    os.environ["browser_port"] = str(data.browser_port)
    os.environ["OPENAI_API_KEY"] = data.api_key
    os.environ["OPENAI_API_BASE_URL"] = data.api_url or "https://api.openai.com/v1"
    os.environ["CAMEL_MODEL_LOG_ENABLED"] = "true"

    # Sanitize the email local-part so it is safe to use as a directory name.
    email = re.sub(r'[\\/*?:"<>|\s]', "_", data.email.split("@")[0]).strip(".")
    # NOTE(review): logs go under ~/.eigent while Chat.file_save_path writes
    # under ~/eigent — confirm this difference is intentional.
    camel_log = Path.home() / ".eigent" / email / ("task_" + data.task_id) / "camel_logs"
    camel_log.mkdir(parents=True, exist_ok=True)

    os.environ["CAMEL_LOG_DIR"] = str(camel_log)

    if data.is_cloud():
        os.environ["cloud_api_key"] = data.api_key
    return StreamingResponse(step_solve(data, request), media_type="text/event-stream")
|
||||
|
||||
|
||||
@router.post("/chat/{id}", name="improve chat")
def improve(id: str, data: SupplementChat):
    """Queue a follow-up question that refines a task still in progress."""
    lock = get_task_lock(id)
    # A finished task can no longer be improved.
    if lock.status == Status.done:
        raise UserException(code.error, "Task was done")
    improve_action = ActionImproveData(data=data.question)
    asyncio.run(lock.put_queue(improve_action))
    return Response(status_code=201)
|
||||
|
||||
|
||||
@router.put("/chat/{id}", name="supplement task")
def supplement(id: str, data: SupplementChat):
    """Queue an additional question for a task that has already finished."""
    lock = get_task_lock(id)
    # Supplementing is only valid once the original task has completed.
    if lock.status != Status.done:
        raise UserException(code.error, "Please wait task done")
    supplement_action = ActionSupplementData(data=data)
    asyncio.run(lock.put_queue(supplement_action))
    return Response(status_code=201)
|
||||
|
||||
|
||||
@router.delete("/chat/{id}", name="stop chat")
def stop(id: str):
    """stop the task"""
    lock = get_task_lock(id)
    stop_action = ActionStopData(action=Action.stop)
    asyncio.run(lock.put_queue(stop_action))
    return Response(status_code=204)
|
||||
|
||||
|
||||
@router.post("/chat/{id}/human-reply")
def human_reply(id: str, data: HumanReply):
    """Forward the user's answer to the agent that asked for input."""
    lock = get_task_lock(id)
    asyncio.run(lock.put_human_input(data.agent, data.reply))
    return Response(status_code=201)
|
||||
|
||||
|
||||
@router.post("/chat/{id}/install-mcp")
def install_mcp(id: str, data: McpServers):
    """Queue installation of additional MCP servers for a running task."""
    lock = get_task_lock(id)
    install_action = ActionInstallMcpData(action=Action.install_mcp, data=data)
    asyncio.run(lock.put_queue(install_action))
    return Response(status_code=201)
|
||||
50
backend/app/controller/model_controller.py
Normal file
50
backend/app/controller/model_controller.py
Normal file
@ -0,0 +1,50 @@
|
||||
from fastapi import APIRouter
|
||||
from pydantic import BaseModel, Field
|
||||
from app.component.model_validation import create_agent
|
||||
|
||||
|
||||
router = APIRouter(tags=["model"])
|
||||
|
||||
|
||||
class ValidateModelRequest(BaseModel):
    """Request payload for POST /model/validate."""

    model_platform: str = Field("OPENAI", description="Model platform")
    model_type: str = Field("GPT_4O_MINI", description="Model type")
    api_key: str | None = Field(None, description="API key")
    url: str | None = Field(None, description="Model URL")
    model_config_dict: dict | None = Field(None, description="Model config dict")
    extra_params: dict | None = Field(None, description="Extra model parameters")
|
||||
|
||||
|
||||
class ValidateModelResponse(BaseModel):
    """Result of a model-credential validation run."""

    is_valid: bool = Field(..., description="Is valid")
    is_tool_calls: bool = Field(..., description="Is tool call used")
    message: str = Field(..., description="Message")
|
||||
|
||||
|
||||
@router.post("/model/validate")
async def validate_model(request: ValidateModelRequest):
    """Validate model credentials by running one agent step that must call a tool.

    Builds an agent from the supplied platform/key/url and asks it to fetch a
    known page via the get_website_content tool. Never raises: construction or
    step failures are reported with is_valid=False and the error message.
    """
    try:
        extra = request.extra_params or {}

        agent = create_agent(
            request.model_platform,
            request.model_type,
            api_key=request.api_key,
            url=request.url,
            model_config_dict=request.model_config_dict,
            **extra,
        )
        response = agent.step(
            input_message="""
    Get the content of https://www.camel-ai.org,
    you must use the get_website_content tool to get the content ,
    i just want to verify the get_website_content tool is working
    """
        )
    except Exception as e:
        return ValidateModelResponse(is_valid=False, is_tool_calls=False, message=str(e))
    # Fix: models that never invoke the tool leave info["tool_calls"] missing or
    # empty; the previous direct [0] access raised KeyError/IndexError outside
    # the try block and produced an unhandled 500.
    tool_calls = response.info.get("tool_calls") or []
    is_tool_calls = bool(tool_calls) and tool_calls[0].result == "Welcome to CAMEL AI!"
    return ValidateModelResponse(
        is_valid=bool(response),
        is_tool_calls=is_tool_calls,
        message="",
    )
|
||||
52
backend/app/controller/task_controller.py
Normal file
52
backend/app/controller/task_controller.py
Normal file
@ -0,0 +1,52 @@
|
||||
from typing import Literal
|
||||
from dotenv import load_dotenv
|
||||
from fastapi import APIRouter, Response
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel
|
||||
from app.model.chat import NewAgent, UpdateData
|
||||
from app.service.task import (
|
||||
Action,
|
||||
ActionNewAgent,
|
||||
ActionTakeControl,
|
||||
ActionStartData,
|
||||
ActionUpdateTaskData,
|
||||
get_task_lock,
|
||||
)
|
||||
import asyncio
|
||||
|
||||
|
||||
router = APIRouter(tags=["task"])
|
||||
|
||||
|
||||
@router.post("/task/{id}/start", name="start task")
def start(id: str):
    """Kick off execution of an already-confirmed task."""
    lock = get_task_lock(id)
    logger.debug(f"start task {id}")
    start_action = ActionStartData(action=Action.start)
    asyncio.run(lock.put_queue(start_action))
    logger.debug(f"start task {id} success")
    return Response(status_code=201)
|
||||
|
||||
|
||||
@router.put("/task/{id}", name="update task")
def put(id: str, data: UpdateData):
    """Queue an update of the task's subtask list."""
    lock = get_task_lock(id)
    update_action = ActionUpdateTaskData(action=Action.update_task, data=data)
    asyncio.run(lock.put_queue(update_action))
    return Response(status_code=201)
|
||||
|
||||
|
||||
class TakeControl(BaseModel):
    """Payload for pausing or resuming a running task."""

    # Only pause/resume are valid take-control actions.
    action: Literal[Action.pause, Action.resume]
|
||||
|
||||
|
||||
@router.put("/task/{id}/take-control", name="take control pause or resume")
def take_control(id: str, data: TakeControl):
    """Pause or resume a running task."""
    lock = get_task_lock(id)
    control_action = ActionTakeControl(action=data.action)
    asyncio.run(lock.put_queue(control_action))
    return Response(status_code=204)
|
||||
|
||||
|
||||
@router.post("/task/{id}/add-agent", name="add new agent")
def add_agent(id: str, data: NewAgent):
    """Queue creation of a user-defined worker agent for a running task."""
    # Environment overrides may carry credentials the new agent's tools need.
    load_dotenv(dotenv_path=data.env_path)
    lock = get_task_lock(id)
    asyncio.run(lock.put_queue(ActionNewAgent(**data.model_dump())))
    return Response(status_code=204)
|
||||
18
backend/app/controller/tool_controller.py
Normal file
18
backend/app/controller/tool_controller.py
Normal file
@ -0,0 +1,18 @@
|
||||
from fastapi import APIRouter
|
||||
|
||||
from app.utils.toolkit.notion_mcp_toolkit import NotionMCPToolkit
|
||||
|
||||
|
||||
router = APIRouter(tags=["task"])
|
||||
|
||||
|
||||
@router.post("/install/tool/{tool}", name="install tool")
async def install_tool(tool: str):
    """Connect the named toolkit, return its tool function names, then disconnect.

    Currently only "notion" is supported; any other name yields an error dict.
    """
    if tool != "notion":
        return {"error": "Tool not found"}
    toolkit = NotionMCPToolkit(tool)
    await toolkit.connect()
    try:
        # Fix: the loop variable previously shadowed the `tool` path parameter.
        return [t.func.__name__ for t in toolkit.get_tools()]
    finally:
        # Fix: always release the MCP connection, even if listing tools fails
        # (previously an exception leaked the connection).
        await toolkit.disconnect()
|
||||
20
backend/app/exception/exception.py
Normal file
20
backend/app/exception/exception.py
Normal file
@ -0,0 +1,20 @@
|
||||
class UserException(Exception):
    """Business-level error surfaced to the end user.

    Args:
        code: application error code (see app.component.code).
        description: human-readable message shown to the user.
    """

    def __init__(self, code: int, description: str):
        # Forward the message to Exception so str(e) and log lines are
        # informative instead of empty.
        super().__init__(description)
        self.code = code
        self.description = description
|
||||
|
||||
|
||||
class TokenException(Exception):
    """Raised when an auth token is missing, invalid, or expired.

    Args:
        code: application error code.
        text: human-readable message.
    """

    def __init__(self, code: int, text: str):
        # Forward the message to Exception so str(e) is informative.
        super().__init__(text)
        self.code = code
        self.text = text
|
||||
|
||||
|
||||
class NoPermissionException(Exception):
    """Raised when the caller lacks permission for the requested operation."""

    def __init__(self, text: str):
        # Forward the message to Exception so str(e) is informative.
        super().__init__(text)
        self.text = text
|
||||
|
||||
|
||||
class ProgramException(Exception):
    """Raised for internal program errors that are not user-caused."""

    def __init__(self, text: str):
        # Forward the message to Exception so str(e) is informative.
        super().__init__(text)
        self.text = text
|
||||
63
backend/app/exception/handler.py
Normal file
63
backend/app/exception/handler.py
Normal file
@ -0,0 +1,63 @@
|
||||
import traceback
|
||||
from fastapi import Request
|
||||
from fastapi.encoders import jsonable_encoder
|
||||
from fastapi.exceptions import RequestValidationError
|
||||
from fastapi.responses import JSONResponse
|
||||
from loguru import logger
|
||||
from app import api
|
||||
from app.component import code
|
||||
from app.exception.exception import NoPermissionException, ProgramException, TokenException
|
||||
from app.component.pydantic.i18n import trans, get_language
|
||||
from app.exception.exception import UserException
|
||||
|
||||
|
||||
@api.exception_handler(RequestValidationError)
async def request_exception(request: Request, e: RequestValidationError):
    """Translate pydantic validation errors into the client's locale."""
    # Fall back to US English when the Accept-Language header is absent or
    # unrecognized.
    if (lang := get_language(request.headers.get("Accept-Language"))) is None:
        lang = "en_US"
    return JSONResponse(
        content={
            "code": code.form_error,
            "error": jsonable_encoder(trans.translate(list(e.errors()), locale=lang)),
        }
    )
|
||||
|
||||
|
||||
@api.exception_handler(TokenException)
async def token_exception(request: Request, e: TokenException):
    """Serialize token failures as a JSON payload."""
    payload = {"code": e.code, "text": e.text}
    return JSONResponse(content=payload)
|
||||
|
||||
|
||||
@api.exception_handler(UserException)
async def user_exception(request: Request, e: UserException):
    """Serialize business errors as a JSON payload."""
    payload = {"code": e.code, "text": e.description}
    return JSONResponse(content=payload)
|
||||
|
||||
|
||||
@api.exception_handler(NoPermissionException)
async def no_permission(request: Request, exception: NoPermissionException):
    """Report permission failures with HTTP 200 and an app-level error code."""
    body = {"code": code.no_permission_error, "text": exception.text}
    return JSONResponse(status_code=200, content=body)
|
||||
|
||||
|
||||
@api.exception_handler(ProgramException)
async def program_exception(request: Request, exception: ProgramException):
    """Report internal program errors with HTTP 200 and an app-level error code.

    Fix: the parameter was annotated as NoPermissionException although this
    handler is registered for ProgramException.
    """
    return JSONResponse(
        status_code=200,
        content={"code": code.program_error, "text": exception.text},
    )
|
||||
|
||||
|
||||
@api.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Catch-all handler: log the failure and return a generic 500 payload."""
    logger.error(f"Unhandled error: {exc}")
    traceback.print_exc()  # output to electron log

    body = {
        "code": 500,
        "message": str(exc),
    }
    return JSONResponse(status_code=500, content=body)
|
||||
6
backend/app/middleware/__init__.py
Normal file
6
backend/app/middleware/__init__.py
Normal file
@ -0,0 +1,6 @@
|
||||
from app import api
from app.component.babel import babel_configs
from fastapi_babel import BabelMiddleware

# Register the i18n middleware so per-request locale resolution is available
# to the whole application.
api.add_middleware(BabelMiddleware, babel_configs=babel_configs)
|
||||
111
backend/app/model/chat.py
Normal file
111
backend/app/model/chat.py
Normal file
@ -0,0 +1,111 @@
|
||||
from enum import Enum
|
||||
import json
|
||||
from pathlib import Path
|
||||
import re
|
||||
from typing import Literal
|
||||
from loguru import logger
|
||||
from pydantic import BaseModel, field_validator
|
||||
from camel.types import ModelType, RoleType
|
||||
|
||||
|
||||
class Status(str, Enum):
    """Lifecycle states of a chat task."""

    confirming = "confirming"
    confirmed = "confirmed"
    processing = "processing"
    done = "done"
|
||||
|
||||
|
||||
class ChatHistory(BaseModel):
    """One message of an agent conversation."""

    role: RoleType
    content: str
|
||||
|
||||
|
||||
# MCP config shape: {"mcpServers": {server_name: server_config_dict}}.
McpServers = dict[Literal["mcpServers"], dict[str, dict]]
|
||||
|
||||
|
||||
class Chat(BaseModel):
    """Payload for starting a chat task (POST /chat)."""

    task_id: str
    question: str
    email: str
    attaches: list[str] = []  # local file paths attached to the task
    model_platform: str
    model_type: str
    api_key: str
    api_url: str | None = None  # for cloud version, user doesn't need to set api_url
    language: str = "en"
    browser_port: int = 9222
    max_retries: int = 3
    allow_local_system: bool = False
    installed_mcp: McpServers = {"mcpServers": {}}
    bun_mirror: str = ""  # optional registry mirror passed via NPM_CONFIG_REGISTRY
    uvx_mirror: str = ""  # optional index mirror passed via UV_DEFAULT_INDEX / PIP_INDEX_URL
    env_path: str | None = None
    # Appended to the user's question so the workforce emits a final summary.
    summary_prompt: str = (
        "After completing the task, please generate a summary of the entire task completion. "
        "The summary must be enclosed in <summary></summary> tags and include:\n"
        "1. A confirmation of task completion, referencing the original goal.\n"
        "2. A high-level overview of the work performed and the final outcome.\n"
        "3. A bulleted list of key results or accomplishments.\n"
        "Adopt a confident and professional tone."
    )
    new_agents: list["NewAgent"] = []
    extra_params: dict | None = None  # For provider-specific parameters like Azure

    @field_validator("model_type")
    @classmethod
    def check_model_type(cls, model_type: str):
        # Lenient validation: unknown model types are logged, not rejected.
        try:
            ModelType(model_type)
        except ValueError:
            # raise ValueError("Invalid model type")
            logger.debug("model_type is invalid")
        return model_type

    def get_bun_env(self) -> dict[str, str]:
        # Env overrides for bun-installed MCP servers; empty when no mirror set.
        return {"NPM_CONFIG_REGISTRY": self.bun_mirror} if self.bun_mirror else {}

    def get_uvx_env(self) -> dict[str, str]:
        # Env overrides for uvx-installed MCP servers; empty when no mirror set.
        return {"UV_DEFAULT_INDEX": self.uvx_mirror, "PIP_INDEX_URL": self.uvx_mirror} if self.uvx_mirror else {}

    def is_cloud(self):
        # Cloud mode is signalled by the presence of api_url.
        return self.api_url is not None

    def file_save_path(self, path: str | None = None):
        """Return (and create) the per-task working directory for this user."""
        # Sanitize the email local-part so it is safe as a directory name.
        email = re.sub(r'[\\/*?:"<>|\s]', "_", self.email.split("@")[0]).strip(".")
        # NOTE(review): this writes under ~/eigent while the chat controller's
        # log path uses ~/.eigent — confirm the difference is intentional.
        save_path = Path.home() / "eigent" / email / ("task_" + self.task_id)
        if path is not None:
            save_path = save_path / path
        save_path.mkdir(parents=True, exist_ok=True)

        return str(save_path)
|
||||
|
||||
|
||||
class SupplementChat(BaseModel):
    """A follow-up question for an existing task."""

    question: str
|
||||
|
||||
|
||||
class HumanReply(BaseModel):
    """A user's answer to a question an agent asked for human input."""

    agent: str
    reply: str
|
||||
|
||||
|
||||
class TaskContent(BaseModel):
    """One subtask entry sent by the client when updating a task."""

    # Subtask id; an empty string marks a brand-new subtask (see add_sub_tasks).
    id: str
    content: str
|
||||
|
||||
|
||||
class UpdateData(BaseModel):
    """Payload for PUT /task/{id}: the subtask list to keep and update."""

    task: list[TaskContent]
|
||||
|
||||
|
||||
class NewAgent(BaseModel):
    """Definition of a user-created worker agent."""

    name: str
    description: str
    tools: list[str]  # built-in toolkit names to attach
    mcp_tools: McpServers | None  # required field, may be explicitly null
    env_path: str | None = None
|
||||
|
||||
|
||||
def sse_json(step: str, data):
    """Serialize one server-sent-event frame: ``data: <json>\\n\\n``."""
    payload = json.dumps({"step": step, "data": data}, ensure_ascii=False)
    return f"data: {payload}\n\n"
|
||||
491
backend/app/service/chat_service.py
Normal file
491
backend/app/service/chat_service.py
Normal file
@ -0,0 +1,491 @@
|
||||
import asyncio
|
||||
import datetime
|
||||
from pathlib import Path
|
||||
import platform
|
||||
from typing import Literal
|
||||
from fastapi import Request
|
||||
from inflection import titleize
|
||||
from pydash import chain
|
||||
from app.component.debug import dump_class
|
||||
from app.component.environment import env
|
||||
from app.service.task import (
|
||||
ActionImproveData,
|
||||
ActionInstallMcpData,
|
||||
ActionNewAgent,
|
||||
create_task_lock,
|
||||
delete_task_lock,
|
||||
)
|
||||
from camel.toolkits import AgentCommunicationToolkit, ToolkitMessageIntegration
|
||||
from app.utils.toolkit.human_toolkit import HumanToolkit
|
||||
from app.utils.toolkit.note_taking_toolkit import NoteTakingToolkit
|
||||
from app.utils.workforce import Workforce
|
||||
from loguru import logger
|
||||
from app.model.chat import Chat, NewAgent, Status, sse_json, TaskContent
|
||||
from camel.tasks import Task
|
||||
from app.utils.agent import (
|
||||
ListenChatAgent,
|
||||
agent_model,
|
||||
get_mcp_tools,
|
||||
get_toolkits,
|
||||
mcp_agent,
|
||||
developer_agent,
|
||||
document_agent,
|
||||
multi_modal_agent,
|
||||
search_agent,
|
||||
social_medium_agent,
|
||||
task_summary_agent,
|
||||
question_confirm_agent,
|
||||
)
|
||||
from app.service.task import Action, Agents
|
||||
from app.utils.server.sync_step import sync_step
|
||||
from camel.types import ModelPlatformType
|
||||
from camel.models import ModelProcessingError
|
||||
|
||||
|
||||
@sync_step
async def step_solve(options: Chat, request: Request):
    """Event loop for one chat task.

    Consumes actions from the task-lock queue and yields SSE frames (via
    sse_json) until the task ends, is stopped, or the client disconnects.
    """
    # if True:
    #     import faulthandler

    #     faulthandler.enable()
    #     for second in [5, 10, 20, 30, 60, 120, 240]:
    #         faulthandler.dump_traceback_later(second)
    task_lock = create_task_lock(options.task_id)

    start_event_loop = True
    question_agent = question_confirm_agent(options)
    camel_task = None
    workforce = None
    while True:
        # Client gone: stop the workforce and end the stream.
        if await request.is_disconnected():
            if workforce is not None:
                workforce.stop()
            break
        try:
            item = await task_lock.get_queue()
            logger.info(f"item: {dump_class(item)}")
        except Exception as e:
            logger.error(f"Error getting item from queue: {e}")
            break

        try:
            # First iteration behaves like an "improve" action using the
            # original question.
            if item.action == Action.improve or start_event_loop:
                # from viztracer import VizTracer

                # tracer = VizTracer()
                # tracer.start()
                if start_event_loop is True:
                    question = options.question
                    start_event_loop = False
                else:
                    assert isinstance(item, ActionImproveData)
                    question = item.data
                # Very short questions without attachments get a confirmation
                # round-trip first.
                if len(question) < 12 and len(options.attaches) == 0:
                    confirm = await question_confirm(question_agent, question)
                else:
                    confirm = True

                if confirm is not True:
                    yield confirm
                else:
                    yield sse_json("confirmed", "")
                    (workforce, mcp) = await construct_workforce(options)
                    for new_agent in options.new_agents:
                        workforce.add_single_agent_worker(
                            format_agent_description(new_agent), await new_agent_model(new_agent, options)
                        )
                    summary_task_agent = task_summary_agent(options)
                    task_lock.status = Status.confirmed
                    question = question + options.summary_prompt
                    camel_task = Task(content=question, id=options.task_id)
                    if len(options.attaches) > 0:
                        camel_task.additional_info = {Path(file_path).name: file_path for file_path in options.attaches}

                    sub_tasks = workforce.eigent_make_sub_tasks(camel_task)
                    summary_task_content = await summary_task(summary_task_agent, camel_task)
                    yield to_sub_tasks(camel_task, summary_task_content)
                    # tracer.stop()
                    # tracer.save("trace.json")
                    # In debug mode, start immediately without waiting for an
                    # explicit Action.start.
                    if env("debug") == "on":
                        task_lock.status = Status.processing
                        task = asyncio.create_task(workforce.eigent_start(sub_tasks))
                        task_lock.add_background_task(task)

            elif item.action == Action.update_task:
                assert camel_task is not None
                update_tasks = {item.id: item for item in item.data.task}
                sub_tasks = update_sub_tasks(sub_tasks, update_tasks)
                add_sub_tasks(camel_task, item.data.task)
                yield to_sub_tasks(camel_task, summary_task_content)
            elif item.action == Action.start:
                task_lock.status = Status.processing
                task = asyncio.create_task(workforce.eigent_start(sub_tasks))
                task_lock.add_background_task(task)
            elif item.action == Action.task_state:
                yield sse_json("task_state", item.data)
            elif item.action == Action.create_agent:
                yield sse_json("create_agent", item.data)
            elif item.action == Action.activate_agent:
                yield sse_json("activate_agent", item.data)
            elif item.action == Action.deactivate_agent:
                yield sse_json("deactivate_agent", dict(item.data))
            elif item.action == Action.assign_task:
                yield sse_json("assign_task", item.data)
            elif item.action == Action.activate_toolkit:
                yield sse_json("activate_toolkit", item.data)
            elif item.action == Action.deactivate_toolkit:
                yield sse_json("deactivate_toolkit", item.data)
            elif item.action == Action.write_file:
                yield sse_json(
                    "write_file",
                    {"file_path": item.data, "process_task_id": item.process_task_id},
                )
            elif item.action == Action.ask:
                yield sse_json("ask", item.data)
            elif item.action == Action.notice:
                yield sse_json(
                    "notice",
                    {"notice": item.data, "process_task_id": item.process_task_id},
                )
            elif item.action == Action.search_mcp:
                yield sse_json("search_mcp", item.data)
            elif item.action == Action.install_mcp:
                # MCP installation runs in the background so the stream stays live.
                task = asyncio.create_task(install_mcp(mcp, item))
                task_lock.add_background_task(task)
            elif item.action == Action.terminal:
                yield sse_json(
                    "terminal",
                    {"output": item.data, "process_task_id": item.process_task_id},
                )
            elif item.action == Action.pause:
                if workforce is not None:
                    workforce.pause()
            elif item.action == Action.resume:
                if workforce is not None:
                    workforce.resume()
            elif item.action == Action.new_agent:
                # Pause while attaching the new worker, then resume.
                if workforce is not None:
                    workforce.pause()
                    workforce.add_single_agent_worker(format_agent_description(item), await new_agent_model(item, options))
                    workforce.resume()
            elif item.action == Action.end:
                assert camel_task is not None
                task_lock.status = Status.done
                yield sse_json("end", str(camel_task.result))
                if workforce is not None:
                    workforce.stop_gracefully()
                break
            elif item.action == Action.supplement:
                assert camel_task is not None
                task_lock.status = Status.processing
                camel_task.add_subtask(
                    Task(
                        content=item.data.question,
                        id=f"{camel_task.id}.{len(camel_task.subtasks)}",
                    )
                )
                task = asyncio.create_task(workforce.eigent_start(camel_task.subtasks))
                task_lock.add_background_task(task)
            elif item.action == Action.budget_not_enough:
                if workforce is not None:
                    workforce.pause()
                yield sse_json(Action.budget_not_enough, {"message": "budget not enouth"})
            elif item.action == Action.stop:
                if workforce is not None:
                    if workforce._running:
                        workforce.stop()
                    workforce.stop_gracefully()
                await delete_task_lock(task_lock.id)
                break
            else:
                logger.warning(f"Unknown action: {item.action}")
        except ModelProcessingError as e:
            if "Budget has been exceeded" in str(e):
                # workforce decompose task don't use ListenAgent, this need return sse
                if 'workforce' in locals() and workforce is not None:
                    workforce.pause()
                yield sse_json(Action.budget_not_enough, {"message": "budget not enouth"})
            else:
                logger.error(f"Error processing action {item.action}: {e}")
                yield sse_json("error", {"message": str(e)})
                if 'workforce' in locals() and workforce is not None and workforce._running:
                    workforce.stop()
        except Exception as e:
            logger.error(f"Error processing action {item.action}: {e}")
            raise e
            # Continue processing other items instead of breaking
|
||||
|
||||
|
||||
async def install_mcp(
    mcp: ListenChatAgent,
    install_mcp: ActionInstallMcpData,
):
    """Resolve tools for newly installed MCP servers and attach them to the MCP agent."""
    new_tools = await get_mcp_tools(install_mcp.data)
    mcp.add_tools(new_tools)
|
||||
|
||||
|
||||
def to_sub_tasks(task: Task, summary_task_content: str):
    """Build the "to_sub_tasks" SSE frame: task summary plus the subtask tree."""
    payload = {
        "summary_task": summary_task_content,
        "sub_tasks": tree_sub_tasks(task.subtasks),
    }
    return sse_json("to_sub_tasks", payload)
|
||||
|
||||
|
||||
def tree_sub_tasks(sub_tasks: list[Task], depth: int = 0):
    """Convert Task objects into a nested list of plain dicts (max depth 5)."""
    if depth > 5:
        return []
    return [
        {
            "id": task.id,
            "content": task.content,
            "state": task.state,
            "subtasks": tree_sub_tasks(task.subtasks, depth + 1),
        }
        for task in sub_tasks
    ]
|
||||
|
||||
|
||||
def update_sub_tasks(sub_tasks: list[Task], update_tasks: dict[str, TaskContent], depth: int = 0):
    """Apply client edits in place: keep and update listed subtasks, drop the rest."""
    if depth > 5:  # limit the depth of the recursion
        return []

    kept = []
    for task in sub_tasks:
        patch = update_tasks.get(task.id)
        if patch is None:
            # Not present in the update -> remove this subtask.
            continue
        task.content = patch.content
        update_sub_tasks(task.subtasks, update_tasks, depth + 1)
        kept.append(task)
    # Mutate the original list so callers holding a reference see the result.
    sub_tasks[:] = kept
    return sub_tasks
|
||||
|
||||
|
||||
def add_sub_tasks(camel_task: Task, update_tasks: list[TaskContent]):
    """Append brand-new subtasks (entries with an empty id) to camel_task."""
    for entry in update_tasks:
        if entry.id != "":
            continue
        next_id = f"{camel_task.id}.{len(camel_task.subtasks) + 1}"
        camel_task.add_subtask(Task(content=entry.content, id=next_id))
|
||||
|
||||
|
||||
async def question_confirm(agent: ListenChatAgent, prompt: str) -> str | Literal[True]:
    """Ask the confirm agent whether the request needs the full workforce.

    Returns True when the agent answers "yes" (complex task), otherwise a
    "wait_confirm" SSE frame carrying the agent's direct answer.
    """
    prompt = f"""
> **Your Role:** You are a highly capable agent. Your primary function is to analyze a user's request and determine the appropriate course of action.
>
> **Your Process:**
>
> 1. **Analyze the User's Query:** Carefully examine the user's request: `{prompt}`.
>
> 2. **Categorize the Query:**
> * **Simple Query:** Is this a simple greeting, a question that can be answered directly, or a conversational interaction (e.g., "hello", "thank you")?
> * **Complex Task:** Is this a request that requires a series of steps, code execution, or interaction with tools to complete?
>
> 3. **Execute Your Decision:**
> * **For a Simple Query:** Provide a direct and helpful response.
> * **For a Complex Task:** Your *only* response should be "yes". This will trigger a specialized workforce to handle the task. Do not include any other text, punctuation, or pleasantries.
"""
    resp = agent.step(prompt)
    logger.info(f"resp: {agent.chat_history}")
    # Anything other than a bare "yes" is treated as a direct answer to the user.
    if resp.msgs[0].content.lower() != "yes":
        return sse_json("wait_confirm", {"content": resp.msgs[0].content})
    else:
        return True
|
||||
|
||||
|
||||
async def summary_task(agent: ListenChatAgent, task: Task) -> str:
    """Ask the summary agent for a "Name|Summary" line describing the task."""
    prompt = f"""The user's task is:
---
{task.to_string()}
---
Your instructions are:
1. Come up with a short and descriptive name for this task.
2. Create a concise summary of the task's main points and objectives.
3. Return the task name and the summary, separated by a vertical bar (|).

Example format: "Task Name|This is the summary of the task."
Do not include any other text or formatting.
"""
    res = agent.step(prompt)
    logger.info(f"summary_task: {res.msgs[0].content}")
    return res.msgs[0].content
|
||||
|
||||
|
||||
async def construct_workforce(options: Chat) -> tuple[Workforce, ListenChatAgent]:
    """Build the Workforce with its default worker agents.

    Returns (workforce, mcp_agent); the MCP agent is created but currently not
    registered as a worker.
    """
    working_directory = options.file_save_path()
    # Coordinator and task-planner agents share the same toolkit wiring and
    # differ only in their system prompt.
    [coordinator_agent, task_agent] = [
        agent_model(
            key,
            prompt,
            options,
            [
                *(
                    ToolkitMessageIntegration(
                        message_handler=HumanToolkit(options.task_id, key).send_message_to_user
                    ).register_toolkits(NoteTakingToolkit(options.task_id, working_directory=working_directory))
                ).get_tools()
            ],
        )
        for key, prompt in {
            Agents.coordinator_agent: f"""
You are a helpful coordinator.
- You are now working in system {platform.system()} with architecture
{platform.machine()} at working directory `{working_directory}`. All your
work related to local operations should be done in that directory.
The current date is {datetime.date.today()}. For any date-related tasks, you MUST use this as the current date.

- If a task assigned to another agent fails, you should re-assign it to the
`Developer_Agent`. The `Developer_Agent` is a powerful agent with terminal
access and can resolve a wide range of issues.
""",
            Agents.task_agent: f"""
You are a helpful task planner.
- You are now working in system {platform.system()} with architecture
{platform.machine()} at working directory `{working_directory}`. All your
work related to local operations should be done in that directory.
The current date is {datetime.date.today()}. For any date-related tasks, you MUST use this as the current date.
""",
        }.items()
    ]
    # Template agent used when the workforce spawns new workers dynamically.
    new_worker_agent = agent_model(
        Agents.new_worker_agent,
        f"""
You are a helpful assistant.
- You are now working in system {platform.system()} with architecture
{platform.machine()} at working directory `{working_directory}`. All your
work related to local operations should be done in that directory.
The current date is {datetime.date.today()}. For any date-related tasks, you MUST use this as the current date.
""",
        options,
        [
            *HumanToolkit.get_can_use_tools(options.task_id, Agents.new_worker_agent),
            *(
                ToolkitMessageIntegration(
                    message_handler=HumanToolkit(options.task_id, Agents.new_worker_agent).send_message_to_user
                ).register_toolkits(NoteTakingToolkit(options.task_id, working_directory=working_directory))
            ).get_tools(),
        ],
    )
    # msg_toolkit = AgentCommunicationToolkit(max_message_history=100)

    searcher = search_agent(options)
    developer = await developer_agent(options)
    documenter = await document_agent(options)
    multi_modaler = multi_modal_agent(options)

    # msg_toolkit.register_agent("Worker", new_worker_agent)
    # msg_toolkit.register_agent("Search_Agent", searcher)
    # msg_toolkit.register_agent("Developer_Agent", developer)
    # msg_toolkit.register_agent("Document_Agent", documenter)
    # msg_toolkit.register_agent("Multi_Modal_Agent", multi_modaler)

    workforce = Workforce(
        options.task_id,
        "A workforce",
        graceful_shutdown_timeout=3,  # 30 seconds for debugging
        share_memory=False,
        coordinator_agent=coordinator_agent,
        task_agent=task_agent,
        new_worker_agent=new_worker_agent,
        # NOTE(review): only OPENAI skips the structured-output handler —
        # confirm other platforms indeed require it.
        use_structured_output_handler=False if options.model_platform == ModelPlatformType.OPENAI else True,
    )
    workforce.add_single_agent_worker(
        "Developer Agent: A master-level coding assistant with a powerful "
        "terminal. It can write and execute code, manage files, automate "
        "desktop tasks, and deploy web applications to solve complex "
        "technical challenges.",
        developer,
    )
    workforce.add_single_agent_worker(
        "Search Agent: Can search the web, extract webpage content, "
        "simulate browser actions, and provide relevant information to "
        "solve the given task.",
        searcher,
    )
    workforce.add_single_agent_worker(
        "Document Agent: A document processing assistant skilled in creating "
        "and modifying a wide range of file formats. It can generate "
        "text-based files/reports (Markdown, JSON, YAML, HTML), "
        "office documents (Word, PDF), presentations (PowerPoint), and "
        "data files (Excel, CSV).",
        documenter,
    )
    workforce.add_single_agent_worker(
        "Multi-Modal Agent: A specialist in media processing. It can "
        "analyze images and audio, transcribe speech, download videos, and "
        "generate new images from text prompts.",
        multi_modaler,
    )
    # workforce.add_single_agent_worker(
    #     "Social Media Agent: A social media management assistant for "
    #     "handling tasks related to WhatsApp, Twitter, LinkedIn, Reddit, "
    #     "Notion, Slack, and other social platforms.",
    #     await social_medium_agent(options),
    # )
    mcp = await mcp_agent(options)
    # workforce.add_single_agent_worker(
    #     "MCP Agent: A Model Context Protocol agent that provides access "
    #     "to external tools and services through MCP integrations.",
    #     mcp,
    # )
    return workforce, mcp
|
||||
|
||||
|
||||
def format_agent_description(agent_data: NewAgent | ActionNewAgent) -> str:
    r"""Build a one-line agent description of the form
    "<name>: <description> with access to <tools...>".

    Missing descriptions fall back to "A specialized agent"; tool names are
    collected from both built-in tools and configured MCP servers.
    """
    parts = [f"{agent_data.name}:"]

    # Description, or a generic fallback when absent/empty.
    description = getattr(agent_data, "description", None)
    parts.append(description.strip() if description else "A specialized agent")

    # Collect titleized tool names from built-in tools and MCP servers.
    tool_names = [titleize(tool) for tool in getattr(agent_data, "tools", None) or []]
    mcp_tools = getattr(agent_data, "mcp_tools", None)
    if mcp_tools:
        tool_names.extend(titleize(server) for server in mcp_tools.get("mcpServers", {}))

    if tool_names:
        # NOTE(review): this deliberately reproduces the original output,
        # including the raw list repr in <...> — confirm whether the
        # duplication is intended.
        parts.append(f"with access to {', '.join(tool_names)} tools : <{tool_names}>")

    return " ".join(parts)
|
||||
|
||||
|
||||
async def new_agent_model(data: NewAgent | ActionNewAgent, options: Chat):
    r"""Construct the model for a user-defined agent.

    Resolves the agent's toolkits (built-in and MCP), logs each resolved
    function tool, and appends platform / working-directory / current-date
    context to the user-supplied description before delegating to
    ``agent_model``.

    Args:
        data: The new-agent payload (name, description, tools, mcp_tools).
        options: The chat context providing task id and file save path.
    """
    working_directory = options.file_save_path()
    tool_names = []
    tools = [*await get_toolkits(data.tools, data.name, options.task_id)]
    for item in data.tools:
        tool_names.append(titleize(item))
    if data.mcp_tools is not None:
        tools = [*tools, *await get_mcp_tools(data.mcp_tools)]
        # Use .get() so a payload without an "mcpServers" key cannot raise
        # KeyError (consistent with format_agent_description).
        for item in data.mcp_tools.get("mcpServers", {}).keys():
            tool_names.append(titleize(item))
    for item in tools:
        logger.debug(f"new agent function tool ====== {item.func.__name__}")
    # Enhanced system message with platform information
    enhanced_description = f"""{data.description}
- You are now working in system {platform.system()} with architecture
{platform.machine()} at working directory `{working_directory}`. All your
work related to local operations should be done in that directory.
The current date is {datetime.date.today()}. For any date-related tasks, you
MUST use this as the current date.
"""

    return agent_model(data.name, enhanced_description, options, tools, tool_names=tool_names)
|
||||
360
backend/app/service/task.py
Normal file
360
backend/app/service/task.py
Normal file
@ -0,0 +1,360 @@
|
||||
from typing_extensions import Any, Literal, TypedDict
|
||||
from pydantic import BaseModel
|
||||
from app.exception.exception import ProgramException
|
||||
from app.model.chat import McpServers, Status, SupplementChat, Chat, UpdateData
|
||||
import asyncio
|
||||
from enum import Enum
|
||||
from camel.tasks import Task
|
||||
from contextlib import contextmanager
|
||||
from contextvars import ContextVar
|
||||
from datetime import datetime, timedelta
|
||||
import weakref
|
||||
from loguru import logger
|
||||
|
||||
|
||||
class Action(str, Enum):
    """Protocol actions exchanged over the task event stream.

    The arrow comments give the direction: "user -> backend" actions arrive
    from the frontend; "backend -> user" actions are pushed to the frontend.
    """

    improve = "improve"  # user -> backend
    update_task = "update_task"  # user -> backend
    task_state = "task_state"  # backend -> user
    start = "start"  # user -> backend
    create_agent = "create_agent"  # backend -> user
    activate_agent = "activate_agent"  # backend -> user
    deactivate_agent = "deactivate_agent"  # backend -> user
    assign_task = "assign_task"  # backend -> user
    activate_toolkit = "activate_toolkit"  # backend -> user
    deactivate_toolkit = "deactivate_toolkit"  # backend -> user
    write_file = "write_file"  # backend -> user
    ask = "ask"  # backend -> user
    notice = "notice"  # backend -> user
    search_mcp = "search_mcp"  # backend -> user
    install_mcp = "install_mcp"  # backend -> user
    terminal = "terminal"  # backend -> user
    end = "end"  # backend -> user
    stop = "stop"  # user -> backend
    supplement = "supplement"  # user -> backend
    pause = "pause"  # user -> backend user take control
    resume = "resume"  # user -> backend user take control
    new_agent = "new_agent"  # user -> backend
    budget_not_enough = "budget_not_enough"  # backend -> user
|
||||
|
||||
|
||||
class ActionImproveData(BaseModel):
    """user -> backend: free-text request to improve the task."""

    action: Literal[Action.improve] = Action.improve
    data: str
|
||||
|
||||
|
||||
class ActionStartData(BaseModel):
    """user -> backend: start executing the task (no payload)."""

    action: Literal[Action.start] = Action.start
|
||||
|
||||
|
||||
class ActionUpdateTaskData(BaseModel):
    """user -> backend: update the task definition with ``UpdateData``."""

    action: Literal[Action.update_task] = Action.update_task
    data: UpdateData
|
||||
|
||||
|
||||
class ActionTaskStateData(BaseModel):
    """backend -> user: progress snapshot for a (sub)task."""

    action: Literal[Action.task_state] = Action.task_state
    data: dict[Literal["task_id", "content", "state", "result", "failure_count"], str | int]
|
||||
|
||||
|
||||
class ActionAskData(BaseModel):
    """backend -> user: an agent asks the user a question."""

    action: Literal[Action.ask] = Action.ask
    data: dict[Literal["question", "agent"], str]
|
||||
|
||||
|
||||
class AgentDataDict(TypedDict):
    """Payload shape for create_agent events."""

    agent_name: str
    agent_id: str
    tools: list[str]
|
||||
|
||||
|
||||
class ActionCreateAgentData(BaseModel):
    """backend -> user: a new worker agent was created."""

    action: Literal[Action.create_agent] = Action.create_agent
    data: AgentDataDict
|
||||
|
||||
|
||||
class ActionActivateAgentData(BaseModel):
    """backend -> user: an agent started working on a process task."""

    action: Literal[Action.activate_agent] = Action.activate_agent
    data: dict[Literal["agent_name", "process_task_id", "agent_id", "message"], str]
|
||||
|
||||
|
||||
class DataDict(TypedDict):
    """Payload shape for deactivate_agent events (includes token usage)."""

    agent_name: str
    agent_id: str
    process_task_id: str
    message: str
    tokens: int
|
||||
|
||||
|
||||
class ActionDeactivateAgentData(BaseModel):
    """backend -> user: an agent finished its process task."""

    action: Literal[Action.deactivate_agent] = Action.deactivate_agent
    data: DataDict
|
||||
|
||||
|
||||
class ActionAssignTaskData(BaseModel):
    """backend -> user: a subtask was assigned to a worker."""

    action: Literal[Action.assign_task] = Action.assign_task
    data: dict[Literal["assignee_id", "task_id", "content", "state"], str]
|
||||
|
||||
|
||||
class ActionActivateToolkitData(BaseModel):
    """backend -> user: a toolkit method started executing."""

    action: Literal[Action.activate_toolkit] = Action.activate_toolkit
    data: dict[
        Literal["agent_name", "toolkit_name", "process_task_id", "method_name", "message"],
        str,
    ]
|
||||
|
||||
|
||||
class ActionDeactivateToolkitData(BaseModel):
    """backend -> user: a toolkit method finished (message carries result/error)."""

    action: Literal[Action.deactivate_toolkit] = Action.deactivate_toolkit
    data: dict[
        Literal["agent_name", "toolkit_name", "process_task_id", "method_name", "message"],
        str,
    ]
|
||||
|
||||
|
||||
class ActionWriteFileData(BaseModel):
    """backend -> user: a file was written during the given process task."""

    action: Literal[Action.write_file] = Action.write_file
    process_task_id: str
    data: str
|
||||
|
||||
|
||||
class ActionNoticeData(BaseModel):
    """backend -> user: informational notice tied to a process task."""

    action: Literal[Action.notice] = Action.notice
    process_task_id: str
    data: str
|
||||
|
||||
|
||||
class ActionSearchMcpData(BaseModel):
    """backend -> user: MCP search results (shape defined by the search backend)."""

    action: Literal[Action.search_mcp] = Action.search_mcp
    data: Any
|
||||
|
||||
|
||||
class ActionInstallMcpData(BaseModel):
    """backend -> user: request to install the given MCP servers."""

    action: Literal[Action.install_mcp] = Action.install_mcp
    data: McpServers
|
||||
|
||||
|
||||
class ActionTerminalData(BaseModel):
    """backend -> user: terminal output produced during a process task."""

    action: Literal[Action.terminal] = Action.terminal
    process_task_id: str
    data: str
|
||||
|
||||
|
||||
class ActionStopData(BaseModel):
    """user -> backend: stop the running task (no payload)."""

    action: Literal[Action.stop] = Action.stop
|
||||
|
||||
|
||||
class ActionEndData(BaseModel):
    """backend -> user: the task has ended (no payload)."""

    action: Literal[Action.end] = Action.end
|
||||
|
||||
|
||||
class ActionSupplementData(BaseModel):
    """user -> backend: supplementary chat input for the running task."""

    action: Literal[Action.supplement] = Action.supplement
    data: SupplementChat
|
||||
|
||||
|
||||
class ActionTakeControl(BaseModel):
    """user -> backend: pause or resume while the user takes control."""

    action: Literal[Action.pause, Action.resume]
|
||||
|
||||
|
||||
class ActionNewAgent(BaseModel):
    """user -> backend: define a new custom agent with tools and optional MCP servers."""

    action: Literal[Action.new_agent] = Action.new_agent
    name: str
    description: str
    tools: list[str]
    mcp_tools: McpServers | None
|
||||
|
||||
|
||||
class ActionBudgetNotEnough(BaseModel):
    """backend -> user: the user's budget is exhausted (no payload)."""

    action: Literal[Action.budget_not_enough] = Action.budget_not_enough
|
||||
|
||||
|
||||
# Union of every event payload on the task queue; each variant carries a
# distinct Literal `action` field, so the union is discriminated by `action`.
ActionData = (
    ActionImproveData
    | ActionStartData
    | ActionUpdateTaskData
    | ActionTaskStateData
    | ActionAskData
    | ActionCreateAgentData
    | ActionActivateAgentData
    | ActionDeactivateAgentData
    | ActionAssignTaskData
    | ActionActivateToolkitData
    | ActionDeactivateToolkitData
    | ActionWriteFileData
    | ActionNoticeData
    | ActionSearchMcpData
    | ActionInstallMcpData
    | ActionTerminalData
    | ActionStopData
    | ActionEndData
    | ActionSupplementData
    | ActionTakeControl
    | ActionNewAgent
    | ActionBudgetNotEnough
)
|
||||
|
||||
|
||||
class Agents(str, Enum):
    """Well-known agent identifiers used to attribute events to agents."""

    task_agent = "task_agent"
    coordinator_agent = "coordinator_agent"
    new_worker_agent = "new_worker_agent"
    developer_agent = "developer_agent"
    search_agent = "search_agent"
    document_agent = "document_agent"
    multi_modal_agent = "multi_modal_agent"
    social_medium_agent = "social_medium_agent"
    mcp_agent = "mcp_agent"
|
||||
|
||||
|
||||
class TaskLock:
    r"""Per-task coordination hub.

    Holds the SSE event queue, the per-agent human-input queues, and the set
    of background tasks spawned while a chat task runs, plus access timestamps
    used by the periodic stale-lock cleanup.
    """

    id: str
    status: Status = Status.confirming
    active_agent: str = ""
    mcp: list[str]
    queue: asyncio.Queue[ActionData]
    """Queue monitoring for SSE response"""
    human_input: dict[str, asyncio.Queue[str]]
    """After receiving user's reply, put the reply into the corresponding agent's queue"""
    created_at: datetime
    last_accessed: datetime
    background_tasks: set[asyncio.Task]
    """Track all background tasks for cleanup"""

    def __init__(self, id: str, queue: asyncio.Queue, human_input: dict) -> None:
        self.id = id
        self.queue = queue
        self.human_input = human_input
        # Initialize `mcp` explicitly: the class-level annotation alone does
        # not create the attribute, so reading it before assignment would
        # raise AttributeError.
        self.mcp = []
        self.created_at = datetime.now()
        self.last_accessed = datetime.now()
        self.background_tasks = set()

    async def put_queue(self, data: ActionData):
        """Publish an event to the SSE queue and refresh the access time."""
        self.last_accessed = datetime.now()
        await self.queue.put(data)

    async def get_queue(self):
        """Pop the next SSE event, refreshing the access time."""
        self.last_accessed = datetime.now()
        return await self.queue.get()

    async def put_human_input(self, agent: str, data: Any = None):
        """Deliver the user's reply to the agent waiting on it.

        Raises KeyError if ``add_human_input_listen`` was not called for
        ``agent`` first.
        """
        await self.human_input[agent].put(data)

    async def get_human_input(self, agent: str):
        """Block until the user's reply for ``agent`` arrives."""
        return await self.human_input[agent].get()

    def add_human_input_listen(self, agent: str):
        """Register a single-slot reply queue for ``agent`` (replaces any
        existing queue for that agent)."""
        self.human_input[agent] = asyncio.Queue(1)

    def add_background_task(self, task: asyncio.Task) -> None:
        r"""Track a background task; it removes itself from the set on completion."""
        self.background_tasks.add(task)
        task.add_done_callback(lambda t: self.background_tasks.discard(t))

    async def cleanup(self):
        r"""Cancel all tracked background tasks and clear the set."""
        for task in list(self.background_tasks):
            if not task.done():
                task.cancel()
                try:
                    await task
                except asyncio.CancelledError:
                    pass
        self.background_tasks.clear()
|
||||
|
||||
|
||||
# Registry of live TaskLocks keyed by api task id.
task_locks = dict[str, TaskLock]()
# Cleanup task for removing stale task locks
_cleanup_task: asyncio.Task | None = None
# Weak index from camel Task.id to Task, rebuilt lazily by get_camel_task;
# weak refs let finished tasks be garbage-collected.
task_index: dict[str, weakref.ref[Task]] = {}
|
||||
|
||||
|
||||
def get_task_lock(id: str) -> TaskLock:
    """Look up the TaskLock for ``id``; raise ProgramException if absent."""
    lock = task_locks.get(id)
    if lock is None:
        raise ProgramException("Task not found")
    return lock
|
||||
|
||||
|
||||
def create_task_lock(id: str) -> TaskLock:
    """Create and register a TaskLock for ``id``.

    Also (re)starts the periodic stale-lock cleanup task if it is not
    currently running. Raises ProgramException if ``id`` already exists.
    """
    if id in task_locks:
        raise ProgramException("Task already exists")
    lock = TaskLock(id=id, queue=asyncio.Queue(), human_input={})
    task_locks[id] = lock

    # Start cleanup task if not running
    global _cleanup_task
    if _cleanup_task is None or _cleanup_task.done():
        _cleanup_task = asyncio.create_task(_periodic_cleanup())

    return lock
|
||||
|
||||
|
||||
async def delete_task_lock(id: str):
    """Remove the TaskLock for ``id`` after cancelling its background tasks.

    Raises ProgramException if no lock is registered for ``id``.
    """
    lock = task_locks.get(id)
    if lock is None:
        raise ProgramException("Task not found")

    # Clean up background tasks before deletion
    await lock.cleanup()

    del task_locks[id]
    logger.debug(f"Deleted task lock {id}, remaining locks: {len(task_locks)}")
|
||||
|
||||
|
||||
def get_camel_task(id: str, tasks: list[Task]) -> None | Task:
    """Find a camel Task by id, using a weak-reference index as a cache.

    On a cache miss (or a dead weak ref) the task tree is searched
    depth-first and the index is rebuilt along the way.
    """
    cached = task_index.get(id)
    if cached is not None:
        task = cached()
        if task is not None:
            return task
        # Weak reference died, remove from index
        del task_index[id]

    # Fallback: search the tree, rebuilding the index as we go.
    for item in tasks:
        task_index[item.id] = weakref.ref(item)
        if item.id == id:
            return item
        found = get_camel_task(id, item.subtasks)
        if found is not None:
            return found
    return None
|
||||
|
||||
|
||||
async def _periodic_cleanup():
    r"""Background loop that evicts task locks idle for more than two hours.

    Runs every five minutes until cancelled; unexpected errors are logged
    and the loop continues.
    """
    while True:
        try:
            await asyncio.sleep(300)  # Run every 5 minutes

            cutoff = datetime.now() - timedelta(hours=2)
            stale_ids = [
                task_id
                for task_id, lock in task_locks.items()
                if lock.last_accessed < cutoff
            ]

            for task_id in stale_ids:
                logger.warning(f"Cleaning up stale task lock: {task_id}")
                await delete_task_lock(task_id)

        except asyncio.CancelledError:
            break
        except Exception as e:
            logger.error(f"Error in periodic cleanup: {e}")
|
||||
|
||||
|
||||
# Context variable holding the id of the subtask currently being processed;
# read by the toolkit listeners to tag events with a process_task_id.
process_task = ContextVar[str]("id")
|
||||
|
||||
|
||||
@contextmanager
def set_process_task(process_task_id: str):
    """Temporarily bind ``process_task`` to ``process_task_id`` for the
    enclosed block, restoring the previous value on exit (even on error)."""
    origin = process_task.set(process_task_id)
    try:
        yield
    finally:
        process_task.reset(origin)
|
||||
1344
backend/app/utils/agent.py
Normal file
1344
backend/app/utils/agent.py
Normal file
File diff suppressed because it is too large
Load Diff
177
backend/app/utils/listen/toolkit_listen.py
Normal file
177
backend/app/utils/listen/toolkit_listen.py
Normal file
@ -0,0 +1,177 @@
|
||||
import asyncio
|
||||
from functools import wraps
|
||||
from inspect import iscoroutinefunction
|
||||
import json
|
||||
from typing import Any, Callable
|
||||
|
||||
from loguru import logger
|
||||
from app.service.task import (
|
||||
ActionActivateToolkitData,
|
||||
ActionDeactivateToolkitData,
|
||||
get_task_lock,
|
||||
)
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from app.service.task import process_task
|
||||
|
||||
|
||||
def listen_toolkit(
    wrap_method: Callable[..., Any] | None = None,
    inputs: Callable[..., str] | None = None,
    return_msg: Callable[[Any], str] | None = None,
):
    r"""Decorator that reports toolkit method activity over the task queue.

    Before the wrapped method runs, an ``ActionActivateToolkitData`` event is
    queued; after it returns (or raises), an ``ActionDeactivateToolkitData``
    event carrying the (truncated) result or error message is queued. The
    wrapped method must be an ``AbstractToolkit`` instance method (``self`` is
    read for ``api_task_id`` and ``agent_name``).

    Args:
        wrap_method (Callable | None): Currently unused at runtime; retained
            so existing call sites that pass the base-class method keep
            working.
        inputs (Callable | None): Optional formatter for the call arguments
            shown in the activate event; defaults to repr-joining args/kwargs.
        return_msg (Callable | None): Optional formatter for the result shown
            in the deactivate event; defaults to the string/JSON form.
    """

    def _format_args(args, kwargs) -> str:
        # Build the human-readable argument string for the activate event.
        if inputs is not None:
            return inputs(*args, **kwargs)
        # remove first param self
        filtered_args = args[1:] if len(args) > 0 else []
        args_str = ", ".join(repr(arg) for arg in filtered_args)
        if kwargs:
            kwargs_str = ", ".join(f"{k}={v!r}" for k, v in kwargs.items())
            args_str = f"{args_str}, {kwargs_str}" if args_str else kwargs_str
        return args_str

    def _format_result(res, error) -> str:
        # Build the result/error string for the deactivate event.
        if return_msg and error is None:
            return return_msg(res)
        if isinstance(res, str):
            return res
        if error is None:
            try:
                return json.dumps(res, ensure_ascii=False)
            except TypeError:
                # Handle cases where res contains non-serializable objects (like coroutines)
                return str(res)
        return str(error)

    def decorator(func: Callable[..., Any]):
        if iscoroutinefunction(func):
            # async function wrapper
            @wraps(func)
            async def async_wrapper(*args, **kwargs):
                toolkit: AbstractToolkit = args[0]
                task_lock = get_task_lock(toolkit.api_task_id)

                args_str = _format_args(args, kwargs)
                toolkit_name = toolkit.toolkit_name()
                method_name = func.__name__.replace("_", " ")
                await task_lock.put_queue(
                    ActionActivateToolkitData(
                        data={
                            "agent_name": toolkit.agent_name,
                            "process_task_id": process_task.get(""),
                            "toolkit_name": toolkit_name,
                            "method_name": method_name,
                            "message": args_str,
                        },
                    )
                )
                error = None
                res = None
                try:
                    res = await func(*args, **kwargs)
                except Exception as e:
                    error = e

                res_msg = _format_result(res, error)
                await task_lock.put_queue(
                    ActionDeactivateToolkitData(
                        data={
                            "agent_name": toolkit.agent_name,
                            "process_task_id": process_task.get(""),
                            "toolkit_name": toolkit_name,
                            "method_name": method_name,
                            "message": res_msg[:500] if len(res_msg) > 500 else res_msg,
                        },
                    )
                )
                if error is not None:
                    raise error
                return res

            return async_wrapper

        # sync function wrapper
        @wraps(func)
        def sync_wrapper(*args, **kwargs):
            toolkit: AbstractToolkit = args[0]
            task_lock = get_task_lock(toolkit.api_task_id)

            args_str = _format_args(args, kwargs)
            toolkit_name = toolkit.toolkit_name()
            method_name = func.__name__.replace("_", " ")
            # NOTE(review): asyncio.create_task requires a running event loop
            # in this thread; sync toolkit methods are assumed to be invoked
            # from within the loop — confirm for executor-thread call sites.
            task = asyncio.create_task(
                task_lock.put_queue(
                    ActionActivateToolkitData(
                        data={
                            "agent_name": toolkit.agent_name,
                            "process_task_id": process_task.get(""),
                            "toolkit_name": toolkit_name,
                            "method_name": method_name,
                            "message": args_str,
                        },
                    )
                )
            )
            if hasattr(task_lock, "add_background_task"):
                task_lock.add_background_task(task)
            error = None
            res = None
            try:
                res = func(*args, **kwargs)
                # Safety check: if the result is a coroutine, we need to await it
                if asyncio.iscoroutine(res):
                    import warnings

                    warnings.warn(f"Async function {func.__name__} was incorrectly called synchronously")
                    res = asyncio.run(res)
            except Exception as e:
                error = e

            res_msg = _format_result(res, error)
            task = asyncio.create_task(
                task_lock.put_queue(
                    ActionDeactivateToolkitData(
                        data={
                            "agent_name": toolkit.agent_name,
                            "process_task_id": process_task.get(""),
                            "toolkit_name": toolkit_name,
                            "method_name": method_name,
                            "message": res_msg[:500] if len(res_msg) > 500 else res_msg,
                        },
                    )
                )
            )
            if hasattr(task_lock, "add_background_task"):
                task_lock.add_background_task(task)
            if error is not None:
                raise error
            return res

        return sync_wrapper

    return decorator
|
||||
48
backend/app/utils/server/sync_step.py
Normal file
48
backend/app/utils/server/sync_step.py
Normal file
@ -0,0 +1,48 @@
|
||||
import time
|
||||
import httpx
|
||||
import asyncio
|
||||
import os
|
||||
import json
|
||||
from loguru import logger
|
||||
from app.service.chat_service import Chat
|
||||
from app.component.environment import env
|
||||
|
||||
|
||||
def sync_step(func):
    r"""Decorator for async generators that mirrors each yielded step to the
    central server (``SERVER_URL``/chat/steps) while passing values through
    unchanged.

    When ``SERVER_URL`` is not configured, values are yielded untouched and
    nothing is posted. Yielded SSE strings ("data: {...}") are unwrapped
    before JSON parsing.
    """

    async def wrapper(*args, **kwargs):
        server_url = env("SERVER_URL")
        sync_url = server_url + "/chat/steps" if server_url else None
        # Keep strong references to in-flight POST tasks so they are not
        # garbage-collected before completing (per asyncio.create_task docs).
        pending: set[asyncio.Task] = set()
        async for value in func(*args, **kwargs):
            if not server_url:
                yield value
                continue

            if isinstance(value, str) and value.startswith("data: "):
                value_json_str = value[len("data: ") :].strip()
            else:
                value_json_str = value
            json_data = json.loads(value_json_str)
            chat: Chat = args[0] if args else None
            if chat is not None:
                task = asyncio.create_task(
                    send_to_api(
                        sync_url,
                        {
                            "task_id": chat.task_id,
                            "step": json_data["step"],
                            "data": json_data["data"],
                        },
                    )
                )
                pending.add(task)
                task.add_done_callback(pending.discard)
            yield value

    return wrapper
|
||||
|
||||
|
||||
async def send_to_api(url, data):
    # Best-effort JSON POST: failures are logged and swallowed so that step
    # syncing can never break the chat stream.
    async with httpx.AsyncClient() as client:
        try:
            res = await client.post(url, json=data)
            # logger.info(res)
        except Exception as e:
            logger.error(e)
|
||||
197
backend/app/utils/single_agent_worker.py
Normal file
197
backend/app/utils/single_agent_worker.py
Normal file
@ -0,0 +1,197 @@
|
||||
import datetime
|
||||
from camel.agents.chat_agent import AsyncStreamingChatAgentResponse
|
||||
from camel.societies.workforce.single_agent_worker import SingleAgentWorker as BaseSingleAgentWorker
|
||||
from camel.tasks.task import Task, TaskState, is_task_result_insufficient
|
||||
|
||||
from app.utils.agent import ListenChatAgent
|
||||
from camel.societies.workforce.prompts import PROCESS_TASK_PROMPT
|
||||
from colorama import Fore
|
||||
from camel.societies.workforce.utils import TaskResult
|
||||
|
||||
|
||||
class SingleAgentWorker(BaseSingleAgentWorker):
    """Workforce worker whose agent is a ListenChatAgent, so task processing
    emits activate/deactivate events; also records per-attempt metadata
    (tokens, tool calls) in the task's additional_info."""

    def __init__(
        self,
        description: str,
        worker: ListenChatAgent,
        use_agent_pool: bool = True,
        pool_initial_size: int = 1,
        pool_max_size: int = 10,
        auto_scale_pool: bool = True,
        use_structured_output_handler: bool = True,
    ) -> None:
        """Forward all pooling/structured-output options to the base class."""
        super().__init__(
            description=description,
            worker=worker,
            use_agent_pool=use_agent_pool,
            pool_initial_size=pool_initial_size,
            pool_max_size=pool_max_size,
            auto_scale_pool=auto_scale_pool,
            use_structured_output_handler=use_structured_output_handler,
        )
        # Re-assign so the attribute's static type is ListenChatAgent.
        self.worker = worker  # change type hint

    async def _process_task(self, task: Task, dependencies: list[Task]) -> TaskState:
        r"""Processes a task with its dependencies using an efficient agent
        management system.

        This method asynchronously processes a given task, considering its
        dependencies, by sending a generated prompt to a worker agent.
        Uses an agent pool for efficiency when enabled, or falls back to
        cloning when pool is disabled.

        Args:
            task (Task): The task to process, which includes necessary details
                like content and type.
            dependencies (List[Task]): Tasks that the given task depends on.

        Returns:
            TaskState: `TaskState.DONE` if processed successfully, otherwise
                `TaskState.FAILED`.
        """
        # Get agent efficiently (from pool or by cloning)
        worker_agent = await self._get_worker_agent()
        # Tag the agent so toolkit listeners can attribute events to this task.
        worker_agent.process_task_id = task.id  # type: ignore rewrite line

        response_content = ""
        try:
            dependency_tasks_info = self._get_dep_tasks_info(dependencies)
            prompt = PROCESS_TASK_PROMPT.format(
                content=task.content,
                parent_task_content=task.parent.content if task.parent else "",
                dependency_tasks_info=dependency_tasks_info,
                additional_info=task.additional_info,
            )

            if self.use_structured_output_handler and self.structured_handler:
                # Use structured output handler for prompt-based extraction
                enhanced_prompt = self.structured_handler.generate_structured_prompt(
                    base_prompt=prompt,
                    schema=TaskResult,
                    examples=[
                        {
                            "content": "I have successfully completed the task...",
                            "failed": False,
                        }
                    ],
                    additional_instructions="Ensure you provide a clear "
                    "description of what was done and whether the task "
                    "succeeded or failed.",
                )
                response = await worker_agent.astep(enhanced_prompt)

                # Handle streaming response: keep only the final chunk content.
                if isinstance(response, AsyncStreamingChatAgentResponse):
                    content = ""
                    async for chunk in response:
                        if chunk.msg:
                            content = chunk.msg.content
                    response_content = content
                else:
                    # Regular ChatAgentResponse
                    response_content = response.msg.content if response.msg else ""

                # Parse TaskResult out of the free-text response, with a
                # failure fallback if parsing does not succeed.
                task_result = self.structured_handler.parse_structured_response(
                    response_text=response_content,
                    schema=TaskResult,
                    fallback_values={
                        "content": "Task processing failed",
                        "failed": True,
                    },
                )
            else:
                # Use native structured output if supported
                response = await worker_agent.astep(prompt, response_format=TaskResult)

                # Handle streaming response for native output
                if isinstance(response, AsyncStreamingChatAgentResponse):
                    task_result = None
                    async for chunk in response:
                        if chunk.msg and chunk.msg.parsed:
                            task_result = chunk.msg.parsed
                            response_content = chunk.msg.content
                    # If no parsed result found in streaming, create fallback
                    if task_result is None:
                        task_result = TaskResult(
                            content="Failed to parse streaming response",
                            failed=True,
                        )
                else:
                    # Regular ChatAgentResponse
                    task_result = response.msg.parsed
                    response_content = response.msg.content if response.msg else ""

            # Get token usage from the response
            if isinstance(response, AsyncStreamingChatAgentResponse):
                # For streaming responses, get the final response info
                final_response = await response
                usage_info = final_response.info.get("usage") or final_response.info.get("token_usage")
            else:
                usage_info = response.info.get("usage") or response.info.get("token_usage")
            total_tokens = usage_info.get("total_tokens", 0) if usage_info else 0

        except Exception as e:
            print(f"{Fore.RED}Error processing task {task.id}: {type(e).__name__}: {e}{Fore.RESET}")
            # Store error information in task result
            task.result = f"{type(e).__name__}: {e!s}"
            return TaskState.FAILED
        finally:
            # Return agent to pool or let it be garbage collected
            await self._return_worker_agent(worker_agent)

        # Populate additional_info with worker attempt details
        if task.additional_info is None:
            task.additional_info = {}

        # Create worker attempt details with descriptive keys
        worker_attempt_details = {
            "agent_id": getattr(worker_agent, "agent_id", worker_agent.role_name),
            "original_worker_id": getattr(self.worker, "agent_id", self.worker.role_name),
            "timestamp": str(datetime.datetime.now()),
            "description": f"Attempt by "
            f"{getattr(worker_agent, 'agent_id', worker_agent.role_name)} "
            f"(from pool/clone of "
            f"{getattr(self.worker, 'agent_id', self.worker.role_name)}) "
            f"to process task: {task.content}",
            "response_content": response_content[:50],
            "tool_calls": str(
                final_response.info.get("tool_calls")
                if isinstance(response, AsyncStreamingChatAgentResponse)
                else response.info.get("tool_calls")
            )[:50],
            "total_tokens": total_tokens,
        }

        # Store the worker attempt in additional_info
        if "worker_attempts" not in task.additional_info:
            task.additional_info["worker_attempts"] = []
        task.additional_info["worker_attempts"].append(worker_attempt_details)

        # Store the actual token usage for this specific task
        task.additional_info["token_usage"] = {"total_tokens": total_tokens}

        print(f"======\n{Fore.GREEN}Response from {self}:{Fore.RESET}")

        if not self.use_structured_output_handler:
            # Handle native structured output parsing
            if task_result is None:
                print(f"{Fore.RED}Error in worker step execution: Invalid task result{Fore.RESET}")
                task_result = TaskResult(
                    content="Failed to generate valid task result.",
                    failed=True,
                )

        color = Fore.RED if task_result.failed else Fore.GREEN  # type: ignore[union-attr]
        print(
            f"\n{color}{task_result.content}{Fore.RESET}\n======",  # type: ignore[union-attr]
        )

        task.result = task_result.content  # type: ignore[union-attr]

        if task_result.failed:  # type: ignore[union-attr]
            return TaskState.FAILED

        if is_task_result_insufficient(task):
            print(f"{Fore.RED}Task {task.id}: Content validation failed - task marked as failed{Fore.RESET}")
            return TaskState.FAILED
        return TaskState.DONE
|
||||
16
backend/app/utils/toolkit/abstract_toolkit.py
Normal file
16
backend/app/utils/toolkit/abstract_toolkit.py
Normal file
@ -0,0 +1,16 @@
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from inflection import titleize
|
||||
|
||||
|
||||
class AbstractToolkit:
    """Mixin base for project toolkits: carries the owning task id and agent
    name that the listen_toolkit decorator reads when emitting events."""

    # Id of the API task this toolkit instance belongs to (set by subclasses).
    api_task_id: str
    # Name of the agent the toolkit's events are attributed to.
    agent_name: str

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """default return all tools, subclass can override this method to filter tools"""
        return cls(api_task_id).get_tools()  # type: ignore

    @classmethod
    def toolkit_name(cls) -> str:
        """Human-readable toolkit name derived from the class name."""
        return titleize(cls.__name__)
|
||||
36
backend/app/utils/toolkit/audio_analysis_toolkit.py
Normal file
36
backend/app/utils/toolkit/audio_analysis_toolkit.py
Normal file
@ -0,0 +1,36 @@
|
||||
import os
|
||||
from camel.models import BaseAudioModel, BaseModelBackend
|
||||
from camel.toolkits import AudioAnalysisToolkit as BaseAudioAnalysisToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class AudioAnalysisToolkit(BaseAudioAnalysisToolkit, AbstractToolkit):
    """Audio analysis toolkit wired into the task event stream."""

    # Events from this toolkit are attributed to the multi-modal agent.
    agent_name: str = Agents.multi_modal_agent

    def __init__(
        self,
        api_task_id: str,
        cache_dir: str | None = None,
        transcribe_model: BaseAudioModel | None = None,
        audio_reasoning_model: BaseModelBackend | None = None,
        timeout: float | None = None,
    ):
        # Default the cache dir to the configured save path, falling back to
        # ~/.eigent/tmp/.
        if cache_dir is None:
            cache_dir = env("file_save_path", os.path.expanduser("~/.eigent/tmp/"))
        super().__init__(cache_dir, transcribe_model, audio_reasoning_model, timeout)
        self.api_task_id = api_task_id

    # NOTE(review): wrap_method points at audio2text rather than
    # ask_question_about_audio; the decorator currently ignores wrap_method,
    # but confirm the intended reference.
    @listen_toolkit(
        BaseAudioAnalysisToolkit.audio2text,
        lambda _, audio_path, question: f"transcribe audio from {audio_path} and ask question: {question}",
    )
    def ask_question_about_audio(self, audio_path: str, question: str) -> str:
        # Delegate to the base implementation; the decorator emits
        # activate/deactivate toolkit events around the call.
        return super().ask_question_about_audio(audio_path, question)

    @listen_toolkit(BaseAudioAnalysisToolkit.audio2text)
    def audio2text(self, audio_path: str) -> str:
        # Transcribe the audio file at `audio_path` via the base toolkit.
        return super().audio2text(audio_path)
|
||||
39
backend/app/utils/toolkit/code_execution_toolkit.py
Normal file
39
backend/app/utils/toolkit/code_execution_toolkit.py
Normal file
@ -0,0 +1,39 @@
|
||||
from typing import List, Literal
|
||||
from camel.toolkits import CodeExecutionToolkit as BaseCodeExecutionToolkit, FunctionTool
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class CodeExecutionToolkit(BaseCodeExecutionToolkit, AbstractToolkit):
    """Code-execution toolkit wrapper that reports calls for an API task."""

    # Code execution belongs to the developer agent.
    agent_name: str = Agents.developer_agent

    def __init__(
        self,
        api_task_id: str,
        sandbox: Literal["internal_python", "jupyter", "docker", "subprocess", "e2b"] = "subprocess",
        verbose: bool = False,
        unsafe_mode: bool = False,
        import_white_list: List[str] | None = None,
        require_confirm: bool = False,
        timeout: float | None = None,
    ) -> None:
        """Remember the owning task id, then initialize the base toolkit."""
        self.api_task_id = api_task_id
        super().__init__(sandbox, verbose, unsafe_mode, import_white_list, require_confirm, timeout)

    @listen_toolkit(BaseCodeExecutionToolkit.execute_code)
    def execute_code(self, code: str, code_type: str = "python") -> str:
        """Execute ``code`` through the base toolkit and return its output."""
        return super().execute_code(code, code_type)

    @listen_toolkit(BaseCodeExecutionToolkit.execute_command)
    def execute_command(self, command: str) -> str | tuple[str, str]:
        """Execute a shell command through the base toolkit."""
        return super().execute_command(command)

    def get_tools(self) -> List[FunctionTool]:
        """Expose only ``execute_code`` as a callable tool."""
        tools = [FunctionTool(self.execute_code)]
        return tools
|
||||
29
backend/app/utils/toolkit/craw4ai_toolkit.py
Normal file
29
backend/app/utils/toolkit/craw4ai_toolkit.py
Normal file
@ -0,0 +1,29 @@
|
||||
from camel.toolkits import Crawl4AIToolkit as BaseCrawl4AIToolkit
|
||||
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class Crawl4AIToolkit(BaseCrawl4AIToolkit, AbstractToolkit):
    """Crawl4AI wrapper that reports scraping activity for an API task."""

    # Web scraping is routed to the search agent.
    agent_name: str = Agents.search_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        """Remember the owning task id and initialize the base toolkit."""
        self.api_task_id = api_task_id
        super().__init__(timeout)

    @listen_toolkit(BaseCrawl4AIToolkit.scrape)
    async def scrape(self, url: str) -> str:
        """Scrape ``url`` through the base implementation."""
        result = await super().scrape(url)
        return result

    def toolkit_name(self) -> str:
        """Display name used when reporting this toolkit's activity."""
        return "Crawl Toolkit"
|
||||
26
backend/app/utils/toolkit/excel_toolkit.py
Normal file
26
backend/app/utils/toolkit/excel_toolkit.py
Normal file
@ -0,0 +1,26 @@
|
||||
import os
|
||||
from camel.toolkits import ExcelToolkit as BaseExcelToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class ExcelToolkit(BaseExcelToolkit, AbstractToolkit):
    # Excel handling is routed to the document agent.
    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        timeout: float | None = None,
        working_directory: str | None = None,
    ):
        # Associate this instance with an API task so calls can be reported.
        self.api_task_id = api_task_id
        if working_directory is None:
            working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
        # NOTE(review): working_directory is resolved above but never used —
        # it is neither stored on self nor forwarded to super().__init__().
        # Confirm whether the base toolkit should receive it.
        super().__init__(timeout)

    @listen_toolkit(BaseExcelToolkit.extract_excel_content)
    def extract_excel_content(self, document_path: str) -> str:
        # Delegate to the base toolkit; listen_toolkit reports the call.
        return super().extract_excel_content(document_path)
|
||||
56
backend/app/utils/toolkit/file_write_toolkit.py
Normal file
56
backend/app/utils/toolkit/file_write_toolkit.py
Normal file
@ -0,0 +1,56 @@
|
||||
import asyncio
|
||||
import os
|
||||
from typing import List
|
||||
from camel.toolkits import FileWriteToolkit as BaseFileWriteToolkit
|
||||
from app.component.environment import env
|
||||
from app.service.task import process_task
|
||||
from app.service.task import ActionWriteFileData, Agents, get_task_lock
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class FileWriteToolkit(BaseFileWriteToolkit, AbstractToolkit):
    """File-writing toolkit that reports successful writes to the frontend.

    Wraps camel's ``FileWriteToolkit``; after a successful write the created
    file path is pushed onto the task queue as an ``ActionWriteFileData``
    event so the UI can surface the new file.
    """

    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        working_directory: str | None = None,
        timeout: float | None = None,
        default_encoding: str = "utf-8",
        backup_enabled: bool = True,
    ) -> None:
        """Initialize the toolkit.

        Args:
            api_task_id: Id used to look up the task lock/queue.
            working_directory: Output directory; defaults to the
                ``file_save_path`` env value, falling back to ``~/Downloads``.
            timeout: Optional per-call timeout forwarded to the base class.
            default_encoding: Encoding used when none is given per call.
            backup_enabled: Whether the base toolkit keeps file backups.
        """
        if working_directory is None:
            working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
        super().__init__(working_directory, timeout, default_encoding, backup_enabled)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseFileWriteToolkit.write_to_file,
        # Fix: report the actual filename instead of the literal "(unknown)"
        # placeholder — the lambda already receives `filename`.
        lambda _,
        title,
        content,
        filename,
        encoding=None,
        use_latex=False: f"write content to file: {filename} with encoding: {encoding} and use_latex: {use_latex}",
    )
    def write_to_file(
        self,
        title: str,
        content: str | List[List[str]],
        filename: str,
        encoding: str | None = None,
        use_latex: bool = False,
    ) -> str:
        """Write ``content`` to ``filename`` and notify the queue on success."""
        res = super().write_to_file(title, content, filename, encoding, use_latex)
        if "Content successfully written to file: " in res:
            task_lock = get_task_lock(self.api_task_id)
            # Fire-and-forget: queueing the notification must not block the
            # synchronous write path (requires a running event loop).
            asyncio.create_task(
                task_lock.put_queue(
                    ActionWriteFileData(
                        process_task_id=process_task.get(),
                        data=res.replace("Content successfully written to file: ", ""),
                    )
                )
            )
        return res
|
||||
107
backend/app/utils/toolkit/github_toolkit.py
Normal file
107
backend/app/utils/toolkit/github_toolkit.py
Normal file
@ -0,0 +1,107 @@
|
||||
from typing import Literal
|
||||
from camel.toolkits import GithubToolkit as BaseGithubToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class GithubToolkit(BaseGithubToolkit, AbstractToolkit):
    """GitHub toolkit wrapper that reports each call for an API task.

    Every public method delegates to camel's ``GithubToolkit`` while the
    ``listen_toolkit`` decorator surfaces a human-readable description of
    the call (and, where provided, of the result) to the frontend.
    """

    agent_name: str = Agents.developer_agent

    def __init__(
        self,
        api_task_id: str,
        access_token: str | None = None,
        timeout: float | None = None,
    ) -> None:
        """Initialize the base toolkit, then remember the owning task id."""
        super().__init__(access_token, timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseGithubToolkit.create_pull_request,
        lambda _, repo_name, file_path, new_content, pr_title, body, branch_name: (
            f"Create PR in {repo_name} for {file_path} with title '{pr_title}', "
            f"branch '{branch_name}', content '{new_content}'"
        ),
    )
    def create_pull_request(
        self,
        repo_name: str,
        file_path: str,
        new_content: str,
        pr_title: str,
        body: str,
        branch_name: str,
    ) -> str:
        """Open a pull request via the base toolkit."""
        return super().create_pull_request(repo_name, file_path, new_content, pr_title, body, branch_name)

    @listen_toolkit(
        BaseGithubToolkit.get_issue_list,
        lambda _, repo_name, state="all": f"Get issue list from {repo_name} with state '{state}'",
        lambda issues: f"Retrieved {len(issues)} issues",
    )
    def get_issue_list(self, repo_name: str, state: Literal["open", "closed", "all"] = "all") -> list[dict[str, object]]:
        """List issues of a repository, optionally filtered by state."""
        return super().get_issue_list(repo_name, state)

    @listen_toolkit(
        BaseGithubToolkit.get_issue_content,
        lambda _, repo_name, issue_number: f"Get content of issue {issue_number} from {repo_name}",
    )
    def get_issue_content(self, repo_name: str, issue_number: int) -> str:
        """Fetch the body of a single issue."""
        return super().get_issue_content(repo_name, issue_number)

    @listen_toolkit(
        BaseGithubToolkit.get_pull_request_list,
        lambda _, repo_name, state="all": f"Get pull request list from {repo_name} with state '{state}'",
        lambda prs: f"Retrieved {len(prs)} pull requests",
    )
    def get_pull_request_list(self, repo_name: str, state: Literal["open", "closed", "all"] = "all") -> list[dict[str, object]]:
        """List pull requests of a repository, optionally filtered by state."""
        return super().get_pull_request_list(repo_name, state)

    @listen_toolkit(
        BaseGithubToolkit.get_pull_request_code,
        lambda _, repo_name, pr_number: f"Get code for pull request {pr_number} in {repo_name}",
        lambda code: f"Retrieved {len(code)} code files",
    )
    def get_pull_request_code(self, repo_name: str, pr_number: int) -> list[dict[str, str]]:
        """Fetch the changed files of a pull request."""
        return super().get_pull_request_code(repo_name, pr_number)

    @listen_toolkit(
        BaseGithubToolkit.get_pull_request_comments,
        lambda _, repo_name, pr_number: f"Get comments for pull request {pr_number} in {repo_name}",
        lambda comments: f"Retrieved {len(comments)} comments",
    )
    def get_pull_request_comments(self, repo_name: str, pr_number: int) -> list[dict[str, str]]:
        """Fetch the comments of a pull request."""
        return super().get_pull_request_comments(repo_name, pr_number)

    @listen_toolkit(
        BaseGithubToolkit.get_all_file_paths,
        lambda _, repo_name, path="": f"Get all file paths from {repo_name}, path '{path}'",
        lambda paths: f"Retrieved {len(paths)} file paths",
    )
    def get_all_file_paths(self, repo_name: str, path: str = "") -> list[str]:
        """List every file path under ``path`` in a repository."""
        return super().get_all_file_paths(repo_name, path)

    @listen_toolkit(
        BaseGithubToolkit.retrieve_file_content,
        lambda _, repo_name, file_path: f"Retrieve content of file {file_path} from {repo_name}",
        lambda content: f"Retrieved content of length {len(content)}",
    )
    def retrieve_file_content(self, repo_name: str, file_path: str) -> str:
        """Fetch the raw content of a single file."""
        return super().retrieve_file_content(repo_name, file_path)

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Return GitHub tools only when an access token is configured."""
        if not env("GITHUB_ACCESS_TOKEN"):
            return []
        return GithubToolkit(api_task_id).get_tools()
|
||||
59
backend/app/utils/toolkit/google_calendar_toolkit.py
Normal file
59
backend/app/utils/toolkit/google_calendar_toolkit.py
Normal file
@ -0,0 +1,59 @@
|
||||
from typing import Any, Dict, List
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from camel.toolkits import GoogleCalendarToolkit as BaseGoogleCalendarToolkit
|
||||
|
||||
|
||||
class GoogleCalendarToolkit(BaseGoogleCalendarToolkit, AbstractToolkit):
    """Google Calendar wrapper that reports each call for an API task."""

    # Calendar access is routed to the social-medium agent.
    agent_name: str = Agents.social_medium_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        """Remember the owning task id, then initialize the base toolkit."""
        self.api_task_id = api_task_id
        super().__init__(timeout)

    @listen_toolkit(BaseGoogleCalendarToolkit.create_event)
    def create_event(
        self,
        event_title: str,
        start_time: str,
        end_time: str,
        description: str = "",
        location: str = "",
        attendees_email: List[str] | None = None,
        timezone: str = "UTC",
    ) -> Dict[str, Any]:
        """Create a calendar event via the base toolkit."""
        return super().create_event(event_title, start_time, end_time, description, location, attendees_email, timezone)

    @listen_toolkit(BaseGoogleCalendarToolkit.get_events)
    def get_events(self, max_results: int = 10, time_min: str | None = None) -> List[Dict[str, Any]] | Dict[str, Any]:
        """Fetch upcoming events via the base toolkit."""
        return super().get_events(max_results, time_min)

    @listen_toolkit(BaseGoogleCalendarToolkit.update_event)
    def update_event(
        self,
        event_id: str,
        event_title: str | None = None,
        start_time: str | None = None,
        end_time: str | None = None,
        description: str | None = None,
        location: str | None = None,
        attendees_email: List[str] | None = None,
    ) -> Dict[str, Any]:
        """Update an existing event via the base toolkit."""
        return super().update_event(event_id, event_title, start_time, end_time, description, location, attendees_email)

    @listen_toolkit(BaseGoogleCalendarToolkit.delete_event)
    def delete_event(self, event_id: str) -> str:
        """Delete an event via the base toolkit."""
        return super().delete_event(event_id)

    @listen_toolkit(BaseGoogleCalendarToolkit.get_calendar_details)
    def get_calendar_details(self) -> Dict[str, Any]:
        """Fetch calendar metadata via the base toolkit."""
        return super().get_calendar_details()

    @classmethod
    def get_can_use_tools(cls, api_task_id: str):
        """Expose calendar tools only when OAuth client credentials exist."""
        if not (env("GOOGLE_CLIENT_ID") and env("GOOGLE_CLIENT_SECRET")):
            return []
        return cls(api_task_id).get_tools()
|
||||
45
backend/app/utils/toolkit/google_drive_mcp_toolkit.py
Normal file
45
backend/app/utils/toolkit/google_drive_mcp_toolkit.py
Normal file
@ -0,0 +1,45 @@
|
||||
from camel.toolkits import GoogleDriveMCPToolkit as BaseGoogleDriveMCPToolkit, MCPToolkit
|
||||
from app.component.command import bun
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
|
||||
|
||||
class GoogleDriveMCPToolkit(BaseGoogleDriveMCPToolkit, AbstractToolkit):
    # Drive access is routed to the document agent.
    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        timeout: float | None = None,
        credentials_path: str | None = None,
        input_env: dict[str, str] | None = None,
    ) -> None:
        # Task id ties reported tool calls back to the owning API task.
        self.api_task_id = api_task_id
        # NOTE(review): super() receives the raw credentials_path (possibly
        # None); the GDRIVE_CREDENTIALS_PATH env fallback below is applied
        # only to the MCP server config. Confirm this asymmetry is intended.
        super().__init__(timeout, credentials_path)
        credentials_path = credentials_path or env("GDRIVE_CREDENTIALS_PATH")
        # Replace the base toolkit's MCP client with one that launches the
        # gdrive MCP server via bun, passing the credentials through env.
        self._mcp_toolkit = MCPToolkit(
            config_dict={
                "mcpServers": {
                    "gdrive": {
                        "command": bun(),
                        "args": ["x", "-y", "@modelcontextprotocol/server-gdrive"],
                        "env": {"GDRIVE_CREDENTIALS_PATH": credentials_path, **(input_env or {})},
                    }
                }
            },
            timeout=timeout,
        )

    @classmethod
    async def get_can_use_tools(cls, api_task_id: str, input_env: dict[str, str] | None = None) -> list[FunctionTool]:
        # Only expose Drive tools when credentials are configured.
        if env("GDRIVE_CREDENTIALS_PATH") is None:
            return []
        # Auto-constructed instances use a fixed 180s timeout.
        toolkit = cls(api_task_id, 180, env("GDRIVE_CREDENTIALS_PATH"), input_env)
        await toolkit.connect()
        tools = []
        for item in toolkit.get_tools():
            # Tag each tool so the UI can attribute it to this toolkit.
            setattr(item, "_toolkit_name", cls.__name__)
            tools.append(item)
        return tools
|
||||
53
backend/app/utils/toolkit/google_gmail_mcp_toolkit.py
Normal file
53
backend/app/utils/toolkit/google_gmail_mcp_toolkit.py
Normal file
@ -0,0 +1,53 @@
|
||||
from camel.toolkits import BaseToolkit, FunctionTool, MCPToolkit
|
||||
from app.component.environment import env, env_or_fail
|
||||
from app.component.command import bun
|
||||
from app.service.task import Agents
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class GoogleGmailMCPToolkit(BaseToolkit, AbstractToolkit):
    """Gmail toolkit that proxies tools from a bun-launched MCP server."""

    # Mail access is routed to the social-medium agent.
    agent_name: str = Agents.social_medium_agent

    def __init__(
        self,
        api_task_id: str,
        credentials_path: str | None = None,
        timeout: float | None = None,
        input_env: dict[str, str] | None = None,
    ):
        """Build the MCP client for the Gmail auto-auth server."""
        super().__init__(timeout)
        self.api_task_id = api_task_id
        credentials_path = credentials_path or env("GMAIL_CREDENTIALS_PATH")
        server_config = {
            "mcpServers": {
                "gmail": {
                    "command": bun(),
                    "args": ["x", "-y", "@gongrzhe/server-gmail-autoauth-mcp"],
                    "env": {"GMAIL_CREDENTIALS_PATH": credentials_path, **(input_env or {})},
                }
            }
        }
        self._mcp_toolkit = MCPToolkit(config_dict=server_config, timeout=timeout)

    async def connect(self):
        """Open the underlying MCP session."""
        await self._mcp_toolkit.connect()

    async def disconnect(self):
        """Close the underlying MCP session."""
        await self._mcp_toolkit.disconnect()

    def get_tools(self) -> list[FunctionTool]:
        """Return the tools exported by the MCP server."""
        return self._mcp_toolkit.get_tools()

    @classmethod
    async def get_can_use_tools(cls, api_task_id: str, input_env: dict[str, str] | None = None) -> list[FunctionTool]:
        """Connect and return Gmail tools when credentials are configured."""
        if env("GMAIL_CREDENTIALS_PATH") is None:
            return []
        toolkit = cls(api_task_id, env_or_fail("GMAIL_CREDENTIALS_PATH"), 180, input_env)
        await toolkit.connect()
        tagged: list[FunctionTool] = []
        for tool in toolkit.get_tools():
            # Tag each tool so the UI can attribute it to this toolkit.
            setattr(tool, "_toolkit_name", cls.__name__)
            tagged.append(tool)
        return tagged
|
||||
138
backend/app/utils/toolkit/human_toolkit.py
Normal file
138
backend/app/utils/toolkit/human_toolkit.py
Normal file
@ -0,0 +1,138 @@
|
||||
import asyncio
|
||||
from camel.toolkits.base import BaseToolkit
|
||||
from loguru import logger
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.service.task import Action, ActionAskData, ActionNoticeData, get_task_lock
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from app.service.task import process_task
|
||||
# Rewrite HumanToolkit because the system's user interaction was using console, but in electron we cannot use console. Changed to use SSE response to let frontend show dialog for user interaction
|
||||
|
||||
|
||||
class HumanToolkit(BaseToolkit, AbstractToolkit):
    r"""A class representing a toolkit for human interaction.

    Note:
        This toolkit should be called to send a tidy message to the user to
        keep them informed.
    """

    # Set per-instance: human interaction is attributed to the agent that
    # owns this toolkit instance.
    agent_name: str

    def __init__(self, api_task_id: str, agent_name: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id
        self.agent_name = agent_name
        # Register this agent with the task lock so replies typed by the
        # user can be routed back to it.
        task_lock = get_task_lock(self.api_task_id)
        task_lock.add_human_input_listen(self.agent_name)

    @listen_toolkit(inputs=lambda _, question: question)
    async def ask_human_via_gui(self, question: str) -> str:
        """Use this tool to ask a question to the user when you are stuck,
        need clarification, or require a decision to be made. This is a
        two-way communication channel that will wait for the user's response.
        You should use it to:
        - Clarify ambiguous instructions or requirements.
        - Request missing information that you cannot find (e.g., login
          credentials, file paths).
        - Ask for a decision when there are multiple viable options.
        - Seek help when you encounter an error you cannot resolve on your own.

        Args:
            question (str): The question to ask the user.

        Returns:
            str: The user's response to the question.
        """
        logger.info(f"Question: {question}")
        task_lock = get_task_lock(self.api_task_id)
        # Push the question onto the task's event queue; the frontend shows
        # a dialog for this Action.ask event.
        await task_lock.put_queue(
            ActionAskData(
                action=Action.ask,
                data={
                    "question": question,
                    "agent": self.agent_name,
                },
            )
        )

        # Block until the user's answer is delivered through the task lock.
        reply = await task_lock.get_human_input(self.agent_name)
        logger.info(f"User reply: {reply}")
        return reply

    @listen_toolkit()
    def send_message_to_user(
        self,
        message_title: str,
        message_description: str,
        message_attachment: str | None = None,
    ) -> str:
        r"""Use this tool to send a tidy message to the user, including a
        short title, a one-sentence description, and an optional attachment.

        This one-way tool keeps the user informed about your progress,
        decisions, or actions. It does not require a response.
        You should use it to:
        - Announce what you are about to do.
          For example:
          message_title="Starting Task"
          message_description="Searching for papers on GUI Agents."
        - Report the result of an action.
          For example:
          message_title="Search Complete"
          message_description="Found 15 relevant papers."
        - Report a created file.
          For example:
          message_title="File Ready"
          message_description="The report is ready for your review."
          message_attachment="report.pdf"
        - State a decision.
          For example:
          message_title="Next Step"
          message_description="Analyzing the top 10 papers."
        - Give a status update during a long-running task.

        Args:
            message_title (str): The title of the message.
            message_description (str): The short description.
            message_attachment (str): The attachment of the message,
                which can be a file path or a URL.

        Returns:
            str: Confirmation that the message was successfully sent.
        """
        print(f"\nAgent Message:\n{message_title} \n{message_description}\n")
        if message_attachment:
            print(message_attachment)
        logger.info(f"\nAgent Message:\n{message_title} {message_description} {message_attachment}")
        task_lock = get_task_lock(self.api_task_id)
        # NOTE(review): asyncio.create_task requires a running event loop
        # even though this method is synchronous — confirm it is only ever
        # invoked from within async execution.
        asyncio.create_task(
            task_lock.put_queue(
                ActionNoticeData(
                    process_task_id=process_task.get(""),
                    data=f"{message_description}",
                )
            )
        )

        attachment_info = f" {message_attachment}" if message_attachment else ""
        return f"Message successfully sent to user: '{message_title} {message_description}{attachment_info}'"

    def get_tools(self) -> list[FunctionTool]:
        r"""Returns a list of FunctionTool objects representing the
        functions in the toolkit.

        Returns:
            List[FunctionTool]: A list of FunctionTool objects
                representing the functions in the toolkit.
        """
        return [
            FunctionTool(self.ask_human_via_gui),
            FunctionTool(self.send_message_to_user),
        ]

    @classmethod
    def get_can_use_tools(cls, api_task_id: str, agent_name: str) -> list[FunctionTool]:
        # Exposes only ask_human_via_gui (send_message_to_user is omitted
        # here, unlike get_tools).
        human = cls(api_task_id, agent_name)
        return [
            FunctionTool(human.ask_human_via_gui),
        ]
|
||||
404
backend/app/utils/toolkit/hybrid_browser_python_toolkit.py
Normal file
404
backend/app/utils/toolkit/hybrid_browser_python_toolkit.py
Normal file
@ -0,0 +1,404 @@
|
||||
import asyncio
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
from typing import Any, Dict, List
|
||||
import uuid
|
||||
from camel.models import BaseModelBackend
|
||||
from camel.toolkits.hybrid_browser_toolkit_py import HybridBrowserToolkit as BaseHybridBrowserToolkit
|
||||
from camel.toolkits.hybrid_browser_toolkit_py.config_loader import ConfigLoader
|
||||
from camel.toolkits.hybrid_browser_toolkit_py.browser_session import HybridBrowserSession as BaseHybridBrowserSession
|
||||
from camel.toolkits.hybrid_browser_toolkit_py.actions import ActionExecutor
|
||||
from camel.toolkits.hybrid_browser_toolkit_py.snapshot import PageSnapshot
|
||||
from camel.toolkits.hybrid_browser_toolkit_py.agent import PlaywrightLLMAgent
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from loguru import logger
|
||||
from app.component.environment import env
|
||||
from app.exception.exception import ProgramException
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class BrowserSession(BaseHybridBrowserSession):
    # Session variant that attaches to an already-running browser over CDP
    # (e.g. the Electron-hosted browser) instead of launching its own.

    async def _ensure_browser_inner(self) -> None:
        from playwright.async_api import async_playwright

        # Already initialized — nothing to do.
        if self._page is not None:
            return

        self._playwright = await async_playwright().start()

        # Prepare stealth options
        # NOTE(review): launch_options/context_options are built here but
        # never used on the connect_over_cdp path below — confirm whether
        # they are still needed.
        launch_options: Dict[str, Any] = {"headless": self._headless}
        context_options: Dict[str, Any] = {}
        if self._stealth and self._stealth_config:
            # Use preloaded stealth configuration
            launch_options["args"] = self._stealth_config["launch_args"]
            context_options.update(self._stealth_config["context_options"])

        if self._user_data_dir:
            # A persistent user-data dir cannot be combined with attaching
            # over CDP, so this configuration is rejected outright.
            raise ProgramException("connect over cdp does not support set user_data_dir")
            # Path(self._user_data_dir).mkdir(parents=True, exist_ok=True)
            # pl = self._playwright
            # assert pl is not None
            # self._context = await pl.chromium.launch_persistent_context(
            #     user_data_dir=self._user_data_dir,
            #     headless=self._headless,
            # )
            # self._browser = self._context.browser
        else:
            pl = self._playwright
            assert pl is not None
            # self._browser = await pl.chromium.launch(headless=self._headless)
            # Attach to the externally-managed browser over CDP.
            port = env("browser_port", 9222)
            self._browser = await pl.chromium.connect_over_cdp(f"http://localhost:{port}")
            self._context = self._browser.contexts[0]

        # Reuse an already open page (persistent context may restore last
        # session)
        # if self._context.pages:
        #     self._page = self._context.pages[0]
        # else:
        #     self._page = await self._context.new_page()

        # Debug information to help trace concurrency issues

        # Initialize _pages as empty list
        self._pages = {}

        for index, item in enumerate(self._context.pages):
            # NOTE(review): this matches pages whose URL merely begins with
            # "about:blank" but is not exactly "about:blank" (e.g. with a
            # fragment) — confirm this is the intended tab-claiming marker.
            if item.url.startswith("about:blank") and item.url != "about:blank":
                tab_id = "tab-" + str(index)
                self._page = item
                self._pages[tab_id] = self._page
                self._current_tab_id = tab_id
                # Normalize the claimed tab back to a plain blank page.
                await item.goto("about:blank")
                break

        # If no suitable page found, create a new one
        if not self._page:
            logger.debug(json.dumps([item.url for item in self._context.pages]))
            await asyncio.sleep(3)  # wait 3 sec, retry get new page
            await self.get_new_tab()
            logger.debug(json.dumps([item.url for item in self._context.pages]))
            if not self._page:
                raise ProgramException("Electron does't has page")

        # Apply stealth modifications if enabled
        if self._stealth and self._stealth_script:
            try:
                await self._page.add_init_script(self._stealth_script)
                logger.debug("Applied stealth script to main page")
            except Exception as e:
                logger.warning(f"Failed to apply stealth script: {e}")

        # Set up timeout for navigation
        self._page.set_default_navigation_timeout(self._navigation_timeout)
        self._page.set_default_timeout(self._navigation_timeout)

        # helpers
        self.snapshot = PageSnapshot(self._page)
        self.executor = ActionExecutor(
            self._page,
            self,
            default_timeout=self._default_timeout,
            short_timeout=self._short_timeout,
        )
        logger.info("Browser session initialized successfully")

    async def get_new_tab(self):
        # Claim the next unclaimed blank tab from the shared context and
        # make it the current page.
        assert self._context is not None

        # Initialize _pages if not already done
        if not hasattr(self, "_pages") or self._pages is None:
            self._pages = {}

        for index, item in enumerate(self._context.pages):
            # Same tab-claiming marker as in _ensure_browser_inner.
            if item.url.startswith("about:blank") and item.url != "about:blank":
                tab_id = "tab-" + str(index)
                self._pages[tab_id] = item
                await item.goto("about:blank")
                self._page = item
                self._current_tab_id = tab_id
                break
|
||||
|
||||
class HybridBrowserPythonToolkit(BaseHybridBrowserToolkit, AbstractToolkit):
|
||||
agent_name: str = Agents.search_agent
|
||||
|
||||
    def __init__(
        self,
        api_task_id: str,
        *,
        headless: bool = False,
        user_data_dir: str | None = None,
        stealth: bool = False,
        web_agent_model: BaseModelBackend | None = None,
        cache_dir: str = os.path.expanduser("~/.eigent/tmp/"),
        enabled_tools: List[str] | None = None,
        browser_log_to_file: bool = False,
        session_id: str | None = None,
        default_start_url: str = "https://google.com/",
        default_timeout: int | None = None,
        short_timeout: int | None = None,
        navigation_timeout: int | None = None,
        network_idle_timeout: int | None = None,
        screenshot_timeout: int | None = None,
        page_stability_timeout: int | None = None,
        dom_content_loaded_timeout: int | None = None,
    ) -> None:
        """Configure the toolkit and create a CDP-backed BrowserSession.

        NOTE(review): this deliberately does not call super().__init__ —
        it mirrors the base initializer but substitutes the local
        BrowserSession; verify it stays in sync with base-class changes.
        """
        self.api_task_id = api_task_id
        self._headless = headless
        self._user_data_dir = user_data_dir
        self._stealth = stealth
        self._web_agent_model = web_agent_model
        self._cache_dir = cache_dir
        self._browser_log_to_file = browser_log_to_file
        self._default_start_url = default_start_url
        self._session_id = session_id or "default"

        # Store timeout configuration
        self._default_timeout = default_timeout
        self._short_timeout = short_timeout
        self._navigation_timeout = ConfigLoader.get_navigation_timeout(navigation_timeout)
        self._network_idle_timeout = ConfigLoader.get_network_idle_timeout(network_idle_timeout)
        self._screenshot_timeout = ConfigLoader.get_screenshot_timeout(screenshot_timeout)
        self._page_stability_timeout = ConfigLoader.get_page_stability_timeout(page_stability_timeout)
        self._dom_content_loaded_timeout = ConfigLoader.get_dom_content_loaded_timeout(dom_content_loaded_timeout)

        # Logging configuration - fixed values for simplicity
        self.enable_action_logging = True
        self.enable_timing_logging = True
        self.enable_page_loading_logging = True
        self.log_to_console = False  # Always disabled for cleaner output
        self.log_to_file = browser_log_to_file
        self.max_log_length = None  # No truncation for file logs

        # Set up log file if needed
        if self.log_to_file:
            # Create log directory if it doesn't exist
            log_dir = "browser_log"
            os.makedirs(log_dir, exist_ok=True)

            timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
            self.log_file_path: str | None = os.path.join(
                log_dir, f"hybrid_browser_toolkit_{timestamp}_{session_id}.log"
            )
        else:
            self.log_file_path = None

        # Initialize log buffer for in-memory storage
        self.log_buffer: List[Dict[str, Any]] = []

        # Configure enabled tools
        if enabled_tools is None:
            self.enabled_tools = self.DEFAULT_TOOLS.copy()
        else:
            # Validate enabled tools
            invalid_tools = [tool for tool in enabled_tools if tool not in self.ALL_TOOLS]
            if invalid_tools:
                raise ValueError(f"Invalid tools specified: {invalid_tools}. Available tools: {self.ALL_TOOLS}")
            self.enabled_tools = enabled_tools.copy()

        logger.info(f"Enabled tools: {self.enabled_tools}")

        # Log initialization if file logging is enabled
        if self.log_to_file:
            logger.info("HybridBrowserToolkit initialized with file logging enabled")
            logger.info(f"Log file path: {self.log_file_path}")

        # Core components
        temp_session = BrowserSession(
            headless=headless,
            user_data_dir=user_data_dir,
            stealth=stealth,
            session_id=session_id,
            default_timeout=default_timeout,
            short_timeout=short_timeout,
        )

        # Use the session directly - singleton logic is handled in
        # ensure_browser
        self._session = temp_session
        self._agent: PlaywrightLLMAgent | None = None
        self._unified_script = self._load_unified_analyzer()
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_open)
async def browser_open(self) -> Dict[str, str]:
    """Open the browser; pass-through to the base toolkit, instrumented by the decorator."""
    return await super().browser_open()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_close)
async def browser_close(self) -> str:
    """Close the browser; pass-through to the base toolkit, instrumented by the decorator."""
    return await super().browser_close()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_visit_page, lambda _, url: url)
async def browser_visit_page(self, url: str) -> Dict[str, Any]:
    r"""Navigates to a URL.

    This method creates a new tab for the URL instead of navigating
    in the current tab, allowing better multi-tab management.

    Args:
        url (str): The web address to load in the browser. A scheme-less
            value (e.g. ``example.com``) is prefixed with ``https://``.

    Returns:
        Dict[str, Any]: A dictionary containing the result, snapshot, and
            tab information.
    """
    # Validate before touching the browser so bad input never spawns a tab.
    if not url or not isinstance(url, str):
        return {
            "result": "Error: 'url' must be a non-empty string",
            "snapshot": "",
            "tabs": [],
            "current_tab": 0,
            "total_tabs": 1,
        }

    if "://" not in url:
        url = f"https://{url}"

    await self._ensure_browser()
    session = await self._get_session()

    # Fixed: the previous message claimed "current tab" although a new tab
    # is opened below whenever the current page is not about:blank.
    logger.info(f"Navigating to URL: {url}")

    # Reuse a pristine about:blank tab; otherwise open a fresh one.
    if not (await session.get_page()).url.startswith("about:blank"):
        await session.get_new_tab()

    nav_result = await session.visit(url)

    # Get snapshot (best effort - navigation already succeeded).
    snapshot = ""
    try:
        snapshot = await session.get_snapshot(force_refresh=True, diff_only=False)
    except Exception as e:
        logger.warning(f"Failed to capture snapshot: {e}")

    # Get tab information
    tab_info = await self._get_tab_info_for_output()

    return {"result": nav_result, "snapshot": snapshot, **tab_info}
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_back)
async def browser_back(self) -> Dict[str, Any]:
    """Go back in history; pass-through to the base toolkit, instrumented by the decorator."""
    return await super().browser_back()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_forward)
async def browser_forward(self) -> Dict[str, Any]:
    """Go forward in history; pass-through to the base toolkit, instrumented by the decorator."""
    return await super().browser_forward()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_click)
async def browser_click(self, *, ref: str) -> Dict[str, Any]:
    """Click the element identified by snapshot ref; pass-through to the base toolkit."""
    return await super().browser_click(ref=ref)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_type)
async def browser_type(self, *, ref: str, text: str) -> Dict[str, Any]:
    """Type text into the element identified by snapshot ref; pass-through to the base toolkit."""
    return await super().browser_type(ref=ref, text=text)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_switch_tab)
async def browser_switch_tab(self, *, tab_id: str) -> Dict[str, Any]:
    """Switch focus to the given tab; pass-through to the base toolkit."""
    return await super().browser_switch_tab(tab_id=tab_id)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_select)
async def browser_select(self, *, ref: str, value: str) -> Dict[str, str]:
    """Select an option in the referenced element; pass-through to the base toolkit."""
    return await super().browser_select(ref=ref, value=value)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_scroll)
async def browser_scroll(self, *, direction: str, amount: int) -> Dict[str, str]:
    """Scroll the page by `amount` pixels in `direction`; pass-through to the base toolkit."""
    return await super().browser_scroll(direction=direction, amount=amount)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_wait_user)
async def browser_wait_user(self, timeout_sec: float | None = None) -> Dict[str, str]:
    """Pause for user interaction (optionally time-limited); pass-through to the base toolkit."""
    return await super().browser_wait_user(timeout_sec)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_enter)
async def browser_enter(self) -> Dict[str, str]:
    """Press Enter on the focused element; pass-through to the base toolkit."""
    return await super().browser_enter()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_solve_task)
async def browser_solve_task(self, task_prompt: str, start_url: str, max_steps: int = 15) -> str:
    """Delegate a browsing task to the LLM web agent; pass-through to the base toolkit."""
    return await super().browser_solve_task(task_prompt, start_url, max_steps)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_get_page_snapshot)
async def browser_get_page_snapshot(self) -> str:
    """Return a textual snapshot of the current page; pass-through to the base toolkit."""
    return await super().browser_get_page_snapshot()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_get_som_screenshot)
async def browser_get_som_screenshot(self):
    """Capture a set-of-marks screenshot; pass-through to the base toolkit."""
    return await super().browser_get_som_screenshot()
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_get_page_links)
async def browser_get_page_links(self, *, ref: List[str]) -> Dict[str, Any]:
    """Resolve the links behind the given snapshot refs; pass-through to the base toolkit."""
    return await super().browser_get_page_links(ref=ref)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_close_tab)
async def browser_close_tab(self, *, tab_id: str) -> Dict[str, Any]:
    """Close the given tab; pass-through to the base toolkit."""
    return await super().browser_close_tab(tab_id=tab_id)
|
||||
|
||||
@listen_toolkit(BaseHybridBrowserToolkit.browser_get_tab_info)
async def browser_get_tab_info(self) -> Dict[str, Any]:
    """List open tabs and the current tab; pass-through to the base toolkit."""
    return await super().browser_get_tab_info()
|
||||
|
||||
@classmethod
def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
    """Build the default browser FunctionTool set for a task.

    Creates a fresh toolkit (headed, stealth mode, file logging, random
    8-char session id, blank start page) and wraps its commonly used
    browser methods. ``browser_solve_task`` is exposed only when a
    web-agent model backend is configured on the toolkit.
    """
    browser = HybridBrowserPythonToolkit(
        api_task_id,
        headless=False,
        browser_log_to_file=True,
        stealth=True,
        session_id=str(uuid.uuid4())[:8],
        default_start_url="about:blank",
    )

    base_tools = [
        FunctionTool(browser.browser_click),
        FunctionTool(browser.browser_type),
        FunctionTool(browser.browser_back),
        FunctionTool(browser.browser_forward),
        FunctionTool(browser.browser_switch_tab),
        FunctionTool(browser.browser_enter),
        FunctionTool(browser.browser_visit_page),
        FunctionTool(browser.browser_scroll),
        FunctionTool(browser.browser_get_som_screenshot),
        # FunctionTool(browser.select),
        # FunctionTool(browser.wait_user),
    ]

    # The autonomous task-solver needs an LLM backend to drive it.
    if browser.web_agent_model is not None:
        base_tools.append(FunctionTool(browser.browser_solve_task))

    return base_tools
|
||||
|
||||
@classmethod
def toolkit_name(cls) -> str:
    """Human-readable toolkit name shown to users/UI."""
    return "Browser Toolkit"
|
||||
|
||||
def clone_for_new_session(self, new_session_id: str | None = None) -> "HybridBrowserPythonToolkit":
    """Create an independent toolkit with the same configuration but a fresh
    browser session and its own cache directory.

    Args:
        new_session_id: Session id for the clone; a random 8-char id is
            generated when omitted.
    """
    if new_session_id is None:
        new_session_id = str(uuid.uuid4())[:8]

    # NOTE(review): clone cache dir is "<cache>_clone_<id>/" here, while the
    # TS-backed toolkit uses "<cache>/_clone_<id>/" - confirm which layout
    # is intended.
    return HybridBrowserPythonToolkit(
        self.api_task_id,
        headless=self._headless,
        user_data_dir=self._user_data_dir,
        stealth=self._stealth,
        web_agent_model=self._web_agent_model,
        cache_dir=f"{self._cache_dir.rstrip('/')}_clone_{new_session_id}/",
        enabled_tools=self.enabled_tools.copy(),
        browser_log_to_file=self._browser_log_to_file,
        session_id=new_session_id,
        default_start_url=self._default_start_url,
        default_timeout=self._default_timeout,
        short_timeout=self._short_timeout,
        navigation_timeout=self._navigation_timeout,
        network_idle_timeout=self._network_idle_timeout,
        screenshot_timeout=self._screenshot_timeout,
        page_stability_timeout=self._page_stability_timeout,
        dom_content_loaded_timeout=self._dom_content_loaded_timeout,
    )
|
||||
|
||||
async def _get_session(self) -> BrowserSession:
    """Return the active BrowserSession via the base implementation."""
    return await super()._get_session()  # type: ignore
|
||||
260
backend/app/utils/toolkit/hybrid_browser_toolkit.py
Normal file
260
backend/app/utils/toolkit/hybrid_browser_toolkit.py
Normal file
@ -0,0 +1,260 @@
|
||||
import os
|
||||
import subprocess
|
||||
import time
|
||||
from typing import Any, Dict, List
|
||||
from camel.models import BaseModelBackend
|
||||
from camel.toolkits.hybrid_browser_toolkit.hybrid_browser_toolkit_ts import (
|
||||
HybridBrowserToolkit as BaseHybridBrowserToolkit,
|
||||
)
|
||||
from camel.toolkits.hybrid_browser_toolkit.ws_wrapper import WebSocketBrowserWrapper as BaseWebSocketBrowserWrapper
|
||||
from loguru import logger
|
||||
import websockets
|
||||
from app.component.command import bun, uv
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class WebSocketBrowserWrapper(BaseWebSocketBrowserWrapper):
    """Wrapper that boots the bundled TypeScript browser server before connecting.

    Overrides ``start`` to: (1) install npm dependencies on first run,
    (2) build the TS sources, (3) launch the Node WebSocket server and wait
    for its ``SERVER_READY:<port>`` stdout line, then (4) connect and send
    the ``init`` command with ``self.config``.
    """

    async def start(self):
        """Start (and if needed build) the Node WebSocket server, then connect.

        Raises:
            RuntimeError: if npm install/build fails, the server dies or does
                not announce readiness in time, or the websocket connection
                cannot be established.
        """
        # Check if node_modules exists (dependencies installed)
        node_modules_path = os.path.join(self.ts_dir, "node_modules")
        if not os.path.exists(node_modules_path):
            logger.warning("Node modules not found. Running npm install...")
            install_result = subprocess.run(
                [uv(), "run", "npm", "install"],
                cwd=self.ts_dir,
                capture_output=True,
                text=True,
            )
            if install_result.returncode != 0:
                logger.error(f"npm install failed: {install_result.stderr}")
                raise RuntimeError(
                    f"Failed to install npm dependencies: {install_result.stderr}\n"  # noqa:E501
                    f"Please run 'npm install' in {self.ts_dir} manually."
                )
            logger.info("npm dependencies installed successfully")

        # Ensure the TypeScript code is built (runs on every start, not just
        # after install, so stale builds are refreshed).
        build_result = subprocess.run(
            [uv(), "run", "npm", "run", "build"],
            cwd=self.ts_dir,
            capture_output=True,
            text=True,
        )
        if build_result.returncode != 0:
            logger.error(f"TypeScript build failed: {build_result.stderr}")
            raise RuntimeError(f"TypeScript build failed: {build_result.stderr}")

        # Start the WebSocket server
        self.process = subprocess.Popen(
            [uv(), "run", "node", "websocket-server.js"],  # bun not support playwright, use uv nodejs-bin
            cwd=self.ts_dir,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            text=True,
        )

        # Wait for server to output the port
        server_ready = False
        timeout = 10  # 10 seconds timeout
        start_time = time.time()

        # NOTE(review): readline() blocks, so the 10 s budget is only checked
        # between output lines - a silent child could stall this loop longer.
        while not server_ready and time.time() - start_time < timeout:
            if self.process.poll() is not None:
                # Process died
                stderr = self.process.stderr.read()  # type: ignore
                raise RuntimeError(f"WebSocket server failed to start: {stderr}")

            try:
                line = self.process.stdout.readline()  # type: ignore
                logger.debug(f"WebSocket server output: {line}")
                if line.startswith("SERVER_READY:"):
                    self.server_port = int(line.split(":")[1].strip())
                    server_ready = True
                    logger.info(f"WebSocket server ready on port {self.server_port}")
            except (ValueError, IndexError):
                # Malformed SERVER_READY line - keep scanning output.
                continue

        if not server_ready:
            self.process.kill()
            raise RuntimeError("WebSocket server failed to start within timeout")

        # Connect to the WebSocket server
        try:
            self.websocket = await websockets.connect(
                f"ws://localhost:{self.server_port}",
                ping_interval=30,
                ping_timeout=10,
                max_size=50 * 1024 * 1024,  # 50MB limit to match server
            )
            logger.info("Connected to WebSocket server")
        except Exception as e:
            self.process.kill()
            raise RuntimeError(f"Failed to connect to WebSocket server: {e}") from e

        # Initialize the browser toolkit
        logger.debug(f"send init {self.config}")
        await self._send_command("init", self.config)
        logger.debug("WebSocket server initialized successfully")
|
||||
|
||||
|
||||
# Process-wide singleton: shared by all HybridBrowserToolkit instances so a
# single Node WebSocket browser server is spawned per backend process.
websocket_browser_wrapper = None
"""ensure only one instance of websocket_browser_wrapper"""
|
||||
|
||||
|
||||
class HybridBrowserToolkit(BaseHybridBrowserToolkit, AbstractToolkit):
    """TypeScript/WebSocket-backed browser toolkit wired into the app's task system.

    Every ``browser_*`` method is a thin pass-through to the camel base
    implementation; the ``@listen_toolkit`` decorator reports each call to
    the task queue so the UI can display tool activity.
    """

    # Agent the browser tools are attributed to.
    agent_name: str = Agents.search_agent

    def __init__(
        self,
        api_task_id: str,
        *,
        headless: bool = False,
        user_data_dir: str | None = None,
        stealth: bool = True,
        web_agent_model: BaseModelBackend | None = None,
        cache_dir: str = "tmp/",
        enabled_tools: List[str] | None = None,
        browser_log_to_file: bool = False,
        session_id: str | None = None,
        default_start_url: str = "https://google.com/",
        default_timeout: int | None = None,
        short_timeout: int | None = None,
        navigation_timeout: int | None = None,
        network_idle_timeout: int | None = None,
        screenshot_timeout: int | None = None,
        page_stability_timeout: int | None = None,
        dom_content_loaded_timeout: int | None = None,
        viewport_limit: bool = False,
        connect_over_cdp: bool = False,
        cdp_url: str | None = None,
    ) -> None:
        """Bind the toolkit to a task.

        Args:
            api_task_id: Id of the task this toolkit reports events to.
            (all other keyword arguments are forwarded unchanged to the
            camel base toolkit.)
        """
        self.api_task_id = api_task_id
        super().__init__(
            headless=headless,
            user_data_dir=user_data_dir,
            stealth=stealth,
            web_agent_model=web_agent_model,
            cache_dir=cache_dir,
            enabled_tools=enabled_tools,
            browser_log_to_file=browser_log_to_file,
            session_id=session_id,
            default_start_url=default_start_url,
            default_timeout=default_timeout,
            short_timeout=short_timeout,
            navigation_timeout=navigation_timeout,
            network_idle_timeout=network_idle_timeout,
            screenshot_timeout=screenshot_timeout,
            page_stability_timeout=page_stability_timeout,
            dom_content_loaded_timeout=dom_content_loaded_timeout,
            viewport_limit=viewport_limit,
            connect_over_cdp=connect_over_cdp,
            cdp_url=cdp_url,
        )

    async def _ensure_ws_wrapper(self):
        """Ensure the process-wide WebSocket wrapper exists and is started.

        Bug fix: ``start()`` is now awaited only when the shared wrapper is
        first created. Previously every instance attaching to the existing
        global wrapper awaited ``start()`` again, which spawned a duplicate
        Node server process and re-connected the shared websocket.
        NOTE(review): still not safe if two coroutines race through the
        None-check concurrently - confirm callers serialize initialization
        or add an asyncio.Lock.
        """
        if self._ws_wrapper is None:
            global websocket_browser_wrapper
            if websocket_browser_wrapper is None:
                websocket_browser_wrapper = WebSocketBrowserWrapper(self._ws_config)
                self._ws_wrapper = websocket_browser_wrapper
                await self._ws_wrapper.start()
            else:
                self._ws_wrapper = websocket_browser_wrapper

    def clone_for_new_session(self, new_session_id: str | None = None) -> "HybridBrowserToolkit":
        """Create a toolkit with identical configuration but its own session
        id and a dedicated clone cache directory.

        Args:
            new_session_id: Session id for the clone; a random 8-char id is
                generated when omitted.
        """
        import uuid

        if new_session_id is None:
            new_session_id = str(uuid.uuid4())[:8]

        return HybridBrowserToolkit(
            self.api_task_id,
            headless=self._headless,
            user_data_dir=self._user_data_dir,
            stealth=self._stealth,
            web_agent_model=self._web_agent_model,
            cache_dir=f"{self._cache_dir.rstrip('/')}/_clone_{new_session_id}/",
            enabled_tools=self.enabled_tools.copy(),
            browser_log_to_file=self._browser_log_to_file,
            session_id=new_session_id,
            default_start_url=self._default_start_url,
            default_timeout=self._default_timeout,
            short_timeout=self._short_timeout,
            navigation_timeout=self._navigation_timeout,
            network_idle_timeout=self._network_idle_timeout,
            screenshot_timeout=self._screenshot_timeout,
            page_stability_timeout=self._page_stability_timeout,
            dom_content_loaded_timeout=self._dom_content_loaded_timeout,
            viewport_limit=self._viewport_limit,
            connect_over_cdp=self.config_loader.get_browser_config().connect_over_cdp,
            cdp_url=self.config_loader.get_browser_config().cdp_url,
        )

    @classmethod
    def toolkit_name(cls) -> str:
        """Human-readable toolkit name shown to users/UI."""
        return "Browser Toolkit"

    @listen_toolkit(BaseHybridBrowserToolkit.browser_open)
    async def browser_open(self) -> Dict[str, Any]:
        """Open the browser; pass-through to the base toolkit."""
        return await super().browser_open()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_close)
    async def browser_close(self) -> str:
        """Close the browser; pass-through to the base toolkit."""
        return await super().browser_close()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_visit_page)
    async def browser_visit_page(self, url: str) -> Dict[str, Any]:
        """Navigate to a URL; pass-through to the base toolkit."""
        return await super().browser_visit_page(url)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_back)
    async def browser_back(self) -> Dict[str, Any]:
        """Go back in history; pass-through to the base toolkit."""
        return await super().browser_back()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_forward)
    async def browser_forward(self) -> Dict[str, Any]:
        """Go forward in history; pass-through to the base toolkit."""
        return await super().browser_forward()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_get_page_snapshot)
    async def browser_get_page_snapshot(self) -> str:
        """Return a textual page snapshot; pass-through to the base toolkit."""
        return await super().browser_get_page_snapshot()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_get_som_screenshot)
    async def browser_get_som_screenshot(self, read_image: bool = False, instruction: str | None = None) -> str:
        """Capture a set-of-marks screenshot; pass-through to the base toolkit."""
        return await super().browser_get_som_screenshot(read_image, instruction)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_click)
    async def browser_click(self, *, ref: str) -> Dict[str, Any]:
        """Click the referenced element; pass-through to the base toolkit."""
        return await super().browser_click(ref=ref)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_type)
    async def browser_type(self, *, ref: str, text: str) -> Dict[str, Any]:
        """Type text into the referenced element; pass-through to the base toolkit."""
        return await super().browser_type(ref=ref, text=text)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_select)
    async def browser_select(self, *, ref: str, value: str) -> Dict[str, Any]:
        """Select an option in the referenced element; pass-through to the base toolkit."""
        return await super().browser_select(ref=ref, value=value)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_scroll)
    async def browser_scroll(self, *, direction: str, amount: int = 500) -> Dict[str, Any]:
        """Scroll the page; pass-through to the base toolkit."""
        return await super().browser_scroll(direction=direction, amount=amount)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_enter)
    async def browser_enter(self) -> Dict[str, Any]:
        """Press Enter; pass-through to the base toolkit."""
        return await super().browser_enter()

    @listen_toolkit(BaseHybridBrowserToolkit.browser_wait_user)
    async def browser_wait_user(self, timeout_sec: float | None = None) -> Dict[str, Any]:
        """Wait for user interaction; pass-through to the base toolkit."""
        return await super().browser_wait_user(timeout_sec)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_switch_tab)
    async def browser_switch_tab(self, *, tab_id: str) -> Dict[str, Any]:
        """Switch to the given tab; pass-through to the base toolkit."""
        return await super().browser_switch_tab(tab_id=tab_id)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_close_tab)
    async def browser_close_tab(self, *, tab_id: str) -> Dict[str, Any]:
        """Close the given tab; pass-through to the base toolkit."""
        return await super().browser_close_tab(tab_id=tab_id)

    @listen_toolkit(BaseHybridBrowserToolkit.browser_get_tab_info)
    async def browser_get_tab_info(self) -> Dict[str, Any]:
        """List open tabs and the current tab; pass-through to the base toolkit."""
        return await super().browser_get_tab_info()
|
||||
40
backend/app/utils/toolkit/image_analysis_toolkit.py
Normal file
40
backend/app/utils/toolkit/image_analysis_toolkit.py
Normal file
@ -0,0 +1,40 @@
|
||||
from camel.models import BaseModelBackend
|
||||
from camel.toolkits import ImageAnalysisToolkit as BaseImageAnalysisToolkit
|
||||
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class ImageAnalysisToolkit(BaseImageAnalysisToolkit, AbstractToolkit):
    """Image-analysis toolkit bound to an API task.

    Both methods delegate to the camel base toolkit; the decorator reports
    each invocation to the task's listener queue.
    """

    agent_name: str = Agents.multi_modal_agent

    def __init__(
        self,
        api_task_id: str,
        model: BaseModelBackend | None = None,
        timeout: float | None = None,
    ):
        self.api_task_id = api_task_id
        super().__init__(model, timeout)

    @listen_toolkit(
        BaseImageAnalysisToolkit.image_to_text,
        lambda _, image_path, sys_prompt: f"transcribe image from {image_path} and ask sys_prompt: {sys_prompt}",
    )
    def image_to_text(self, image_path: str, sys_prompt: str | None = None) -> str:
        """Transcribe the image at `image_path` via the base toolkit."""
        transcription = super().image_to_text(image_path, sys_prompt)
        return transcription

    @listen_toolkit(
        BaseImageAnalysisToolkit.ask_question_about_image,
        lambda _, image_path, question, sys_prompt: f"transcribe image from {image_path} and ask question: {question} with sys_prompt: {sys_prompt}",
    )
    def ask_question_about_image(
        self, image_path: str, question: str, sys_prompt: str | None = None
    ) -> str:
        """Answer `question` about the image at `image_path` via the base toolkit."""
        answer = super().ask_question_about_image(image_path, question, sys_prompt)
        return answer
|
||||
42
backend/app/utils/toolkit/linkedin_toolkit.py
Normal file
42
backend/app/utils/toolkit/linkedin_toolkit.py
Normal file
@ -0,0 +1,42 @@
|
||||
from camel.toolkits import LinkedInToolkit as BaseLinkedInToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class LinkedInToolkit(BaseLinkedInToolkit, AbstractToolkit):
    """LinkedIn toolkit wrapper that reports each call to the task queue."""

    agent_name: str = Agents.social_medium_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseLinkedInToolkit.create_post,
        lambda _, text: f"create a LinkedIn post with text: {text}",
    )
    def create_post(self, text: str) -> dict:
        """Create a LinkedIn post via the base toolkit."""
        response = super().create_post(text)
        return response

    @listen_toolkit(
        BaseLinkedInToolkit.delete_post,
        lambda _, post_id: f"delete LinkedIn post with id: {post_id}",
    )
    def delete_post(self, post_id: str) -> str:
        """Delete the LinkedIn post `post_id` via the base toolkit."""
        outcome = super().delete_post(post_id)
        return outcome

    @listen_toolkit(
        BaseLinkedInToolkit.get_profile,
        lambda _, include_id: f"get LinkedIn profile with include_id: {include_id}",
    )
    def get_profile(self, include_id: bool = False) -> dict:
        """Fetch the authenticated user's profile via the base toolkit."""
        profile = super().get_profile(include_id)
        return profile

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Expose LinkedIn tools only when an access token is configured."""
        if not env("LINKEDIN_ACCESS_TOKEN"):
            return []
        return LinkedInToolkit(api_task_id).get_tools()
|
||||
18
backend/app/utils/toolkit/markitdown_toolkit.py
Normal file
18
backend/app/utils/toolkit/markitdown_toolkit.py
Normal file
@ -0,0 +1,18 @@
|
||||
from typing import Dict, List
|
||||
from camel.toolkits import MarkItDownToolkit as BaseMarkItDownToolkit
|
||||
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class MarkItDownToolkit(BaseMarkItDownToolkit, AbstractToolkit):
    """MarkItDown file-reading toolkit bound to an API task."""

    agent_name: str = Agents.document_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        self.api_task_id = api_task_id
        super().__init__(timeout=timeout)

    @listen_toolkit(BaseMarkItDownToolkit.read_files)
    def read_files(self, file_paths: List[str]) -> Dict[str, str]:
        """Read the given files and return a path-to-markdown mapping."""
        contents = super().read_files(file_paths)
        return contents
|
||||
59
backend/app/utils/toolkit/mcp_search_toolkit.py
Normal file
59
backend/app/utils/toolkit/mcp_search_toolkit.py
Normal file
@ -0,0 +1,59 @@
|
||||
from typing import Any, List
|
||||
from camel.toolkits import BaseToolkit, FunctionTool
|
||||
import httpx
|
||||
from app.service.task import Action, ActionSearchMcpData, Agents, get_task_lock
|
||||
from app.component.environment import env_not_empty
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class McpSearchToolkit(BaseToolkit, AbstractToolkit):
    """Search a remote MCP server registry and stream results to the task queue."""

    agent_name: str = Agents.mcp_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        inputs=lambda _, keyword, size, page: f"keyword: {keyword}, size: {size}, page: {page}",
        return_msg=lambda res: f"Search {len(res)} results: ",
    )
    async def search(
        self,
        keyword: str,
        size: int = 15,
        page: int = 0,
    ) -> dict[str, Any]:
        """Search mcp server for keyword.

        Args:
            keyword (str): mcp server name keyword.
            size (int): count per page.
            page (int): page.

        Returns:
            dict[str, Any]: the registry's JSON payload; its "items" list is
                also pushed to the task queue for the UI.

        Raises:
            RuntimeError: if the registry responds with a non-200 status.
        """
        async with httpx.AsyncClient() as client:
            response = await client.get(
                env_not_empty("MCP_URL"),
                params={
                    "keyword": keyword,
                    "size": size,
                    "page": page,
                },
            )
            if response.status_code != 200:
                # Specific exception type (was bare Exception) so callers can
                # distinguish registry failures; message text is unchanged.
                raise RuntimeError(f"MCP server search failed: {response.text}")
            data = response.json()
            # Forward the result items to the task's event queue for the UI.
            task_lock = get_task_lock(self.api_task_id)
            await task_lock.put_queue(
                ActionSearchMcpData(action=Action.search_mcp, data=data["items"])
            )
            return data

    def get_tools(self) -> List[FunctionTool]:
        """Expose the async search as the toolkit's single FunctionTool."""
        return [FunctionTool(self.search)]
|
||||
41
backend/app/utils/toolkit/note_taking_toolkit.py
Normal file
41
backend/app/utils/toolkit/note_taking_toolkit.py
Normal file
@ -0,0 +1,41 @@
|
||||
import os
|
||||
from camel.toolkits import NoteTakingToolkit as BaseNoteTakingToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class NoteTakingToolkit(BaseNoteTakingToolkit, AbstractToolkit):
    """Note-taking toolkit bound to an API task and reported via listeners."""

    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        agent_name: str | None = None,
        working_directory: str | None = None,
        timeout: float | None = None,
    ) -> None:
        """Bind the toolkit to a task; `agent_name` overrides the class default.

        When `working_directory` is omitted it is derived from the
        `file_save_path` env var (fallback ~/.eigent/notes).
        """
        self.api_task_id = api_task_id
        if agent_name is not None:
            self.agent_name = agent_name
        if working_directory is None:
            # os.path.join avoids the double slash the previous string
            # concatenation produced when `file_save_path` ends with "/".
            # NOTE(review): "note.md" (a file name) is passed as
            # working_directory - confirm the base toolkit expects this.
            working_directory = os.path.join(
                env("file_save_path", os.path.expanduser("~/.eigent/notes")), "note.md"
            )
        super().__init__(working_directory=working_directory, timeout=timeout)

    @listen_toolkit(BaseNoteTakingToolkit.append_note)
    def append_note(self, note_name: str, content: str) -> str:
        """Append content to a note; pass-through to the base toolkit."""
        return super().append_note(note_name=note_name, content=content)

    @listen_toolkit(BaseNoteTakingToolkit.read_note)
    def read_note(self) -> str:
        """Read the note contents; pass-through to the base toolkit."""
        return super().read_note()

    @listen_toolkit(BaseNoteTakingToolkit.create_note)
    def create_note(self, note_name: str, content: str = "") -> str:
        """Create a note; pass-through to the base toolkit."""
        return super().create_note(note_name, content)

    @listen_toolkit(BaseNoteTakingToolkit.list_note)
    def list_note(self) -> str:
        """List existing notes; pass-through to the base toolkit."""
        return super().list_note()
|
||||
46
backend/app/utils/toolkit/notion_mcp_toolkit.py
Normal file
46
backend/app/utils/toolkit/notion_mcp_toolkit.py
Normal file
@ -0,0 +1,46 @@
|
||||
import os
|
||||
from camel.toolkits import FunctionTool, NotionMCPToolkit as BaseNotionMCPToolkit
|
||||
from app.component.command import bun
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from camel.toolkits.mcp_toolkit import MCPToolkit
|
||||
|
||||
|
||||
class NotionMCPToolkit(BaseNotionMCPToolkit, AbstractToolkit):
    """Notion toolkit backed by the remote Notion MCP server.

    Spawns ``mcp-remote`` via bun to bridge to https://mcp.notion.com/mcp;
    auth state lives in MCP_REMOTE_CONFIG_DIR (default ~/.mcp-auth).
    """

    agent_name: str = Agents.social_medium_agent

    def __init__(
        self,
        api_task_id: str,
        timeout: float | None = None,
    ):
        """Bind to a task; timeout defaults to 120 s for the remote bridge."""
        self.api_task_id = api_task_id
        if timeout is None:
            timeout = 120.0
        super().__init__(timeout)
        # Replace the base MCP client with one that launches the remote
        # Notion bridge through bun.
        self._mcp_toolkit = MCPToolkit(
            config_dict={
                "mcpServers": {
                    "notionMCP": {
                        "command": bun(),
                        "args": ["x", "-y", "mcp-remote", "https://mcp.notion.com/mcp"],
                        "env": {
                            "MCP_REMOTE_CONFIG_DIR": env("MCP_REMOTE_CONFIG_DIR", os.path.expanduser("~/.mcp-auth")),
                        },
                    }
                }
            },
            timeout=timeout,
        )

    @classmethod
    async def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Connect and return the MCP tools, tagged with this toolkit's name.

        Returns an empty list unless MCP_REMOTE_CONFIG_DIR is configured.
        """
        tools = []
        if env("MCP_REMOTE_CONFIG_DIR"):
            toolkit = cls(api_task_id)
            await toolkit.connect()
            for item in toolkit.get_tools():
                # Tag each tool so the UI can attribute it to this toolkit.
                setattr(item, "_toolkit_name", cls.__name__)
                tools.append(item)
        return tools
|
||||
50
backend/app/utils/toolkit/notion_toolkit.py
Normal file
50
backend/app/utils/toolkit/notion_toolkit.py
Normal file
@ -0,0 +1,50 @@
|
||||
from typing import List
|
||||
from camel.toolkits import NotionToolkit as BaseNotionToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class NotionToolkit(BaseNotionToolkit, AbstractToolkit):
    """Notion API toolkit bound to an API task and reported via listeners."""

    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        notion_token: str | None = None,
        timeout: float | None = None,
    ) -> None:
        super().__init__(notion_token, timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseNotionToolkit.list_all_pages,
        lambda _: "list all pages in Notion workspace",
        lambda result: f"{len(result)} pages found",
    )
    def list_all_pages(self) -> List[dict]:
        """List all pages in the workspace; pass-through to the base toolkit."""
        return super().list_all_pages()

    @listen_toolkit(
        BaseNotionToolkit.list_all_users,
        lambda _: "list all users in Notion workspace",
        lambda result: f"{len(result)} users found",
    )
    def list_all_users(self) -> List[dict]:
        """List all users in the workspace; pass-through to the base toolkit."""
        return super().list_all_users()

    @listen_toolkit(
        BaseNotionToolkit.get_notion_block_text_content,
        # Fixed: the method takes a *block* id; the old message logged
        # "page with id" and named the lambda argument page_id.
        lambda _, block_id: f"get text content of block with id: {block_id}",
    )
    def get_notion_block_text_content(self, block_id: str) -> str:
        """Fetch a block's text content; pass-through to the base toolkit."""
        return super().get_notion_block_text_content(block_id)

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> List[FunctionTool]:
        """Expose Notion tools only when NOTION_TOKEN is configured."""
        if env("NOTION_TOKEN"):
            return NotionToolkit(api_task_id).get_tools()
        else:
            return []
|
||||
50
backend/app/utils/toolkit/openai_image_toolkit.py
Normal file
50
backend/app/utils/toolkit/openai_image_toolkit.py
Normal file
@ -0,0 +1,50 @@
|
||||
import os
|
||||
from camel.toolkits import OpenAIImageToolkit as BaseOpenAIImageToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from typing import Literal
|
||||
|
||||
|
||||
class OpenAIImageToolkit(BaseOpenAIImageToolkit, AbstractToolkit):
    """OpenAI image-generation toolkit bound to an API task.

    All generation parameters are forwarded verbatim to the camel base
    toolkit; only ``api_task_id`` is added for task attribution.
    """

    agent_name: str = Agents.multi_modal_agent

    def __init__(
        self,
        api_task_id: str,
        model: None | Literal["gpt-image-1"] | Literal["dall-e-3"] | Literal["dall-e-2"] = "gpt-image-1",
        timeout: float | None = None,
        api_key: str | None = None,
        url: str | None = None,
        size: None
        | Literal["256x256"]
        | Literal["512x512"]
        | Literal["1024x1024"]
        | Literal["1536x1024"]
        | Literal["1024x1536"]
        | Literal["1792x1024"]
        | Literal["1024x1792"]
        | Literal["auto"] = "1024x1024",
        quality: None
        | Literal["auto"]
        | Literal["low"]
        | Literal["medium"]
        | Literal["high"]
        | Literal["standard"]
        | Literal["hd"] = "standard",
        response_format: None | Literal["url"] | Literal["b64_json"] = "b64_json",
        n: int | None = 1,
        background: None | Literal["transparent"] | Literal["opaque"] | Literal["auto"] = "auto",
        style: None | Literal["vivid"] | Literal["natural"] = None,
        working_directory: str | None = None,
    ):
        """Bind to a task and forward every generation option positionally
        to the base toolkit (order must match the base __init__)."""
        self.api_task_id = api_task_id
        super().__init__(
            model, timeout, api_key, url, size, quality, response_format, n, background, style, working_directory
        )

    @listen_toolkit(BaseOpenAIImageToolkit.generate_image)
    def generate_image(self, prompt: str, image_name: str = "image") -> str:
        """Generate an image from `prompt`; pass-through to the base toolkit."""
        return super().generate_image(prompt, image_name)
|
||||
44
backend/app/utils/toolkit/pptx_toolkit.py
Normal file
44
backend/app/utils/toolkit/pptx_toolkit.py
Normal file
@ -0,0 +1,44 @@
|
||||
import asyncio
|
||||
import os
|
||||
from camel.toolkits import PPTXToolkit as BasePPTXToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import ActionWriteFileData, Agents, get_task_lock
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from app.service.task import process_task
|
||||
|
||||
|
||||
class PPTXToolkit(BasePPTXToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``PPTXToolkit``.

    Writes .pptx files into the configured save directory and, on success,
    pushes an ``ActionWriteFileData`` event onto the task queue so the UI
    can surface the newly created file.
    """

    agent_name: str = Agents.document_agent

    def __init__(
        self,
        api_task_id: str,
        working_directory: str | None = None,
        timeout: float | None = None,
    ) -> None:
        self.api_task_id = api_task_id
        if working_directory is None:
            # Default to the user-configured save path, falling back to ~/Downloads.
            working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
        super().__init__(working_directory, timeout)

    @listen_toolkit(
        BasePPTXToolkit.create_presentation,
        # Fix: interpolate the actual filename instead of the literal "(unknown)".
        lambda _,
        content,
        filename,
        template=None: f"create presentation with content: {content}, filename: {filename}, template: {template}",
    )
    def create_presentation(self, content: str, filename: str, template: str | None = None) -> str:
        """Create a presentation and notify the task queue when the file is written.

        Args:
            content: Structured slide content passed to the base toolkit.
            filename: Target file name; ``.pptx`` is appended when missing.
            template: Optional template forwarded to the base implementation.

        Returns:
            The status message produced by ``BasePPTXToolkit.create_presentation``.
        """
        if not filename.lower().endswith(".pptx"):
            filename += ".pptx"

        # Resolve first so the reported path matches the file super() will write.
        file_path = self._resolve_filepath(filename)
        res = super().create_presentation(content, filename, template)
        # The base class signals success via this message; only notify the queue then.
        if "PowerPoint presentation successfully created" in res:
            task_lock = get_task_lock(self.api_task_id)
            # Fire-and-forget: enqueue the write-file event without blocking the tool call.
            asyncio.create_task(
                task_lock.put_queue(ActionWriteFileData(process_task_id=process_task.get(), data=str(file_path)))
            )
        return res
|
||||
89
backend/app/utils/toolkit/pyautogui_toolkit.py
Normal file
89
backend/app/utils/toolkit/pyautogui_toolkit.py
Normal file
@ -0,0 +1,89 @@
|
||||
import os
|
||||
from typing import List, Literal
|
||||
from camel.toolkits import PyAutoGUIToolkit as BasePyAutoGUIToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class PyAutoGUIToolkit(BasePyAutoGUIToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``PyAutoGUIToolkit``.

    Every method is a pass-through; the ``listen_toolkit`` decorators report
    each GUI-automation call to the task's event stream.
    """

    agent_name: str = Agents.search_agent

    def __init__(
        self,
        api_task_id: str,
        timeout: float | None = None,
        screenshots_dir: str | None = None,
    ):
        """Initialize the base toolkit and record the Eigent task id.

        Args:
            api_task_id: Id of the Eigent task this toolkit instance serves.
            timeout: Forwarded to the base toolkit.
            screenshots_dir: Directory for screenshots; defaults to the
                configured save path, falling back to ``~/Downloads``.
        """
        if screenshots_dir is None:
            screenshots_dir = env("file_save_path", os.path.expanduser("~/Downloads"))
        # NOTE(review): positional forwarding — order must match the base __init__.
        super().__init__(timeout, screenshots_dir)
        self.api_task_id = api_task_id

    @listen_toolkit(BasePyAutoGUIToolkit.mouse_move, lambda _, x, y: f"mouse move to {x}, {y}")
    def mouse_move(self, x: int, y: int) -> str:
        return super().mouse_move(x, y)

    @listen_toolkit(
        BasePyAutoGUIToolkit.mouse_click,
        lambda _, button="left", clicks=1, x=None, y=None: f"mouse click {button} {clicks} times at {x}, {y}",
    )
    def mouse_click(
        self,
        button: Literal["left", "middle", "right"] = "left",
        clicks: int = 1,
        x: int | None = None,
        y: int | None = None,
    ) -> str:
        return super().mouse_click(button, clicks, x, y)

    @listen_toolkit(
        BasePyAutoGUIToolkit.keyboard_type,
        lambda _, text, interval=0: f"keyboard type {text}, interval {interval}",
    )
    def keyboard_type(self, text: str, interval: float = 0) -> str:
        return super().keyboard_type(text, interval)

    @listen_toolkit(BasePyAutoGUIToolkit.take_screenshot)
    def take_screenshot(self) -> str:
        return super().take_screenshot()

    @listen_toolkit(BasePyAutoGUIToolkit.get_mouse_position)
    def get_mouse_position(self) -> str:
        return super().get_mouse_position()

    @listen_toolkit(BasePyAutoGUIToolkit.press_key, lambda _, key: f"press key {key}")
    def press_key(self, key: str | list[str]) -> str:
        return super().press_key(key)

    @listen_toolkit(BasePyAutoGUIToolkit.hotkey, lambda _, keys: f"hotkey {keys}")
    def hotkey(self, keys: List[str]) -> str:
        return super().hotkey(keys)

    @listen_toolkit(
        BasePyAutoGUIToolkit.mouse_drag,
        lambda _,
        start_x,
        start_y,
        end_x,
        end_y,
        button="left": f"mouse drag from {start_x}, {start_y} to {end_x}, {end_y} with {button} button",
    )
    def mouse_drag(
        self,
        start_x: int,
        start_y: int,
        end_x: int,
        end_y: int,
        button: Literal["left", "middle", "right"] = "left",
    ) -> str:
        return super().mouse_drag(start_x, start_y, end_x, end_y, button)

    @listen_toolkit(
        BasePyAutoGUIToolkit.scroll,
        lambda _, scroll_amount, x=None, y=None: f"scroll {scroll_amount} at {x}, {y}",
    )
    def scroll(self, scroll_amount: int, x: int | None = None, y: int | None = None) -> str:
        return super().scroll(scroll_amount, x, y)
|
||||
69
backend/app/utils/toolkit/reddit_toolkit.py
Normal file
69
backend/app/utils/toolkit/reddit_toolkit.py
Normal file
@ -0,0 +1,69 @@
|
||||
from typing import Any, Dict, List
|
||||
from camel.toolkits import RedditToolkit as BaseRedditToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class RedditToolkit(BaseRedditToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``RedditToolkit``.

    All methods delegate to the base class; ``listen_toolkit`` decorators
    report calls and results to the task's event stream.
    """

    agent_name: str = Agents.social_medium_agent

    def __init__(
        self,
        api_task_id: str,
        retries: int = 3,
        delay: float = 0,
        timeout: float | None = None,
    ):
        """Initialize the base toolkit and record the Eigent task id.

        Args:
            api_task_id: Id of the Eigent task this toolkit instance serves.
            retries, delay, timeout: forwarded verbatim to the base toolkit.
        """
        super().__init__(retries, delay, timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseRedditToolkit.collect_top_posts,
        lambda _,
        subreddit_name,
        post_limit=5,
        comment_limit=5: f"collect top posts from subreddit: {subreddit_name} with post limit: {post_limit} and comment limit: {comment_limit}",
        lambda result: f"top posts collected: {result}",
    )
    def collect_top_posts(
        self, subreddit_name: str, post_limit: int = 5, comment_limit: int = 5
    ) -> List[Dict[str, Any]] | str:
        return super().collect_top_posts(subreddit_name, post_limit, comment_limit)

    @listen_toolkit(
        BaseRedditToolkit.perform_sentiment_analysis,
        lambda _, data: f"perform sentiment analysis on data number: {len(data)}",
        lambda result: f"perform analysis result: {result}",
    )
    def perform_sentiment_analysis(self, data: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
        return super().perform_sentiment_analysis(data)

    @listen_toolkit(
        BaseRedditToolkit.track_keyword_discussions,
        lambda _,
        subreddits,
        keywords,
        post_limit=10,
        comment_limit=10,
        sentiment_analysis=False: f"track keyword discussions for subreddits: {subreddits}, keywords: {keywords}",
        lambda result: f"track keyword discussions result: {result}",
    )
    def track_keyword_discussions(
        self,
        subreddits: List[str],
        keywords: List[str],
        post_limit: int = 10,
        comment_limit: int = 10,
        sentiment_analysis: bool = False,
    ) -> List[Dict[str, Any]] | str:
        return super().track_keyword_discussions(subreddits, keywords, post_limit, comment_limit, sentiment_analysis)

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Expose Reddit tools only when all three credentials are configured."""
        if env("REDDIT_CLIENT_ID") and env("REDDIT_CLIENT_SECRET") and env("REDDIT_USER_AGENT"):
            return RedditToolkit(api_task_id).get_tools()
        else:
            return []
|
||||
27
backend/app/utils/toolkit/screenshot_toolkit.py
Normal file
27
backend/app/utils/toolkit/screenshot_toolkit.py
Normal file
@ -0,0 +1,27 @@
|
||||
import os
|
||||
from camel.toolkits import ScreenshotToolkit as BaseScreenshotToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class ScreenshotToolkit(BaseScreenshotToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``ScreenshotToolkit``.

    Screenshots are written to the configured save directory; each call is
    reported to the task's event stream via ``listen_toolkit``.
    """

    agent_name: str = Agents.developer_agent

    def __init__(self, api_task_id: str, working_directory: str | None = None, timeout: float | None = None):
        """Record the task id and initialize the base toolkit.

        Args:
            api_task_id: Id of the Eigent task this toolkit instance serves.
            working_directory: Where screenshots are saved; defaults to the
                configured save path, falling back to ``~/Downloads``.
            timeout: Forwarded to the base toolkit.
        """
        self.api_task_id = api_task_id
        if working_directory is None:
            working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
        super().__init__(working_directory, timeout)

    @listen_toolkit(BaseScreenshotToolkit.take_screenshot_and_read_image)
    def take_screenshot_and_read_image(
        self, filename: str, save_to_file: bool = True, read_image: bool = True, instruction: str | None = None
    ) -> str:
        # Pure pass-through; the decorator reports the call to the task stream.
        return super().take_screenshot_and_read_image(filename, save_to_file, read_image, instruction)

    @listen_toolkit(BaseScreenshotToolkit.read_image)
    def read_image(self, image_path: str, instruction: str = "") -> str:
        return super().read_image(image_path, instruction)
|
||||
300
backend/app/utils/toolkit/search_toolkit.py
Normal file
300
backend/app/utils/toolkit/search_toolkit.py
Normal file
@ -0,0 +1,300 @@
|
||||
from typing import Any, Dict, List, Literal
|
||||
from camel.toolkits import SearchToolkit as BaseSearchToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
import httpx
|
||||
from loguru import logger
|
||||
from app.component.environment import env, env_not_empty
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class SearchToolkit(BaseSearchToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``SearchToolkit``.

    Exposes Google and Exa search. When the corresponding local API keys are
    not configured, calls are transparently proxied through the Eigent cloud
    server, authenticated with ``cloud_api_key``.

    NOTE: the other providers supported by the base class (wiki, DuckDuckGo,
    Tavily, Brave, Bocha, Baidu, Bing, Linkup, Alibaba Tongxiao) are
    deliberately not exposed here; re-add wrappers if they are needed.
    """

    agent_name: str = Agents.search_agent

    def __init__(
        self,
        api_task_id: str,
        agent_name: str | None = None,
        timeout: float | None = None,
        exclude_domains: List[str] | None = None,
    ):
        """Record the task id (and optional agent name), then init the base toolkit.

        Args:
            api_task_id: Id of the Eigent task this toolkit instance serves.
            agent_name: Overrides the class-level agent when provided.
            timeout, exclude_domains: forwarded to the base toolkit.
        """
        self.api_task_id = api_task_id
        if agent_name is not None:
            self.agent_name = agent_name
        super().__init__(timeout=timeout, exclude_domains=exclude_domains)

    @listen_toolkit(
        BaseSearchToolkit.search_google,
        lambda _, query, search_type="web": f"with query '{query}' and {search_type} result pages",
    )
    def search_google(self, query: str, search_type: str = "web") -> list[dict[str, Any]]:
        """Search Google locally when keys are configured, otherwise via the cloud proxy."""
        if env("GOOGLE_API_KEY") and env("SEARCH_ENGINE_ID"):
            return super().search_google(query, search_type)
        else:
            return self.cloud_search_google(query, search_type)

    def cloud_search_google(self, query: str, search_type: str = "web"):
        """Proxy a Google search through the Eigent cloud server.

        Raises whatever ``env_not_empty`` raises when ``SERVER_URL`` or
        ``cloud_api_key`` is unset; httpx's default request timeout applies.
        """
        url = env_not_empty("SERVER_URL")
        res = httpx.get(
            url + "/proxy/google",
            params={"query": query, "search_type": search_type},
            headers={"api-key": env_not_empty("cloud_api_key")},
        )
        return res.json()

    @listen_toolkit(BaseSearchToolkit.search_exa, lambda _, query, *args, **kwargs: f"{query}, {args}, {kwargs}")
    def search_exa(
        self,
        query: str,
        search_type: Literal["auto", "neural", "keyword"] = "auto",
        category: None
        | Literal[
            "company",
            "research paper",
            "news",
            "pdf",
            "github",
            "tweet",
            "personal site",
            "linkedin profile",
            "financial report",
        ] = None,
        include_text: List[str] | None = None,
        exclude_text: List[str] | None = None,
        use_autoprompt: bool = True,
        text: bool = False,
    ) -> Dict[str, Any]:
        """Search Exa locally when ``EXA_API_KEY`` is set, otherwise via the cloud proxy."""
        if env("EXA_API_KEY"):
            return super().search_exa(query, search_type, category, include_text, exclude_text, use_autoprompt, text)
        else:
            return self.cloud_search_exa(query, search_type, category, include_text, exclude_text, use_autoprompt, text)

    def cloud_search_exa(
        self,
        query: str,
        search_type: Literal["auto", "neural", "keyword"] = "auto",
        category: None
        | Literal[
            "company",
            "research paper",
            "news",
            "pdf",
            "github",
            "tweet",
            "personal site",
            "linkedin profile",
            "financial report",
        ] = None,
        include_text: List[str] | None = None,
        exclude_text: List[str] | None = None,
        use_autoprompt: bool = True,
        text: bool = False,
    ):
        """Proxy an Exa search through the Eigent cloud server."""
        url = env_not_empty("SERVER_URL")
        res = httpx.post(
            url + "/proxy/exa",
            json={
                "query": query,
                "search_type": search_type,
                "category": category,
                "include_text": include_text,
                "exclude_text": exclude_text,
                "use_autoprompt": use_autoprompt,
                "text": text,
            },
            headers={"api-key": env_not_empty("cloud_api_key")},
        )
        return res.json()

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Build the tool list for providers usable either locally or via the cloud."""
        search_toolkit = SearchToolkit(api_task_id)
        tools: list[FunctionTool] = []
        # Each provider is exposed when its local keys exist OR the cloud proxy is usable.
        if (env("GOOGLE_API_KEY") and env("SEARCH_ENGINE_ID")) or env("cloud_api_key"):
            tools.append(FunctionTool(search_toolkit.search_google))
        if env("EXA_API_KEY") or env("cloud_api_key"):
            tools.append(FunctionTool(search_toolkit.search_exa))
        return tools

    def get_tools(self) -> List[FunctionTool]:
        # Instance-level default exposes only Exa search.
        return [FunctionTool(self.search_exa)]
|
||||
77
backend/app/utils/toolkit/slack_toolkit.py
Normal file
77
backend/app/utils/toolkit/slack_toolkit.py
Normal file
@ -0,0 +1,77 @@
|
||||
from camel.toolkits import SlackToolkit as BaseSlackToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from loguru import logger
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class SlackToolkit(BaseSlackToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``SlackToolkit``.

    All methods delegate to the base class; ``listen_toolkit`` decorators
    report each call to the task's event stream.
    """

    agent_name: str = Agents.social_medium_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        BaseSlackToolkit.create_slack_channel,
        lambda _, name, is_private=True: f"create a Slack channel with name: {name} and is_private: {is_private}",
    )
    def create_slack_channel(self, name: str, is_private: bool | None = True) -> str:
        return super().create_slack_channel(name, is_private)

    @listen_toolkit(
        BaseSlackToolkit.join_slack_channel,
        lambda _, channel_id: f"join Slack channel with id: {channel_id}",
    )
    def join_slack_channel(self, channel_id: str) -> str:
        return super().join_slack_channel(channel_id)

    @listen_toolkit(
        BaseSlackToolkit.leave_slack_channel,
        lambda _, channel_id: f"leave Slack channel with id: {channel_id}",
    )
    def leave_slack_channel(self, channel_id: str) -> str:
        return super().leave_slack_channel(channel_id)

    @listen_toolkit(
        BaseSlackToolkit.get_slack_channel_information,
        lambda _: "get Slack channel information",
    )
    def get_slack_channel_information(self) -> str:
        return super().get_slack_channel_information()

    @listen_toolkit(
        BaseSlackToolkit.get_slack_channel_message,
        lambda _, channel_id: f"get Slack channel message for channel id: {channel_id}",
    )
    def get_slack_channel_message(self, channel_id: str) -> str:
        return super().get_slack_channel_message(channel_id)

    @listen_toolkit(
        BaseSlackToolkit.send_slack_message,
        lambda _,
        message,
        channel_id,
        user=None: f"send Slack message: {message} to channel id: {channel_id} for user: {user}",
    )
    def send_slack_message(self, message: str, channel_id: str, user: str | None = None) -> str:
        return super().send_slack_message(message, channel_id, user)

    @listen_toolkit(
        BaseSlackToolkit.delete_slack_message,
        lambda _,
        time_stamp,
        channel_id: f"delete Slack message with timestamp: {time_stamp} in channel id: {channel_id}",
    )
    def delete_slack_message(self, time_stamp: str, channel_id: str) -> str:
        return super().delete_slack_message(time_stamp, channel_id)

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
        """Expose Slack tools only when a bot or user token is configured.

        Security fix: the previous implementation logged the raw value of
        ``SLACK_BOT_TOKEN`` at debug level, leaking a secret into log files.
        Only the presence of a token is logged now.
        """
        logger.debug(f"slack token configured: {bool(env('SLACK_BOT_TOKEN') or env('SLACK_USER_TOKEN'))}")
        if env("SLACK_BOT_TOKEN") or env("SLACK_USER_TOKEN"):
            return SlackToolkit(api_task_id).get_tools()
        else:
            return []
|
||||
104
backend/app/utils/toolkit/terminal_toolkit.py
Normal file
104
backend/app/utils/toolkit/terminal_toolkit.py
Normal file
@ -0,0 +1,104 @@
|
||||
import asyncio
|
||||
import os
|
||||
from pathlib import Path
|
||||
from typing import Any, Dict
|
||||
from camel.toolkits.terminal_toolkit import TerminalToolkit as BaseTerminalToolkit
|
||||
from app.component.command import uv
|
||||
from app.component.environment import env
|
||||
from app.service.task import Action, ActionTerminalData, Agents, get_task_lock
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
from app.service.task import process_task
|
||||
|
||||
|
||||
class TerminalToolkit(BaseTerminalToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``TerminalToolkit``.

    Replaces the base class's GUI terminal log with SSE delivery: terminal
    output is pushed onto the task queue as ``ActionTerminalData`` events.
    The ``need_terminal`` parameter is accepted for interface compatibility
    but always overridden to ``False`` (see ``__init__``).
    """

    agent_name: str = Agents.developer_agent

    def __init__(
        self,
        api_task_id: str,
        agent_name: str | None = None,
        timeout: float | None = None,
        shell_sessions: Dict[str, Any] | None = None,
        working_directory: str | None = None,
        need_terminal: bool = True,
        use_shell_mode: bool = True,
        clone_current_env: bool = False,
        safe_mode: bool = True,
    ):
        self.api_task_id = api_task_id
        if agent_name is not None:
            self.agent_name = agent_name
        if working_directory is None:
            # Default working directory for terminal sessions.
            working_directory = env("file_save_path", os.path.expanduser("~/.eigent/terminal/"))
        super().__init__(
            timeout=timeout,
            shell_sessions=shell_sessions,
            working_directory=working_directory,
            need_terminal=False,  # Override the code that creates GUI output logs, use queue for SSE output instead
            use_shell_mode=use_shell_mode,
            clone_current_env=clone_current_env,
            safe_mode=safe_mode,
        )

    def _update_terminal_output(self, output: str):
        # Base-class hook: forward terminal output to the task event queue.
        task_lock = get_task_lock(self.api_task_id)
        # This method will be called during init. At that time, the process_task_id parameter does not exist, so it is set to be empty default
        process_task_id = process_task.get("")
        task = asyncio.create_task(
            task_lock.put_queue(
                ActionTerminalData(
                    action=Action.terminal,
                    process_task_id=process_task_id,
                    data=output,
                )
            )
        )
        # Keep a strong reference so the task is not garbage-collected before it runs.
        if hasattr(task_lock, "add_background_task"):
            task_lock.add_background_task(task)

    def _ensure_uv_available(self) -> bool:
        # Resolve the uv binary via the project helper; always reports success.
        self.uv_path = uv()
        return True

    @listen_toolkit(
        BaseTerminalToolkit.shell_exec,
        lambda _, id, command: f"id: {id}, command: {command}",
    )
    def shell_exec(self, id: str, command: str) -> str:
        return super().shell_exec(id=id, command=command)

    @listen_toolkit(
        BaseTerminalToolkit.shell_view,
        lambda _, id: f"id: {id}",
    )
    def shell_view(self, id: str) -> str:
        return super().shell_view(id)

    @listen_toolkit(
        BaseTerminalToolkit.shell_wait,
        lambda _, id, seconds: f"id: {id}, seconds: {seconds}",
    )
    def shell_wait(self, id: str, seconds: int | None = None) -> str:
        return super().shell_wait(id=id, seconds=seconds)

    @listen_toolkit(
        BaseTerminalToolkit.shell_write_to_process,
        lambda _, id, input, press_enter: f"id: {id}, input: {input}, press_enter: {press_enter}",
    )
    def shell_write_to_process(self, id: str, input: str, press_enter: bool) -> str:
        return super().shell_write_to_process(id=id, input=input, press_enter=press_enter)

    @listen_toolkit(
        BaseTerminalToolkit.shell_kill_process,
        lambda _, id: f"id: {id}",
    )
    def shell_kill_process(self, id: str) -> str:
        return super().shell_kill_process(id=id)

    @listen_toolkit(
        BaseTerminalToolkit.ask_user_for_help,
        lambda _, id: f"id: {id}",
    )
    def ask_user_for_help(self, id: str) -> str:
        return super().ask_user_for_help(id=id)
|
||||
40
backend/app/utils/toolkit/thinking_toolkit.py
Normal file
40
backend/app/utils/toolkit/thinking_toolkit.py
Normal file
@ -0,0 +1,40 @@
|
||||
from camel.toolkits import ThinkingToolkit as BaseThinkingToolkit
|
||||
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class ThinkingToolkit(BaseThinkingToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``ThinkingToolkit``.

    Unlike most sibling toolkits there is no class-level ``agent_name``:
    the owning agent is supplied per instance, since any agent may use
    the thinking tools.
    """

    def __init__(self, api_task_id: str, agent_name: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id
        self.agent_name = agent_name

    @listen_toolkit(BaseThinkingToolkit.plan)
    def plan(self, plan: str) -> str:
        # All methods are pure pass-throughs; decorators report to the task stream.
        return super().plan(plan)

    @listen_toolkit(BaseThinkingToolkit.hypothesize)
    def hypothesize(self, hypothesis: str) -> str:
        return super().hypothesize(hypothesis)

    @listen_toolkit(BaseThinkingToolkit.think)
    def think(self, thought: str) -> str:
        return super().think(thought)

    @listen_toolkit(BaseThinkingToolkit.contemplate)
    def contemplate(self, contemplation: str) -> str:
        return super().contemplate(contemplation)

    @listen_toolkit(BaseThinkingToolkit.critique)
    def critique(self, critique: str) -> str:
        return super().critique(critique)

    @listen_toolkit(BaseThinkingToolkit.synthesize)
    def synthesize(self, synthesis: str) -> str:
        return super().synthesize(synthesis)

    @listen_toolkit(BaseThinkingToolkit.reflect)
    def reflect(self, reflection: str) -> str:
        return super().reflect(reflection)
|
||||
75
backend/app/utils/toolkit/twitter_toolkit.py
Normal file
75
backend/app/utils/toolkit/twitter_toolkit.py
Normal file
@ -0,0 +1,75 @@
|
||||
from typing import List
|
||||
from camel.toolkits import FunctionTool, TwitterToolkit as BaseTwitterToolkit
|
||||
from camel.toolkits.twitter_toolkit import (
|
||||
create_tweet,
|
||||
delete_tweet,
|
||||
get_my_user_profile,
|
||||
get_user_by_username,
|
||||
)
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class TwitterToolkit(BaseTwitterToolkit, AbstractToolkit):
    """Eigent wrapper around camel's ``TwitterToolkit``.

    Delegates to the module-level twitter functions (not base-class methods);
    ``listen_toolkit`` decorators report each call to the task's event stream.
    """

    agent_name: str = Agents.social_medium_agent

    def __init__(self, api_task_id: str, timeout: float | None = None):
        super().__init__(timeout)
        self.api_task_id = api_task_id

    @listen_toolkit(
        create_tweet,
        lambda _, text, **kwargs: f"create tweet with text: {text} and options: {kwargs}",
    )
    def create_tweet(
        self,
        text: str,
        poll_options: list[str] | None = None,
        poll_duration_minutes: int | None = None,
        quote_tweet_id: int | str | None = None,
    ) -> str:
        return create_tweet(text, poll_options, poll_duration_minutes, quote_tweet_id)

    @listen_toolkit(
        delete_tweet,
        lambda _, tweet_id: f"delete tweet with id: {tweet_id}",
    )
    def delete_tweet(self, tweet_id: str) -> str:
        return delete_tweet(tweet_id)

    @listen_toolkit(
        # Fix: this wrapper previously referenced get_user_by_username,
        # attaching the wrong base function's metadata to the listener.
        get_my_user_profile,
        lambda _: "get my user profile",
    )
    def get_my_user_profile(self) -> str:
        return get_my_user_profile()

    @listen_toolkit(
        get_user_by_username,
        lambda _, username: f"get user by username: {username}",
    )
    def get_user_by_username(self, username: str) -> str:
        return get_user_by_username(username)

    def get_tools(self) -> List[FunctionTool]:
        return [
            FunctionTool(self.create_tweet),
            FunctionTool(self.delete_tweet),
            FunctionTool(self.get_my_user_profile),
            FunctionTool(self.get_user_by_username),
        ]

    @classmethod
    def get_can_use_tools(cls, api_task_id: str) -> List[FunctionTool]:
        """Expose Twitter tools only when all four OAuth credentials are configured."""
        if (
            env("TWITTER_CONSUMER_KEY")
            and env("TWITTER_CONSUMER_SECRET")
            and env("TWITTER_ACCESS_TOKEN")
            and env("TWITTER_ACCESS_TOKEN_SECRET")
        ):
            return TwitterToolkit(api_task_id).get_tools()
        else:
            return []
|
||||
45
backend/app/utils/toolkit/video_analysis_toolkit.py
Normal file
45
backend/app/utils/toolkit/video_analysis_toolkit.py
Normal file
@ -0,0 +1,45 @@
|
||||
import os
|
||||
from camel.models import BaseModelBackend
|
||||
from camel.toolkits import VideoAnalysisToolkit as BaseVideoAnalysisToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class VideoAnalysisToolkit(BaseVideoAnalysisToolkit, AbstractToolkit):
|
||||
agent_name: str = Agents.multi_modal_agent
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_task_id: str,
|
||||
working_directory: str | None = None,
|
||||
model: BaseModelBackend | None = None,
|
||||
use_audio_transcription: bool = False,
|
||||
use_ocr: bool = False,
|
||||
frame_interval: float = 4,
|
||||
output_language: str = "English",
|
||||
cookies_path: str | None = None,
|
||||
timeout: float | None = None,
|
||||
) -> None:
|
||||
self.api_task_id = api_task_id
|
||||
if working_directory is None:
|
||||
working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
|
||||
super().__init__(
|
||||
working_directory,
|
||||
model,
|
||||
use_audio_transcription,
|
||||
use_ocr,
|
||||
frame_interval,
|
||||
output_language,
|
||||
cookies_path,
|
||||
timeout,
|
||||
)
|
||||
|
||||
@listen_toolkit(
|
||||
BaseVideoAnalysisToolkit.ask_question_about_video,
|
||||
lambda _, video_path, question: f"transcribe video from {video_path} and ask question: {question}",
|
||||
)
|
||||
def ask_question_about_video(self, video_path: str, question: str) -> str:
|
||||
return super().ask_question_about_video(video_path, question)
|
||||
45
backend/app/utils/toolkit/video_download_toolkit.py
Normal file
45
backend/app/utils/toolkit/video_download_toolkit.py
Normal file
@ -0,0 +1,45 @@
|
||||
import os
|
||||
from typing import List
|
||||
from PIL.Image import Image
|
||||
from camel.toolkits import VideoDownloaderToolkit as BaseVideoDownloaderToolkit
|
||||
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class VideoDownloaderToolkit(BaseVideoDownloaderToolkit, AbstractToolkit):
|
||||
agent_name: str = Agents.multi_modal_agent
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_task_id: str,
|
||||
working_directory: str | None = None,
|
||||
cookies_path: str | None = None,
|
||||
timeout: float | None = None,
|
||||
) -> None:
|
||||
if working_directory is None:
|
||||
working_directory = env("file_save_path", os.path.expanduser("~/Downloads"))
|
||||
super().__init__(working_directory, cookies_path, timeout)
|
||||
self.api_task_id = api_task_id
|
||||
|
||||
@listen_toolkit(BaseVideoDownloaderToolkit.download_video)
|
||||
def download_video(self, url: str) -> str:
|
||||
return super().download_video(url)
|
||||
|
||||
@listen_toolkit(
|
||||
BaseVideoDownloaderToolkit.get_video_bytes,
|
||||
lambda _, video_path: f"get video bytes from {video_path}",
|
||||
lambda _: "get video bytes",
|
||||
)
|
||||
def get_video_bytes(self, video_path: str) -> bytes:
|
||||
return super().get_video_bytes(video_path)
|
||||
|
||||
@listen_toolkit(
|
||||
BaseVideoDownloaderToolkit.get_video_screenshots,
|
||||
lambda _, video_path, amount: f"get video screenshots from {video_path}, amount: {amount}",
|
||||
lambda results: f"get video screenshots {len(results)}",
|
||||
)
|
||||
def get_video_screenshots(self, video_path: str, amount: int) -> List[Image]:
|
||||
return super().get_video_screenshots(video_path, amount)
|
||||
53
backend/app/utils/toolkit/web_deploy_toolkit.py
Normal file
53
backend/app/utils/toolkit/web_deploy_toolkit.py
Normal file
@ -0,0 +1,53 @@
|
||||
import uuid
|
||||
from typing import Any, Dict
|
||||
from camel.toolkits import WebDeployToolkit as BaseWebDeployToolkit
|
||||
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class WebDeployToolkit(BaseWebDeployToolkit, AbstractToolkit):
|
||||
agent_name: str = Agents.developer_agent
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
api_task_id: str,
|
||||
timeout: float | None = None,
|
||||
add_branding_tag: bool = True,
|
||||
logo_path: str = "../../../../public/favicon.png",
|
||||
tag_text: str = "Created by Eigent",
|
||||
tag_url: str = "https://main.eigent.ai/",
|
||||
remote_server_ip: str | None = "space.eigent.ai",
|
||||
remote_server_port: int = 8080,
|
||||
):
|
||||
self.api_task_id = api_task_id
|
||||
super().__init__(timeout, add_branding_tag, logo_path, tag_text, tag_url, remote_server_ip, remote_server_port)
|
||||
|
||||
@listen_toolkit(BaseWebDeployToolkit.deploy_html_content)
|
||||
def deploy_html_content(
|
||||
self,
|
||||
html_content: str | None = None,
|
||||
html_file_path: str | None = None,
|
||||
file_name: str = "index.html",
|
||||
port: int = 8080,
|
||||
domain: str | None = None,
|
||||
subdirectory: str | None = None,
|
||||
) -> Dict[str, Any]:
|
||||
subdirectory = str(uuid.uuid4())
|
||||
return super().deploy_html_content(html_content, html_file_path, file_name, port, domain, subdirectory)
|
||||
|
||||
@listen_toolkit(BaseWebDeployToolkit.deploy_folder)
|
||||
def deploy_folder(
|
||||
self, folder_path: str, port: int = 8080, domain: str | None = None, subdirectory: str | None = None
|
||||
) -> Dict[str, Any]:
|
||||
subdirectory = str(uuid.uuid4())
|
||||
return super().deploy_folder(folder_path, port, domain, subdirectory)
|
||||
|
||||
@listen_toolkit(BaseWebDeployToolkit.stop_server)
|
||||
def stop_server(self, port: int) -> Dict[str, Any]:
|
||||
return super().stop_server(port)
|
||||
|
||||
@listen_toolkit(BaseWebDeployToolkit.list_running_servers)
|
||||
def list_running_servers(self) -> Dict[str, Any]:
|
||||
return super().list_running_servers()
|
||||
46
backend/app/utils/toolkit/whatsapp_toolkit.py
Normal file
46
backend/app/utils/toolkit/whatsapp_toolkit.py
Normal file
@ -0,0 +1,46 @@
|
||||
from typing import Any, Dict, List
|
||||
from camel.toolkits import WhatsAppToolkit as BaseWhatsAppToolkit
|
||||
from camel.toolkits.function_tool import FunctionTool
|
||||
from app.component.environment import env
|
||||
from app.service.task import Agents
|
||||
from app.utils.listen.toolkit_listen import listen_toolkit
|
||||
from app.utils.toolkit.abstract_toolkit import AbstractToolkit
|
||||
|
||||
|
||||
class WhatsAppToolkit(BaseWhatsAppToolkit, AbstractToolkit):
|
||||
agent_name: str = Agents.social_medium_agent
|
||||
|
||||
def __init__(self, api_task_id: str, timeout: float | None = None):
|
||||
super().__init__(timeout)
|
||||
self.api_task_id = api_task_id
|
||||
|
||||
@listen_toolkit(
|
||||
BaseWhatsAppToolkit.send_message,
|
||||
lambda _, to, message: f"send message to {to}: {message}",
|
||||
lambda result: f"message sent result: {result}",
|
||||
)
|
||||
def send_message(self, to: str, message: str) -> Dict[str, Any] | str:
|
||||
return super().send_message(to, message)
|
||||
|
||||
@listen_toolkit(
|
||||
BaseWhatsAppToolkit.get_message_templates,
|
||||
lambda _: "get message templates",
|
||||
lambda result: f"message templates: {result}",
|
||||
)
|
||||
def get_message_templates(self) -> List[Dict[str, Any]] | str:
|
||||
return super().get_message_templates()
|
||||
|
||||
@listen_toolkit(
|
||||
BaseWhatsAppToolkit.get_business_profile,
|
||||
lambda _: "get business profile",
|
||||
lambda result: f"business profile: {result}",
|
||||
)
|
||||
def get_business_profile(self) -> Dict[str, Any] | str:
|
||||
return super().get_business_profile()
|
||||
|
||||
@classmethod
|
||||
def get_can_use_tools(cls, api_task_id: str) -> list[FunctionTool]:
|
||||
if env("WHATSAPP_ACCESS_TOKEN") and env("WHATSAPP_PHONE_NUMBER_ID"):
|
||||
return WhatsAppToolkit(api_task_id).get_tools()
|
||||
else:
|
||||
return []
|
||||
271
backend/app/utils/workforce.py
Normal file
271
backend/app/utils/workforce.py
Normal file
@ -0,0 +1,271 @@
|
||||
import asyncio
|
||||
from typing import Generator, List
|
||||
from camel.agents import ChatAgent
|
||||
from camel.societies.workforce.workforce import (
|
||||
Workforce as BaseWorkforce,
|
||||
WorkforceState,
|
||||
DEFAULT_WORKER_POOL_SIZE,
|
||||
)
|
||||
from camel.societies.workforce.task_channel import TaskChannel
|
||||
from camel.societies.workforce.base import BaseNode
|
||||
from camel.societies.workforce.utils import TaskAssignResult
|
||||
from loguru import logger
|
||||
from camel.tasks.task import Task, TaskState, validate_task_content
|
||||
from app.component import code
|
||||
from app.exception.exception import UserException
|
||||
from app.utils.agent import ListenChatAgent
|
||||
from app.service.task import (
|
||||
Action,
|
||||
ActionAssignTaskData,
|
||||
ActionEndData,
|
||||
ActionTaskStateData,
|
||||
get_camel_task,
|
||||
get_task_lock,
|
||||
)
|
||||
from app.utils.single_agent_worker import SingleAgentWorker
|
||||
|
||||
# === Debug sink === Write detailed dependency debug logs to file (logs/workforce_debug.log)
|
||||
# Create a new file every day, keep the logs for the last 7 days, and write asynchronously without blocking the main process
|
||||
logger.add(
|
||||
"logs/workforce_debug_{time:YYYY-MM-DD}.log",
|
||||
rotation="00:00",
|
||||
retention="7 days",
|
||||
enqueue=True,
|
||||
level="DEBUG",
|
||||
)
|
||||
# Independent sink: only collect the "[WF]" debug lines we insert to quickly view the dependency chain
|
||||
logger.add(
|
||||
"logs/wf_trace_{time:YYYY-MM-DD-HH}.log",
|
||||
rotation="00:00",
|
||||
retention="7 days",
|
||||
enqueue=True,
|
||||
level="DEBUG",
|
||||
filter=lambda record: record["message"].startswith("[WF]"),
|
||||
)
|
||||
|
||||
|
||||
class Workforce(BaseWorkforce):
|
||||
def __init__(
|
||||
self,
|
||||
api_task_id: str,
|
||||
description: str,
|
||||
children: List[BaseNode] | None = None,
|
||||
coordinator_agent: ChatAgent | None = None,
|
||||
task_agent: ChatAgent | None = None,
|
||||
new_worker_agent: ChatAgent | None = None,
|
||||
graceful_shutdown_timeout: float = 3,
|
||||
share_memory: bool = False,
|
||||
use_structured_output_handler: bool = True,
|
||||
) -> None:
|
||||
self.api_task_id = api_task_id
|
||||
super().__init__(
|
||||
description=description,
|
||||
children=children,
|
||||
coordinator_agent=coordinator_agent,
|
||||
task_agent=task_agent,
|
||||
new_worker_agent=new_worker_agent,
|
||||
graceful_shutdown_timeout=graceful_shutdown_timeout,
|
||||
share_memory=share_memory,
|
||||
use_structured_output_handler=use_structured_output_handler,
|
||||
)
|
||||
|
||||
def eigent_make_sub_tasks(self, task: Task):
|
||||
"""split process_task method to eigent_make_sub_tasks and eigent_start method"""
|
||||
|
||||
if not validate_task_content(task.content, task.id):
|
||||
task.state = TaskState.FAILED
|
||||
task.result = "Task failed: Invalid or empty content provided"
|
||||
logger.warning(
|
||||
f"Task {task.id} rejected: Invalid or empty content. Content preview: '{task.content[:50]}...'"
|
||||
)
|
||||
raise UserException(code.error, task.result)
|
||||
|
||||
self.reset()
|
||||
self._task = task
|
||||
self._state = WorkforceState.RUNNING
|
||||
task.state = TaskState.OPEN
|
||||
self._pending_tasks.append(task)
|
||||
|
||||
# Decompose the task into subtasks first
|
||||
subtasks_result = self._decompose_task(task)
|
||||
|
||||
# Handle both streaming and non-streaming results
|
||||
if isinstance(subtasks_result, Generator):
|
||||
# This is a generator (streaming mode)
|
||||
subtasks = []
|
||||
for new_tasks in subtasks_result:
|
||||
subtasks.extend(new_tasks)
|
||||
else:
|
||||
# This is a regular list (non-streaming mode)
|
||||
subtasks = subtasks_result
|
||||
|
||||
return subtasks
|
||||
|
||||
async def eigent_start(self, subtasks: list[Task]):
|
||||
"""start the workforce"""
|
||||
logger.debug(f"start the workforce {subtasks=}")
|
||||
self._pending_tasks.extendleft(reversed(subtasks))
|
||||
self.set_channel(TaskChannel())
|
||||
# Save initial snapshot
|
||||
self.save_snapshot("Initial task decomposition")
|
||||
|
||||
try:
|
||||
await self.start()
|
||||
except Exception as e:
|
||||
logger.error(f"Error in workforce execution: {e}")
|
||||
self._state = WorkforceState.STOPPED
|
||||
raise
|
||||
finally:
|
||||
if self._state != WorkforceState.STOPPED:
|
||||
self._state = WorkforceState.IDLE
|
||||
|
||||
async def _find_assignee(self, tasks: List[Task]) -> TaskAssignResult:
|
||||
# Task assignment phase: send "waiting for execution" notification to the frontend, and send "start execution" notification when the task actually begins execution
|
||||
assigned = await super()._find_assignee(tasks)
|
||||
|
||||
task_lock = get_task_lock(self.api_task_id)
|
||||
for item in assigned.assignments:
|
||||
# DEBUG ▶ Task has been assigned to which worker and its dependencies
|
||||
logger.debug(f"[WF] ASSIGN {item.task_id} -> {item.assignee_id} deps={item.dependencies}")
|
||||
# The main task itself does not need notification
|
||||
if self._task and item.task_id == self._task.id:
|
||||
continue
|
||||
# Find task content
|
||||
task_obj = get_camel_task(item.task_id, tasks)
|
||||
content = task_obj.content if task_obj else ""
|
||||
# Asynchronously send waiting notification
|
||||
task = asyncio.create_task(
|
||||
task_lock.put_queue(
|
||||
ActionAssignTaskData(
|
||||
action=Action.assign_task,
|
||||
data={
|
||||
"assignee_id": item.assignee_id,
|
||||
"task_id": item.task_id,
|
||||
"content": content,
|
||||
"state": "waiting", # Mark as waiting state
|
||||
},
|
||||
)
|
||||
)
|
||||
)
|
||||
# Track the task for cleanup
|
||||
task_lock.add_background_task(task)
|
||||
return assigned
|
||||
|
||||
async def _post_task(self, task: Task, assignee_id: str) -> None:
|
||||
# DEBUG ▶ Dependencies are met, the task really starts to execute
|
||||
logger.debug(f"[WF] POST {task.id} -> {assignee_id}")
|
||||
"""Override the _post_task method to notify the frontend when the task really starts to execute"""
|
||||
# When the dependency check is passed and the task is about to be published to the execution queue, send a notification to the frontend
|
||||
task_lock = get_task_lock(self.api_task_id)
|
||||
if self._task and task.id != self._task.id: # Skip the main task itself
|
||||
await task_lock.put_queue(
|
||||
ActionAssignTaskData(
|
||||
action=Action.assign_task,
|
||||
data={
|
||||
"assignee_id": assignee_id,
|
||||
"task_id": task.id,
|
||||
"content": task.content,
|
||||
"state": "running", # running state
|
||||
},
|
||||
)
|
||||
)
|
||||
# Call the parent class method to continue the normal task publishing process
|
||||
await super()._post_task(task, assignee_id)
|
||||
|
||||
def add_single_agent_worker(
|
||||
self, description: str, worker: ListenChatAgent, pool_max_size: int = DEFAULT_WORKER_POOL_SIZE
|
||||
) -> BaseWorkforce:
|
||||
if self._state == WorkforceState.RUNNING:
|
||||
raise RuntimeError("Cannot add workers while workforce is running. Pause the workforce first.")
|
||||
|
||||
# Validate worker agent compatibility
|
||||
self._validate_agent_compatibility(worker, "Worker agent")
|
||||
|
||||
# Ensure the worker agent shares this workforce's pause control
|
||||
self._attach_pause_event_to_agent(worker)
|
||||
|
||||
worker_node = SingleAgentWorker(
|
||||
description=description,
|
||||
worker=worker,
|
||||
pool_max_size=pool_max_size,
|
||||
use_structured_output_handler=self.use_structured_output_handler,
|
||||
)
|
||||
self._children.append(worker_node)
|
||||
|
||||
# If we have a channel set up, set it for the new worker
|
||||
if hasattr(self, "_channel") and self._channel is not None:
|
||||
worker_node.set_channel(self._channel)
|
||||
|
||||
# If workforce is paused, start the worker's listening task
|
||||
self._start_child_node_when_paused(worker_node.start())
|
||||
|
||||
if self.metrics_logger:
|
||||
self.metrics_logger.log_worker_created(
|
||||
worker_id=worker_node.node_id,
|
||||
worker_type="SingleAgentWorker",
|
||||
role=worker_node.description,
|
||||
)
|
||||
return self
|
||||
|
||||
async def _handle_completed_task(self, task: Task) -> None:
|
||||
# DEBUG ▶ Task completed
|
||||
logger.debug(f"[WF] DONE {task.id}")
|
||||
task_lock = get_task_lock(self.api_task_id)
|
||||
|
||||
await task_lock.put_queue(
|
||||
ActionTaskStateData(
|
||||
data={
|
||||
"task_id": task.id,
|
||||
"content": task.content,
|
||||
"state": task.state,
|
||||
"result": task.result or "",
|
||||
"failure_count": task.failure_count,
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
return await super()._handle_completed_task(task)
|
||||
|
||||
async def _handle_failed_task(self, task: Task) -> bool:
|
||||
# DEBUG ▶ Task failed
|
||||
logger.debug(f"[WF] FAIL {task.id} retry={task.failure_count}")
|
||||
|
||||
result = await super()._handle_failed_task(task)
|
||||
|
||||
error_message = ""
|
||||
if self.metrics_logger and hasattr(self.metrics_logger, "log_entries"):
|
||||
for entry in reversed(self.metrics_logger.log_entries):
|
||||
if entry.get("event_type") == "task_failed" and entry.get("task_id") == task.id:
|
||||
error_message = entry.get("error_message")
|
||||
break
|
||||
|
||||
task_lock = get_task_lock(self.api_task_id)
|
||||
await task_lock.put_queue(
|
||||
ActionTaskStateData(
|
||||
data={
|
||||
"task_id": task.id,
|
||||
"content": task.content,
|
||||
"state": task.state,
|
||||
"failure_count": task.failure_count,
|
||||
"result": str(error_message),
|
||||
}
|
||||
)
|
||||
)
|
||||
|
||||
return result
|
||||
|
||||
def stop(self) -> None:
|
||||
super().stop()
|
||||
task_lock = get_task_lock(self.api_task_id)
|
||||
task = asyncio.create_task(task_lock.put_queue(ActionEndData()))
|
||||
task_lock.add_background_task(task)
|
||||
|
||||
async def cleanup(self) -> None:
|
||||
r"""Clean up resources when workforce is done"""
|
||||
try:
|
||||
# Clean up the task lock
|
||||
from app.service.task import delete_task_lock
|
||||
|
||||
await delete_task_lock(self.api_task_id)
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up workforce resources: {e}")
|
||||
1
backend/babel.cfg
Normal file
1
backend/babel.cfg
Normal file
@ -0,0 +1 @@
|
||||
[python: **.py]
|
||||
9
backend/cli.py
Normal file
9
backend/cli.py
Normal file
@ -0,0 +1,9 @@
|
||||
from app.component.environment import auto_import
|
||||
from app.command import cli
|
||||
|
||||
|
||||
auto_import("app.command")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
cli()
|
||||
34
backend/lang/en_US/LC_MESSAGES/messages.po
Normal file
34
backend/lang/en_US/LC_MESSAGES/messages.po
Normal file
@ -0,0 +1,34 @@
|
||||
# English (United States) translations for PROJECT.
|
||||
# Copyright (C) 2025 ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2025.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PROJECT VERSION\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2025-04-02 11:27+0800\n"
|
||||
"PO-Revision-Date: 2025-04-02 11:44+0800\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language: en_US\n"
|
||||
"Language-Team: en_US <LL@li.org>\n"
|
||||
"Plural-Forms: nplurals=2; plural=(n != 1);\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.17.0\n"
|
||||
|
||||
#: app/controller/chat_controller.py:24
|
||||
msgid "Please end the previous task"
|
||||
msgstr ""
|
||||
|
||||
#: app/controller/chat_controller.py:33
|
||||
msgid "Please end the previous task first"
|
||||
msgstr ""
|
||||
|
||||
#~ msgid "no auth"
|
||||
#~ msgstr ""
|
||||
|
||||
#~ msgid "hello"
|
||||
#~ msgstr ""
|
||||
|
||||
36
backend/lang/zh_CN/LC_MESSAGES/messages.po
Normal file
36
backend/lang/zh_CN/LC_MESSAGES/messages.po
Normal file
@ -0,0 +1,36 @@
|
||||
# Chinese (Simplified, China) translations for PROJECT.
|
||||
# Copyright (C) 2025 ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2025.
|
||||
#
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PROJECT VERSION\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2025-07-16 23:07+0800\n"
|
||||
"PO-Revision-Date: 2025-07-16 23:07+0800\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language: zh_Hans_CN\n"
|
||||
"Language-Team: zh_Hans_CN <LL@li.org>\n"
|
||||
"Plural-Forms: nplurals=1; plural=0;\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.17.0\n"
|
||||
|
||||
#: app/controller/chat_controller.py:38
|
||||
msgid "Task was done"
|
||||
msgstr ""
|
||||
|
||||
#: app/controller/chat_controller.py:47
|
||||
msgid "Please wait task done"
|
||||
msgstr ""
|
||||
|
||||
#: app/service/task.py:269 app/service/task.py:288
|
||||
msgid "Task not found"
|
||||
msgstr ""
|
||||
|
||||
#: app/service/task.py:275
|
||||
msgid "Task already exists"
|
||||
msgstr ""
|
||||
|
||||
85
backend/main.py
Normal file
85
backend/main.py
Normal file
@ -0,0 +1,85 @@
|
||||
import os
|
||||
import pathlib
|
||||
import signal
|
||||
import asyncio
|
||||
import atexit
|
||||
from app import api
|
||||
from loguru import logger
|
||||
from app.component.environment import auto_include_routers, env
|
||||
|
||||
|
||||
os.environ["PYTHONIOENCODING"] = "utf-8"
|
||||
|
||||
prefix = env("url_prefix", "")
|
||||
auto_include_routers(api, prefix, "app/controller")
|
||||
|
||||
|
||||
# Configure Loguru
|
||||
logger.add(
|
||||
os.path.expanduser("~/.eigent/runtime/log/app.log"), # Log file
|
||||
rotation="10 MB", # Log rotation: 10MB per file
|
||||
retention="10 days", # Retain logs for the last 10 days
|
||||
level="DEBUG", # Log level
|
||||
encoding="utf-8",
|
||||
)
|
||||
|
||||
dir = pathlib.Path(__file__).parent / "runtime"
|
||||
dir.mkdir(parents=True, exist_ok=True)
|
||||
|
||||
|
||||
# Write PID file asynchronously
|
||||
async def write_pid_file():
|
||||
r"""Write PID file asynchronously"""
|
||||
import aiofiles
|
||||
|
||||
async with aiofiles.open(dir / "run.pid", "w") as f:
|
||||
await f.write(str(os.getpid()))
|
||||
|
||||
|
||||
# Create task to write PID
|
||||
asyncio.create_task(write_pid_file())
|
||||
|
||||
# Graceful shutdown handler
|
||||
shutdown_event = asyncio.Event()
|
||||
|
||||
|
||||
async def cleanup_resources():
|
||||
r"""Cleanup all resources on shutdown"""
|
||||
logger.info("Starting graceful shutdown...")
|
||||
|
||||
from app.service.task import task_locks, _cleanup_task
|
||||
|
||||
if _cleanup_task and not _cleanup_task.done():
|
||||
_cleanup_task.cancel()
|
||||
try:
|
||||
await _cleanup_task
|
||||
except asyncio.CancelledError:
|
||||
pass
|
||||
|
||||
# Cleanup all task locks
|
||||
for task_id in list(task_locks.keys()):
|
||||
try:
|
||||
task_lock = task_locks[task_id]
|
||||
await task_lock.cleanup()
|
||||
except Exception as e:
|
||||
logger.error(f"Error cleaning up task {task_id}: {e}")
|
||||
|
||||
# Remove PID file
|
||||
pid_file = dir / "run.pid"
|
||||
if pid_file.exists():
|
||||
pid_file.unlink()
|
||||
|
||||
logger.info("Graceful shutdown completed")
|
||||
|
||||
|
||||
def signal_handler(signum, frame):
|
||||
r"""Handle shutdown signals"""
|
||||
logger.info(f"Received signal {signum}")
|
||||
asyncio.create_task(cleanup_resources())
|
||||
shutdown_event.set()
|
||||
|
||||
|
||||
signal.signal(signal.SIGTERM, signal_handler)
|
||||
signal.signal(signal.SIGINT, signal_handler)
|
||||
|
||||
atexit.register(lambda: asyncio.run(cleanup_resources()))
|
||||
35
backend/messages.pot
Normal file
35
backend/messages.pot
Normal file
@ -0,0 +1,35 @@
|
||||
# Translations template for PROJECT.
|
||||
# Copyright (C) 2025 ORGANIZATION
|
||||
# This file is distributed under the same license as the PROJECT project.
|
||||
# FIRST AUTHOR <EMAIL@ADDRESS>, 2025.
|
||||
#
|
||||
#, fuzzy
|
||||
msgid ""
|
||||
msgstr ""
|
||||
"Project-Id-Version: PROJECT VERSION\n"
|
||||
"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n"
|
||||
"POT-Creation-Date: 2025-07-16 23:07+0800\n"
|
||||
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
|
||||
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
|
||||
"Language-Team: LANGUAGE <LL@li.org>\n"
|
||||
"MIME-Version: 1.0\n"
|
||||
"Content-Type: text/plain; charset=utf-8\n"
|
||||
"Content-Transfer-Encoding: 8bit\n"
|
||||
"Generated-By: Babel 2.17.0\n"
|
||||
|
||||
#: app/controller/chat_controller.py:38
|
||||
msgid "Task was done"
|
||||
msgstr ""
|
||||
|
||||
#: app/controller/chat_controller.py:47
|
||||
msgid "Please wait task done"
|
||||
msgstr ""
|
||||
|
||||
#: app/service/task.py:269 app/service/task.py:288
|
||||
msgid "Task not found"
|
||||
msgstr ""
|
||||
|
||||
#: app/service/task.py:275
|
||||
msgid "Task already exists"
|
||||
msgstr ""
|
||||
|
||||
31
backend/pyproject.toml
Normal file
31
backend/pyproject.toml
Normal file
@ -0,0 +1,31 @@
|
||||
[project]
|
||||
name = "backend"
|
||||
version = "0.1.0"
|
||||
description = "Add your description here"
|
||||
readme = "README.md"
|
||||
requires-python = "==3.10.16"
|
||||
dependencies = [
|
||||
"camel-ai[eigent]>=0.2.74a4",
|
||||
"fastapi>=0.115.12",
|
||||
"fastapi-babel>=1.0.0",
|
||||
"uvicorn[standard]>=0.34.2",
|
||||
"pydantic-i18n>=0.4.5",
|
||||
"python-dotenv>=1.1.0",
|
||||
"httpx[socks]>=0.28.1",
|
||||
"loguru>=0.7.3",
|
||||
"pydash>=8.0.5",
|
||||
"inflection>=0.5.1",
|
||||
"aiofiles>=24.1.0",
|
||||
]
|
||||
|
||||
|
||||
[dependency-groups]
|
||||
dev = ["babel>=2.17.0"]
|
||||
|
||||
[tool.ruff]
|
||||
line-length = 120
|
||||
|
||||
[tool.ruff.lint]
|
||||
extend-select = [
|
||||
"B006", # forbid def demo(mutation = [])
|
||||
]
|
||||
2689
backend/uv.lock
generated
Normal file
2689
backend/uv.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
BIN
build/icon.icns
Normal file
BIN
build/icon.icns
Normal file
Binary file not shown.
BIN
build/icon.ico
Normal file
BIN
build/icon.ico
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 102 KiB |
BIN
build/icon.png
Normal file
BIN
build/icon.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 8.3 KiB |
4
build/installer.nsh
Normal file
4
build/installer.nsh
Normal file
@ -0,0 +1,4 @@
|
||||
!macro customInstallMode
|
||||
; 跳过“选择安装类型”页面,直接进入下一步
|
||||
Abort
|
||||
!macroend
|
||||
21
components.json
Normal file
21
components.json
Normal file
@ -0,0 +1,21 @@
|
||||
{
|
||||
"$schema": "https://ui.shadcn.com/schema.json",
|
||||
"style": "new-york",
|
||||
"rsc": false,
|
||||
"tsx": true,
|
||||
"tailwind": {
|
||||
"config": "tailwind.config.js",
|
||||
"css": "src/index.css",
|
||||
"baseColor": "neutral",
|
||||
"cssVariables": true,
|
||||
"prefix": ""
|
||||
},
|
||||
"aliases": {
|
||||
"components": "@/components",
|
||||
"utils": "@/lib/utils",
|
||||
"ui": "@/components/ui",
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
},
|
||||
"iconLibrary": "lucide"
|
||||
}
|
||||
27
config/notarize.cjs
Normal file
27
config/notarize.cjs
Normal file
@ -0,0 +1,27 @@
|
||||
const { notarize } = require("@electron/notarize");
|
||||
require("dotenv").config();
|
||||
|
||||
exports.default = async function notarizing(context) {
|
||||
if (process.platform !== "darwin") {
|
||||
return;
|
||||
}
|
||||
const appOutDir = context.appOutDir;
|
||||
const appName = context.packager.appInfo.productName;
|
||||
console.log("appOutDir", appOutDir);
|
||||
console.log("process.env.APPLEID", process.env.APPLEID);
|
||||
console.log("process.env.APPLEIDPASS", process.env.APPLEIDPASS);
|
||||
console.log("process.env.APPLETEAMID", process.env.APPLETEAMID);
|
||||
return notarize({
|
||||
tool: "notarytool",
|
||||
teamId: process.env.APPLETEAMID,
|
||||
appBundleId: "com.eigent.app",
|
||||
appPath: `${appOutDir}/${appName}.app`,
|
||||
appleId: process.env.APPLEID,
|
||||
appleIdPassword: process.env.APPLEIDPASS,
|
||||
ascProvider: process.env.APPLETEAMID,
|
||||
})
|
||||
.then((res) => {
|
||||
console.log("success!");
|
||||
})
|
||||
.catch(console.log);
|
||||
};
|
||||
61
docs/core/concepts.md
Normal file
61
docs/core/concepts.md
Normal file
@ -0,0 +1,61 @@
|
||||
---
|
||||
title: "Concepts"
|
||||
description: "Understand the core terms and features that make Eigent unique."
|
||||
icon: "key"
|
||||
---
|
||||
|
||||
## Workers
|
||||
|
||||
Autonomous agents tailored to specific roles that run tasks independently or together. Think of them as individual members of your team, like a "Researcher," a "Programmer," or a "Writer."
|
||||
|
||||
Each Worker is designed with specific capabilities and can be customized to handle particular types of tasks efficiently.
|
||||
|
||||

|
||||
|
||||
## Workforce
|
||||
|
||||
A coordinated team of Workers that collaborate to complete complex workflows. Think of it as your AI project team.
|
||||
|
||||
The Workforce orchestrates multiple Workers, ensuring they work together seamlessly to achieve your goals.
|
||||
|
||||

|
||||
|
||||
## Workspace
|
||||
|
||||
A live window into a Worker's process where you can watch or take control. For example, a terminal, a browser, or a file viewer.
|
||||
|
||||
Workspaces provide real-time visibility into what your Workers are doing, allowing you to monitor progress and intervene when needed.
|
||||
|
||||

|
||||
|
||||
## Tasks & Subtasks
|
||||
|
||||
You define a mission (task), the Workforce breaks it into components (subtasks), and assigns them to the appropriate Workers.
|
||||
|
||||
This hierarchical approach ensures complex projects are broken down into manageable pieces and executed efficiently.
|
||||
|
||||

|
||||
|
||||
## Chat
|
||||
|
||||
Your primary interface for communicating with your Workforce. You use it to define your main Task, sharing files and interacting with agents in real time.
|
||||
|
||||
The Chat interface serves as your command center, where you can give instructions, ask questions, and receive updates from your AI team.
|
||||
|
||||

|
||||
|
||||
## MCP
|
||||
|
||||
Model Context Protocol that allows Workers to use external tools. It connects your agents to databases, APIs, and documentation sources, empowering them to act across platforms.
|
||||
|
||||
MCP extends your Workers' capabilities by providing access to real-world data and tools, making them more powerful and versatile.
|
||||
|
||||

|
||||
|
||||
## Models
|
||||
|
||||
Different AI "brains" that power your Workers. Eigent allows you to choose from various models (like GPT-4.1 or Gemini 2.5 Pro), each with different strengths in speed, reasoning, and cost.
|
||||
|
||||
Choose the right model for each task based on your specific needs for performance, accuracy, or cost efficiency.
|
||||
|
||||

|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
x
Reference in New Issue
Block a user