Fix async bug, optimize log info, and improve tool call error handling

This commit is contained in:
Wendong
2025-03-14 02:34:25 +08:00
parent 3bdc4139f6
commit ab4b9f3515
9 changed files with 72 additions and 14 deletions

View File

@@ -179,7 +179,7 @@ source .venv/bin/activate
.venv\Scripts\activate
# Install from requirements.txt
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
```
## Option 3: Using conda
@@ -201,7 +201,7 @@ conda activate owl
pip install -e .
# Option 2: Install from requirements.txt
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
# Exit the conda environment when done
conda deactivate

View File

@@ -176,7 +176,7 @@ source .venv/bin/activate
.venv\Scripts\activate
# 从 requirements.txt 安装
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
```
## 选项3：使用 conda
@@ -198,7 +198,7 @@ conda activate owl
pip install -e .
# 选项2：从 requirements.txt 安装
pip install -r requirements.txt
pip install -r requirements.txt --use-pep517
# 完成后退出 conda 环境
conda deactivate

View File

@@ -47,7 +47,7 @@ def construct_society(question: str) -> OwlRolePlaying:
# Create models for different components using Azure OpenAI
base_model_config = {
"model_platform": ModelPlatformType.AZURE,
"model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
"model_type": os.getenv("AZURE_OPENAI_MODEL_TYPE"),
"model_config_dict": ChatGPTConfig(temperature=0.4, max_tokens=4096).as_dict(),
}

View File

@@ -437,7 +437,61 @@ class OwlGAIARolePlaying(OwlRolePlaying):
)
async def run_society(
def run_society(
    society: OwlRolePlaying,
    round_limit: int = 15,
) -> Tuple[str, List[dict], dict]:
    """Run a role-playing society to completion, synchronously.

    Drives ``society`` one round at a time until either agent terminates,
    the user says ``TASK_DONE``, or ``round_limit`` rounds have elapsed.

    Args:
        society: The role-playing society to run.
        round_limit: Maximum number of conversation rounds (default 15).

    Returns:
        A tuple of (final assistant answer, per-round chat history dicts,
        token-usage info dict with ``completion_token_count`` and
        ``prompt_token_count``).
    """
    overall_completion_token_count = 0
    overall_prompt_token_count = 0

    chat_history = []
    init_prompt = """
Now please give me instructions to solve over overall task step by step. If the task requires some specific knowledge, please instruct me to use tools to complete the task.
"""
    input_msg = society.init_chat(init_prompt)
    for _round in range(round_limit):
        assistant_response, user_response = society.step(input_msg)
        # Accumulate token usage from both agents each round.
        overall_completion_token_count += (
            assistant_response.info["usage"]["completion_tokens"]
            + user_response.info["usage"]["completion_tokens"]
        )
        # BUG FIX: prompt tokens were declared and returned but never
        # accumulated, so prompt_token_count was always 0.
        overall_prompt_token_count += (
            assistant_response.info["usage"]["prompt_tokens"]
            + user_response.info["usage"]["prompt_tokens"]
        )

        # Convert tool calls to dicts. Guard against a missing or None
        # "tool_calls" entry so a round with no tool calls does not raise.
        tool_call_records: List[dict] = []
        for tool_call in assistant_response.info.get("tool_calls") or []:
            tool_call_records.append(tool_call.as_dict())

        _data = {
            "user": user_response.msg.content,
            "assistant": assistant_response.msg.content,
            "tool_calls": tool_call_records,
        }

        chat_history.append(_data)
        # Lazy %-style args: the message is only formatted if INFO is enabled.
        logger.info(
            "Round #%s user_response:\n %s", _round, user_response.msgs[0].content
        )
        logger.info(
            "Round #%s assistant_response:\n %s",
            _round,
            assistant_response.msgs[0].content,
        )

        if (
            assistant_response.terminated
            or user_response.terminated
            or "TASK_DONE" in user_response.msg.content
        ):
            break

        input_msg = assistant_response.msg

    # The final assistant message is taken as the overall answer.
    answer = chat_history[-1]["assistant"]
    token_info = {
        "completion_token_count": overall_completion_token_count,
        "prompt_token_count": overall_prompt_token_count,
    }

    return answer, chat_history, token_info
async def arun_society(
society: OwlRolePlaying,
round_limit: int = 15,
) -> Tuple[str, List[dict], dict]:

View File

@@ -21,7 +21,7 @@ keywords = [
"learning-systems"
]
dependencies = [
"camel-ai[all]==0.2.28",
"camel-ai[all]==0.2.29",
"chunkr-ai>=0.0.41",
"docx2markdown>=0.1.1",
"gradio>=3.50.2",

View File

@@ -1,4 +1,4 @@
camel-ai[all]==0.2.28
camel-ai[all]==0.2.29
chunkr-ai>=0.0.41
docx2markdown>=0.1.1
gradio>=3.50.2

View File

@@ -49,7 +49,9 @@ def main():
print(
f"Error: Unable to import necessary modules. Please ensure all dependencies are installed: {e}"
)
print("Tip: Run 'pip install -r requirements.txt' to install all dependencies")
print(
"Tip: Run 'pip install -r requirements.txt --use-pep517' to install all dependencies"
)
sys.exit(1)
except Exception as e:
print(f"Error occurred while starting the application: {e}")

View File

@@ -47,7 +47,9 @@ def main():
except ImportError as e:
print(f"错误: 无法导入必要的模块。请确保已安装所有依赖项: {e}")
print("提示: 运行 'pip install -r requirements.txt' 安装所有依赖项")
print(
"提示: 运行 'pip install -r requirements.txt --use-pep517' 安装所有依赖项"
)
sys.exit(1)
except Exception as e:
print(f"启动应用程序时出错: {e}")

8
uv.lock generated
View File

@@ -482,7 +482,7 @@ wheels = [
[[package]]
name = "camel-ai"
version = "0.2.28"
version = "0.2.29"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama" },
@@ -499,9 +499,9 @@ dependencies = [
{ name = "pyyaml" },
{ name = "tiktoken" },
]
sdist = { url = "https://files.pythonhosted.org/packages/6a/3b/7f350ae3c5bf42263688d3a69333e3908af4d45ce8f5f838af634a2720b3/camel_ai-0.2.28.tar.gz", hash = "sha256:f47e12bdf59df6e789db4587f0c5bd0adf43b2029d6be1bfcc31bfd41cab9d9f", size = 443082 }
sdist = { url = "https://files.pythonhosted.org/packages/00/f8/fdb2478ec3b61f78af2a8a8ab0b575e795a015e89c2c058cee61d63a3951/camel_ai-0.2.29.tar.gz", hash = "sha256:b077885ea7a1fd6b4d53dd77e83b6b4c2ded96e43ced6a2f4bd51a434a29bbdb", size = 440795 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/5d/27/8a6e97f660354ce03413872268c7f4a40ceefdf39b20f161cb7f672dc67c/camel_ai-0.2.28-py3-none-any.whl", hash = "sha256:079e7e905a36b64be47a6a27ad4b99d21ca0403b27027a4d777744968a22040a", size = 748237 },
{ url = "https://files.pythonhosted.org/packages/2b/c4/4c0c388464d4c8f8ec7704d39459883e0769268b566a82245f545b09f703/camel_ai-0.2.29-py3-none-any.whl", hash = "sha256:812143a204e364703be40066101c0cf34769bc589dac81373444acc6bab8fe7b", size = 746424 },
]
[package.optional-dependencies]
@@ -3622,7 +3622,7 @@ dependencies = [
[package.metadata]
requires-dist = [
{ name = "camel-ai", extras = ["all"], specifier = "==0.2.28" },
{ name = "camel-ai", extras = ["all"], specifier = "==0.2.29" },
{ name = "chunkr-ai", specifier = ">=0.0.41" },
{ name = "docx2markdown", specifier = ">=0.1.1" },
{ name = "gradio", specifier = ">=3.50.2" },