diff --git a/README.md b/README.md index 2ec7294..cc94473 100644 --- a/README.md +++ b/README.md @@ -386,7 +386,7 @@ You can run OWL agent with your own task by modifying the `examples/run.py` scri ```python # Define your own task -question = "Task description here." +task = "Task description here." -society = construct_society(question) +society = construct_society(task) answer, chat_history, token_count = run_society(society) @@ -398,7 +398,7 @@ For uploading files, simply provide the file path along with your question: ```python # Task with a local file (e.g., file path: `tmp/example.docx`) -question = "What is in the given DOCX file? Here is the file path: tmp/example.docx" +task = "What is in the given DOCX file? Here is the file path: tmp/example.docx" -society = construct_society(question) +society = construct_society(task) answer, chat_history, token_count = run_society(society) diff --git a/README_zh.md b/README_zh.md index 63487a2..02005a1 100644 --- a/README_zh.md +++ b/README_zh.md @@ -379,7 +379,7 @@ python examples/run_ollama.py ```python # Define your own task -question = "Task description here." +task = "Task description here." -society = construct_society(question) +society = construct_society(task) answer, chat_history, token_count = run_society(society) @@ -391,7 +391,7 @@ print(f"\033[94mAnswer: {answer}\033[0m") ```python # 处理本地文件(例如,文件路径为 `tmp/example.docx`) -question = "给定的 DOCX 文件中有什么内容?文件路径如下:tmp/example.docx" +task = "给定的 DOCX 文件中有什么内容?文件路径如下:tmp/example.docx" -society = construct_society(question) +society = construct_society(task) answer, chat_history, token_count = run_society(society) diff --git a/examples/run.py b/examples/run.py index 60a118c..d98d721 100644 --- a/examples/run.py +++ b/examples/run.py @@ -11,6 +11,8 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= +import sys +import pathlib from dotenv import load_dotenv from camel.models import ModelFactory from camel.toolkits import ( @@ -29,8 +31,6 @@ from camel.societies import RolePlaying from owl.utils import run_society, DocumentProcessingToolkit -import pathlib - base_dir = pathlib.Path(__file__).parent.parent env_path = base_dir / "owl" / ".env" load_dotenv(dotenv_path=str(env_path)) @@ -60,7 +60,7 @@ def construct_society(question: str) -> RolePlaying: model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, @@ -91,7 +91,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *VideoAnalysisToolkit(model=models["video"]).get_tools(), @@ -130,11 +130,14 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" - # Example research question - question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + # Default research question + default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." 
+ + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_azure_openai.py b/examples/run_azure_openai.py index 4c11f8f..76313d8 100644 --- a/examples/run_azure_openai.py +++ b/examples/run_azure_openai.py @@ -12,6 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= import os +import sys from dotenv import load_dotenv from camel.configs import ChatGPTConfig from camel.models import ModelFactory @@ -58,7 +59,7 @@ def construct_society(question: str) -> OwlRolePlaying: models = { "user": ModelFactory.create(**base_model_config), "assistant": ModelFactory.create(**base_model_config), - "web": ModelFactory.create(**base_model_config), + "browsing": ModelFactory.create(**base_model_config), "planning": ModelFactory.create(**base_model_config), "image": ModelFactory.create(**base_model_config), } @@ -67,7 +68,7 @@ def construct_society(question: str) -> OwlRolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(), @@ -104,10 +105,14 @@ def construct_society(question: str) -> OwlRolePlaying: def main(): r"""Main function to run the OWL system with Azure OpenAI.""" # Example question - question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. 
Please provide me with the product name and price. No need to verify your answer." + + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) + answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_cli.py b/examples/run_cli.py index b8e453e..4df312a 100644 --- a/examples/run_cli.py +++ b/examples/run_cli.py @@ -110,7 +110,7 @@ def construct_society() -> RolePlaying: model_type=selected_model_type, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=selected_model_platform, model_type=selected_model_type, model_config_dict={"temperature": 0}, @@ -141,9 +141,8 @@ def construct_society() -> RolePlaying: tools = [ *BrowserToolkit( headless=False, - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], - output_language="Chinese", ).get_tools(), *VideoAnalysisToolkit(model=models["video"]).get_tools(), *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(), diff --git a/examples/run_deepseek_zh.py b/examples/run_deepseek_zh.py index aa7e12c..d471c89 100644 --- a/examples/run_deepseek_zh.py +++ b/examples/run_deepseek_zh.py @@ -17,7 +17,7 @@ # You can obtain your API key from DeepSeek platform: https://platform.deepseek.com/api_keys # Set it as DEEPSEEK_API_KEY="your-api-key" in your .env file or add it to your environment variables - +import sys from dotenv import load_dotenv from camel.models import ModelFactory @@ -102,10 +102,14 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" # Example research question - question = "搜索OWL项目最近的新闻并生成一篇报告,最后保存到本地。" + default_task = "搜索OWL项目最近的新闻并生成一篇报告,最后保存到本地。" + + # Override default task if 
command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) + answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_gaia_roleplaying.py b/examples/run_gaia_roleplaying.py index d399de8..86e9268 100644 --- a/examples/run_gaia_roleplaying.py +++ b/examples/run_gaia_roleplaying.py @@ -71,7 +71,7 @@ def main(): model_type=ModelType.GPT_4O, model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O, model_config_dict=ChatGPTConfig(temperature=0, top_p=1).as_dict(), @@ -97,7 +97,7 @@ def main(): tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *VideoAnalysisToolkit( diff --git a/examples/run_groq.py b/examples/run_groq.py index 9f3f3a6..24a17e5 100644 --- a/examples/run_groq.py +++ b/examples/run_groq.py @@ -26,6 +26,7 @@ To use this module: 3. 
Run with: python -m examples.run_groq """ +import sys from dotenv import load_dotenv from camel.models import ModelFactory from camel.toolkits import ( @@ -70,7 +71,7 @@ def construct_society(question: str) -> OwlRolePlaying: model_type=ModelType.GROQ_LLAMA_3_3_70B, # Main assistant needs tool capability model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.GROQ, model_type=ModelType.GROQ_LLAMA_3_3_70B, # Web browsing requires tool usage model_config_dict={"temperature": 0}, @@ -101,7 +102,7 @@ def construct_society(question: str) -> OwlRolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *VideoAnalysisToolkit(model=models["video"]).get_tools(), @@ -141,13 +142,18 @@ def construct_society(question: str) -> OwlRolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" # Example research question - question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." # Construct and run the society # Note: This configuration uses GROQ_LLAMA_3_3_70B for tool-intensive roles (assistant, web, planning, video, image) # and GROQ_MIXTRAL_8_7B for document processing. GROQ_LLAMA_3_1_8B is used only for the user role # which doesn't require tool usage capabilities. 
- society = construct_society(question) + + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task + + # Construct and run the society + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_mcp.py b/examples/run_mcp.py index 71b6854..023b0d0 100644 --- a/examples/run_mcp.py +++ b/examples/run_mcp.py @@ -73,6 +73,7 @@ Note: """ import asyncio +import sys from pathlib import Path from typing import List @@ -146,15 +147,19 @@ async def main(): try: await mcp_toolkit.connect() - question = ( + # Default task + default_task = ( "I'd like a academic report about Andrew Ng, including " "his research direction, published papers (At least 3)," " institutions, etc. " ) + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task + # Connect to all MCP toolkits tools = [*mcp_toolkit.get_tools()] - society = await construct_society(question, tools) + society = await construct_society(task, tools) answer, chat_history, token_count = await arun_society(society) print(f"\033[94mAnswer: {answer}\033[0m") diff --git a/examples/run_mini.py b/examples/run_mini.py index a2e4a84..727b44c 100644 --- a/examples/run_mini.py +++ b/examples/run_mini.py @@ -11,6 +11,7 @@ # See the License for the specific language governing permissions and # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= +import sys from dotenv import load_dotenv from camel.models import ModelFactory @@ -58,7 +59,7 @@ def construct_society(question: str) -> RolePlaying: model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, @@ -74,7 +75,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), SearchToolkit().search_duckduckgo, @@ -106,11 +107,14 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" - # Example research question - question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + # Default research question + default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_ollama.py b/examples/run_ollama.py index e4efd37..049bad5 100644 --- a/examples/run_ollama.py +++ b/examples/run_ollama.py @@ -13,6 +13,7 @@ # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= # run_ollama.py by tj-scripts(https://github.com/tj-scripts) +import sys from dotenv import load_dotenv from camel.models import ModelFactory from camel.toolkits import ( @@ -64,7 +65,7 @@ def construct_society(question: str) -> RolePlaying: url="http://localhost:11434/v1", model_config_dict={"temperature": 0.2, "max_tokens": 1000000}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OLLAMA, model_type="llava:latest", url="http://localhost:11434/v1", @@ -88,7 +89,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(), @@ -124,11 +125,14 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" - # Example research question - question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + # Default research question + default_task = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." 
+ + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_openai_compatiable_model.py b/examples/run_openai_compatiable_model.py index 37057e1..cad8fbb 100644 --- a/examples/run_openai_compatiable_model.py +++ b/examples/run_openai_compatiable_model.py @@ -53,6 +53,7 @@ def construct_society(question: str) -> RolePlaying: models = { "user": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type=os.getenv("USER_ROLE_API_MODEL_TYPE", os.getenv("LLM_ROLE_API_MODEL_TYPE", "qwen-max")), api_key=os.getenv("USER_ROLE_API_KEY", os.getenv("LLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))), url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("LLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")), @@ -70,9 +71,11 @@ def construct_society(question: str) -> RolePlaying: "temperature": float(os.getenv("ASSISTANT_ROLE_API_MODEL_TEMPERATURE", os.getenv("LLM_ROLE_API_MODEL_TEMPERATURE", "0.4"))), "max_tokens": int(os.getenv("ASSISTANT_ROLE_API_MODEL_MAX_TOKENS", os.getenv("LLM_ROLE_API_MODEL_MAX_TOKENS", "4096"))) }, + ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type=os.getenv("WEB_ROLE_API_MODEL_TYPE", os.getenv("VLLM_ROLE_API_MODEL_TYPE", "qwen-vl-max")), api_key=os.getenv("WEB_ROLE_API_KEY", os.getenv("VLLM_ROLE_API_KEY", os.getenv("QWEN_API_KEY", "Your_Key"))), url=os.getenv("USER_ROLE_API_BASE_URL", os.getenv("VLLM_ROLE_API_BASE_URL", "https://dashscope.aliyuncs.com/compatible-mode/v1")), @@ -107,7 +110,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote 
servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(), @@ -141,6 +144,7 @@ def construct_society(question: str) -> RolePlaying: return society + def main(question: str = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."): r"""Main function to run the OWL system with an example question. Args: @@ -150,8 +154,10 @@ def main(question: str = "Navigate to Amazon.com and identify one product that i Returns: None """ + # Construct and run the society society = construct_society(question) + answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_qwen_mini_zh.py b/examples/run_qwen_mini_zh.py index 59d27cd..e7c7eec 100644 --- a/examples/run_qwen_mini_zh.py +++ b/examples/run_qwen_mini_zh.py @@ -17,7 +17,7 @@ # Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables from dotenv import load_dotenv - +import sys from camel.models import ModelFactory from camel.toolkits import BrowserToolkit, SearchToolkit, FileWriteToolkit from camel.types import ModelPlatformType, ModelType @@ -101,9 +101,14 @@ def construct_society(question: str) -> RolePlaying: # Example case -question = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格" +default_task = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格" + +# Override default task if command line argument is provided +task = sys.argv[1] if len(sys.argv) > 1 else default_task + +# Construct and run the society +society = construct_society(task) -society = construct_society(question) answer, chat_history, token_count = run_society(society) print(f"\033[94mAnswer: {answer}\033[0m") diff --git a/examples/run_qwen_zh.py b/examples/run_qwen_zh.py index 2d2198e..a34c77f 100644 --- 
a/examples/run_qwen_zh.py +++ b/examples/run_qwen_zh.py @@ -16,6 +16,7 @@ # You can obtain your API key from Bailian platform: bailian.console.aliyun.com # Set it as QWEN_API_KEY="your-api-key" in your .env file or add it to your environment variables +import sys from dotenv import load_dotenv from camel.models import ModelFactory from camel.toolkits import ( @@ -67,7 +68,7 @@ def construct_society(question: str) -> RolePlaying: model_type=ModelType.QWEN_MAX, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.QWEN, model_type=ModelType.QWEN_VL_MAX, model_config_dict={"temperature": 0}, @@ -98,7 +99,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], output_language="Chinese", ).get_tools(), @@ -140,10 +141,13 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" # Example research question - question = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格" + default_task = "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格" + + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_terminal.py b/examples/run_terminal.py index 00ce667..2d47dd3 100644 --- a/examples/run_terminal.py +++ b/examples/run_terminal.py @@ -12,6 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. 
========= from dotenv import load_dotenv +import sys import os from camel.models import ModelFactory from camel.toolkits import ( @@ -58,7 +59,7 @@ def construct_society(question: str) -> RolePlaying: model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, @@ -74,7 +75,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), SearchToolkit().search_duckduckgo, @@ -108,13 +109,16 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" # Example research question - question = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai, + default_task = f"""Open Google Search, summarize the number of GitHub stars, forks, etc., of the camel framework of camel-ai, and write the numbers into a Python file using the plot package, save it to "+{os.path.join(base_dir, 'final_output')}+", and execute the Python file with the local terminal to display the graph for me.""" + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task + # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/examples/run_terminal_zh.py b/examples/run_terminal_zh.py index 74c9dbf..46fb30a 100644 --- a/examples/run_terminal_zh.py +++ b/examples/run_terminal_zh.py @@ -12,6 +12,7 @@ # limitations under the License. # ========= Copyright 2023-2024 @ CAMEL-AI.org. 
All Rights Reserved. ========= from dotenv import load_dotenv +import sys import os from camel.models import ModelFactory from camel.toolkits import ( @@ -58,7 +59,7 @@ def construct_society(question: str) -> RolePlaying: model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, ), - "web": ModelFactory.create( + "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI, model_type=ModelType.GPT_4O, model_config_dict={"temperature": 0}, @@ -74,7 +75,7 @@ def construct_society(question: str) -> RolePlaying: tools = [ *BrowserToolkit( headless=False, # Set to True for headless mode (e.g., on remote servers) - web_agent_model=models["web"], + web_agent_model=models["browsing"], planning_agent_model=models["planning"], ).get_tools(), SearchToolkit().search_duckduckgo, @@ -108,11 +109,14 @@ def construct_society(question: str) -> RolePlaying: def main(): r"""Main function to run the OWL system with an example question.""" # Example research question - question = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"+{os.path.join + default_task = f"""打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到"+{os.path.join (base_dir, 'final_output')}+",用本地终端执行python文件显示图出来给我""" + # Override default task if command line argument is provided + task = sys.argv[1] if len(sys.argv) > 1 else default_task + # Construct and run the society - society = construct_society(question) + society = construct_society(task) answer, chat_history, token_count = run_society(society) # Output the result diff --git a/pyproject.toml b/pyproject.toml index 675afa1..6987e74 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -21,7 +21,7 @@ keywords = [ "learning-systems" ] dependencies = [ - "camel-ai[all]==0.2.34", + "camel-ai[all]==0.2.35", "chunkr-ai>=0.0.41", "docx2markdown>=0.1.1", "gradio>=3.50.2", diff --git a/requirements.txt b/requirements.txt index be1437d..8f3194a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 
+1,4 @@ -camel-ai[all]==0.2.34 +camel-ai[all]==0.2.35 chunkr-ai>=0.0.41 docx2markdown>=0.1.1 gradio>=3.50.2 diff --git a/uv.lock b/uv.lock index afde280..2313fe4 100644 --- a/uv.lock +++ b/uv.lock @@ -482,7 +482,7 @@ wheels = [ [[package]] name = "camel-ai" -version = "0.2.34" +version = "0.2.35" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama" }, @@ -496,9 +496,9 @@ dependencies = [ { name = "pyyaml" }, { name = "tiktoken" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/8c/52/ec6271bad0af4160bb4510f1039722346cfacb4117f8d049672dd383f3e4/camel_ai-0.2.34.tar.gz", hash = "sha256:d1a4965c1cb46c4bfcf79c49849f27e21636a88bc88172f831eb9566168db5e1", size = 444800 } +sdist = { url = "https://files.pythonhosted.org/packages/6b/9d/fac44260ebec63b9199d170fc22b5f3b77a5ab51fb6014fa962b0173f524/camel_ai-0.2.35.tar.gz", hash = "sha256:b90e54a81a73c473a0e673b14db5a32fb3eeec394d61a071cf510d87490f4d49", size = 451009 } wheels = [ - { url = "https://files.pythonhosted.org/packages/92/f9/3b4dd1badff092507a380a46b5d3f721e21b9d2bfd934e9d8542d52aa62f/camel_ai-0.2.34-py3-none-any.whl", hash = "sha256:6cf4e0d00a6f67d79965dc578a4cee57b31ae08e2702ebed2fe6204be0be8102", size = 755831 }, + { url = "https://files.pythonhosted.org/packages/cb/6a/c38145134e6e1a92e553c336d9bb51286bc94c1334e8bc311daa37c6a742/camel_ai-0.2.35-py3-none-any.whl", hash = "sha256:3778da315e7e4893d4d841b561f9d4e0fa6e7976b430daa52e0c887ae18431ec", size = 765923 }, ] [package.optional-dependencies] @@ -3601,7 +3601,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "camel-ai", extras = ["all"], specifier = "==0.2.34" }, + { name = "camel-ai", extras = ["all"], specifier = "==0.2.35" }, { name = "chunkr-ai", specifier = ">=0.0.41" }, { name = "docx2markdown", specifier = ">=0.1.1" }, { name = "gradio", specifier = ">=3.50.2" },