diff --git a/README.md b/README.md index 638c5fd..bb7b4c0 100644 --- a/README.md +++ b/README.md @@ -246,6 +246,9 @@ python owl/run_deepseek_zh.py # Run with other OpenAI-compatible models python owl/run_openai_compatiable_model.py + +# Run with Ollama API +python owl/run_ollama.py ``` For a simpler version that only requires an LLM API key, you can try our minimal example: diff --git a/README_zh.md b/README_zh.md index e17758c..35c6af4 100644 --- a/README_zh.md +++ b/README_zh.md @@ -246,6 +246,9 @@ python owl/run_deepseek_zh.py # 使用其他 OpenAI 兼容模型运行 python owl/run_openai_compatiable_model.py + +# 使用 Ollama API 运行 +python owl/run_ollama.py ``` 你可以通过修改 `run.py` 脚本来运行自己的任务: diff --git a/owl/.env_template b/owl/.env_template index 550f899..0d4cc01 100644 --- a/owl/.env_template +++ b/owl/.env_template @@ -1,8 +1,11 @@ # MODEL & API (See https://github.com/camel-ai/camel/blob/master/camel/types/enums.py) +# Ollama API +OLLAMA_API_KEY="" + # OPENAI API -OPENAI_API_KEY = "" -# OPENAI_API_BASE_URL = "" +OPENAI_API_KEY="" +# OPENAI_API_BASE_URL="" # Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key) # QWEN_API_KEY="" diff --git a/owl/app.py b/owl/app.py index e214577..7c8d31e 100644 --- a/owl/app.py +++ b/owl/app.py @@ -40,6 +40,7 @@ SCRIPTS = { "Default": "run.py", "GAIA Roleplaying": "run_gaia_roleplaying.py", "OpenAI Compatible": "run_openai_compatiable_model.py", + "Ollama": "run_ollama.py", } # 脚本描述 @@ -51,6 +52,7 @@ SCRIPT_DESCRIPTIONS = { "Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具", "GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力", "OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点", + "Ollama": "使用本地Ollama API运行开源模型", } # 环境变量分组 @@ -84,6 +86,13 @@ ENV_GROUPS = { "required": False, "help": "DeepSeek API密钥,用于访问DeepSeek模型。获取方式:https://platform.deepseek.com/api_keys", }, + { + "name": "OLLAMA_API_KEY", + "label": "Ollama API密钥", + "type": "password", + "required": False, + "help": "Ollama API密钥。本地Ollama服务不校验密钥,可留空", + }, ], "搜索工具": [ { diff --git 
a/owl/run_ollama.py b/owl/run_ollama.py new file mode 100644 index 0000000..ca0977c --- /dev/null +++ b/owl/run_ollama.py @@ -0,0 +1,137 @@ +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. ========= +# run_ollama.py by tj-scripts(https://github.com/tj-scripts) +import os + +from dotenv import load_dotenv +from camel.models import ModelFactory +from camel.toolkits import ( + CodeExecutionToolkit, + ExcelToolkit, + ImageAnalysisToolkit, + SearchToolkit, + WebToolkit, +) +from camel.types import ModelPlatformType + +from utils import OwlRolePlaying, run_society + +from camel.logger import set_log_level + +set_log_level(level="DEBUG") + +load_dotenv() + + +def construct_society(question: str) -> OwlRolePlaying: + r"""Construct a society of agents based on the given question. + + Args: + question (str): The task or question to be addressed by the society. + + Returns: + OwlRolePlaying: A configured society of agents ready to address the question. 
+ """ + + # Create models for different components + models = { + "user": ModelFactory.create( + model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type="qwen2.5:3b", + api_key=os.getenv("OLLAMA_API_KEY"), + url="http://localhost:11434/v1", + model_config_dict={"temperature": 0.8, "max_tokens": 4096}, + ), + "assistant": ModelFactory.create( + model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type="qwen2.5:3b", + api_key=os.getenv("OLLAMA_API_KEY"), + url="http://localhost:11434/v1", + model_config_dict={"temperature": 0.2, "max_tokens": 4096}, + ), + "web": ModelFactory.create( + model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type="llava:latest", + api_key=os.getenv("OLLAMA_API_KEY"), + url="http://localhost:11434/v1", + model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + ), + "planning": ModelFactory.create( + model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type="qwen2.5:3b", + api_key=os.getenv("OLLAMA_API_KEY"), + url="http://localhost:11434/v1", + model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + ), + "image": ModelFactory.create( + model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, + model_type="llava:latest", + api_key=os.getenv("OLLAMA_API_KEY"), + url="http://localhost:11434/v1", + model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + ), + } + + # Configure toolkits + tools = [ + *WebToolkit( + headless=False, # Set to True for headless mode (e.g., on remote servers) + web_agent_model=models["web"], + planning_agent_model=models["planning"], + ).get_tools(), + *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(), + *ImageAnalysisToolkit(model=models["image"]).get_tools(), + SearchToolkit().search_duckduckgo, + # SearchToolkit().search_google, # Comment this out if you don't have google search + SearchToolkit().search_wiki, + *ExcelToolkit().get_tools(), + ] + + # Configure agent roles and parameters + user_agent_kwargs = {"model": models["user"]} + 
assistant_agent_kwargs = {"model": models["assistant"], "tools": tools} + + # Configure task parameters + task_kwargs = { + "task_prompt": question, + "with_task_specify": False, + } + + # Create and return the society + society = OwlRolePlaying( + **task_kwargs, + user_role_name="user", + user_agent_kwargs=user_agent_kwargs, + assistant_role_name="assistant", + assistant_agent_kwargs=assistant_agent_kwargs, + ) + + return society + + +def main(): + r"""Main function to run the OWL system with an example question.""" + # Example research question + question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer." + + # Construct and run the society + society = construct_society(question) + answer, chat_history, token_count = run_society(society) + + # Output the result + print(f"\033[94mAnswer: {answer}\033[0m") + + +if __name__ == "__main__": + main()