From 3c57044c71a4cb9ff6402cc0d1a9a0392e77211b Mon Sep 17 00:00:00 2001
From: "yifeng.wang" <3038880699@qq.com>
Date: Thu, 13 Mar 2025 12:22:12 +0800
Subject: [PATCH 01/38] refactor directory
---
owl/app.py | 891 -----------------
owl/app_en.py | 918 ------------------
owl/{ => examples}/run.py | 0
owl/{ => examples}/run_deepseek_zh.py | 0
owl/{ => examples}/run_gaia_roleplaying.py | 0
owl/{ => examples}/run_mini.py | 0
owl/{ => examples}/run_ollama.py | 0
.../run_openai_compatiable_model.py | 0
owl/{ => examples}/run_qwen_mini_zh.py | 0
owl/{ => examples}/run_qwen_zh.py | 0
owl/{ => examples}/run_terminal.py | 0
owl/{ => examples}/run_terminal_zh.py | 0
owl/script_adapter.py | 267 -----
owl/webapp.py | 400 ++++++++
run_app.py | 62 --
run_app_zh.py | 60 --
16 files changed, 400 insertions(+), 2198 deletions(-)
delete mode 100644 owl/app.py
delete mode 100644 owl/app_en.py
rename owl/{ => examples}/run.py (100%)
rename owl/{ => examples}/run_deepseek_zh.py (100%)
rename owl/{ => examples}/run_gaia_roleplaying.py (100%)
rename owl/{ => examples}/run_mini.py (100%)
rename owl/{ => examples}/run_ollama.py (100%)
rename owl/{ => examples}/run_openai_compatiable_model.py (100%)
rename owl/{ => examples}/run_qwen_mini_zh.py (100%)
rename owl/{ => examples}/run_qwen_zh.py (100%)
rename owl/{ => examples}/run_terminal.py (100%)
rename owl/{ => examples}/run_terminal_zh.py (100%)
delete mode 100644 owl/script_adapter.py
create mode 100644 owl/webapp.py
delete mode 100644 run_app.py
delete mode 100644 run_app_zh.py
diff --git a/owl/app.py b/owl/app.py
deleted file mode 100644
index 15b967b..0000000
--- a/owl/app.py
+++ /dev/null
@@ -1,891 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-import os
-import sys
-import gradio as gr
-import subprocess
-import threading
-import time
-from datetime import datetime
-import queue
-from pathlib import Path
-import json
-import signal
-import dotenv
-
-# 设置日志队列
-log_queue: queue.Queue[str] = queue.Queue()
-
-# 当前运行的进程
-current_process = None
-process_lock = threading.Lock()
-
-# 脚本选项
-SCRIPTS = {
- "Qwen Mini (中文)": "run_qwen_mini_zh.py",
- "Qwen (中文)": "run_qwen_zh.py",
- "Mini": "run_mini.py",
- "DeepSeek (中文)": "run_deepseek_zh.py",
- "Default": "run.py",
- "GAIA Roleplaying": "run_gaia_roleplaying.py",
- "OpenAI Compatible": "run_openai_compatiable_model.py",
- "Ollama": "run_ollama.py",
- "Terminal": "run_terminal_zh.py",
-}
-
-# 脚本描述
-SCRIPT_DESCRIPTIONS = {
- "Qwen Mini (中文)": "使用阿里云Qwen模型的中文版本,适合中文问答和任务",
- "Qwen (中文)": "使用阿里云Qwen模型,支持多种工具和功能",
- "Mini": "轻量级版本,使用OpenAI GPT-4o模型",
- "DeepSeek (中文)": "使用DeepSeek模型,适合非多模态任务",
- "Default": "默认OWL实现,使用OpenAI GPT-4o模型和全套工具",
- "GAIA Roleplaying": "GAIA基准测试实现,用于评估模型能力",
- "OpenAI Compatible": "使用兼容OpenAI API的第三方模型,支持自定义API端点",
- "Ollama": "使用Ollama API",
- "Terminal": "使用本地终端执行python文件",
-}
-
-# 环境变量分组
-ENV_GROUPS = {
- "模型API": [
- {
- "name": "OPENAI_API_KEY",
- "label": "OpenAI API密钥",
- "type": "password",
- "required": False,
- "help": "OpenAI API密钥,用于访问GPT模型。获取方式:https://platform.openai.com/api-keys",
- },
- {
- "name": "OPENAI_API_BASE_URL",
- "label": "OpenAI API基础URL",
- "type": "text",
- "required": False,
- "help": "OpenAI API的基础URL,可选。如果使用代理或自定义端点,请设置此项。",
- },
- {
- "name": "QWEN_API_KEY",
- "label": "阿里云Qwen API密钥",
- "type": "password",
- "required": False,
- "help": "阿里云Qwen API密钥,用于访问Qwen模型。获取方式:https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
- },
- {
- "name": "DEEPSEEK_API_KEY",
- "label": "DeepSeek API密钥",
- "type": "password",
- "required": False,
- "help": "DeepSeek API密钥,用于访问DeepSeek模型。获取方式:https://platform.deepseek.com/api_keys",
- },
- ],
- "搜索工具": [
- {
- "name": "GOOGLE_API_KEY",
- "label": "Google API密钥",
- "type": "password",
- "required": False,
- "help": "Google搜索API密钥,用于网络搜索功能。获取方式:https://developers.google.com/custom-search/v1/overview",
- },
- {
- "name": "SEARCH_ENGINE_ID",
- "label": "搜索引擎ID",
- "type": "text",
- "required": False,
- "help": "Google自定义搜索引擎ID,与Google API密钥配合使用。获取方式:https://developers.google.com/custom-search/v1/overview",
- },
- ],
- "其他工具": [
- {
- "name": "HF_TOKEN",
- "label": "Hugging Face令牌",
- "type": "password",
- "required": False,
- "help": "Hugging Face API令牌,用于访问Hugging Face模型和数据集。获取方式:https://huggingface.co/join",
- },
- {
- "name": "CHUNKR_API_KEY",
- "label": "Chunkr API密钥",
- "type": "password",
- "required": False,
- "help": "Chunkr API密钥,用于文档处理功能。获取方式:https://chunkr.ai/",
- },
- {
- "name": "FIRECRAWL_API_KEY",
- "label": "Firecrawl API密钥",
- "type": "password",
- "required": False,
- "help": "Firecrawl API密钥,用于网页爬取功能。获取方式:https://www.firecrawl.dev/",
- },
- ],
- "自定义环境变量": [], # 用户自定义的环境变量将存储在这里
-}
-
-
-def get_script_info(script_name):
- """获取脚本的详细信息"""
- return SCRIPT_DESCRIPTIONS.get(script_name, "无描述信息")
-
-
-def load_env_vars():
- """加载环境变量"""
- env_vars = {}
- # 尝试从.env文件加载
- dotenv.load_dotenv()
-
- # 获取所有环境变量
- for group in ENV_GROUPS.values():
- for var in group:
- env_vars[var["name"]] = os.environ.get(var["name"], "")
-
- # 加载.env文件中可能存在的其他环境变量
- if Path(".env").exists():
- try:
- with open(".env", "r", encoding="utf-8") as f:
- for line in f:
- line = line.strip()
- if line and not line.startswith("#") and "=" in line:
- try:
- key, value = line.split("=", 1)
- key = key.strip()
- value = value.strip()
-
- # 处理引号包裹的值
- if (value.startswith('"') and value.endswith('"')) or (
- value.startswith("'") and value.endswith("'")
- ):
- value = value[1:-1] # 移除首尾的引号
-
- # 检查是否是已知的环境变量
- known_var = False
- for group in ENV_GROUPS.values():
- if any(var["name"] == key for var in group):
- known_var = True
- break
-
- # 如果不是已知的环境变量,添加到自定义环境变量组
- if not known_var and key not in env_vars:
- ENV_GROUPS["自定义环境变量"].append(
- {
- "name": key,
- "label": key,
- "type": "text",
- "required": False,
- "help": "用户自定义环境变量",
- }
- )
- env_vars[key] = value
- except Exception as e:
- print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
- except Exception as e:
- print(f"加载.env文件时出错: {str(e)}")
-
- return env_vars
-
-
-def save_env_vars(env_vars):
- """保存环境变量到.env文件"""
- # 读取现有的.env文件内容
- env_path = Path(".env")
- existing_content = {}
-
- if env_path.exists():
- try:
- with open(env_path, "r", encoding="utf-8") as f:
- for line in f:
- line = line.strip()
- if line and not line.startswith("#") and "=" in line:
- try:
- key, value = line.split("=", 1)
- existing_content[key.strip()] = value.strip()
- except Exception as e:
- print(f"解析环境变量行时出错: {line}, 错误: {str(e)}")
- except Exception as e:
- print(f"读取.env文件时出错: {str(e)}")
-
- # 更新环境变量
- for key, value in env_vars.items():
- if value is not None: # 允许空字符串值,但不允许None
- # 确保值是字符串形式
- value = str(value) # 确保值是字符串
-
- # 检查值是否已经被引号包裹
- if (value.startswith('"') and value.endswith('"')) or (
- value.startswith("'") and value.endswith("'")
- ):
- # 已经被引号包裹,保持原样
- existing_content[key] = value
- # 更新环境变量时移除引号
- os.environ[key] = value[1:-1]
- else:
- # 没有被引号包裹,添加双引号
- # 用双引号包裹值,确保特殊字符被正确处理
- quoted_value = f'"{value}"'
- existing_content[key] = quoted_value
- # 同时更新当前进程的环境变量(使用未引用的值)
- os.environ[key] = value
-
- # 写入.env文件
- try:
- with open(env_path, "w", encoding="utf-8") as f:
- for key, value in existing_content.items():
- f.write(f"{key}={value}\n")
- except Exception as e:
- print(f"写入.env文件时出错: {str(e)}")
- return f"❌ 保存环境变量失败: {str(e)}"
-
- return "✅ 环境变量已保存"
-
-
-def add_custom_env_var(name, value, var_type):
- """添加自定义环境变量"""
- if not name:
- return "❌ 环境变量名不能为空", None
-
- # 检查是否已存在同名环境变量
- for group in ENV_GROUPS.values():
- if any(var["name"] == name for var in group):
- return f"❌ 环境变量 {name} 已存在", None
-
- # 添加到自定义环境变量组
- ENV_GROUPS["自定义环境变量"].append(
- {
- "name": name,
- "label": name,
- "type": var_type,
- "required": False,
- "help": "用户自定义环境变量",
- }
- )
-
- # 保存环境变量
- env_vars = {name: value}
- save_env_vars(env_vars)
-
- # 返回成功消息和更新后的环境变量组
- return f"✅ 已添加环境变量 {name}", ENV_GROUPS["自定义环境变量"]
-
-
-def update_custom_env_var(name, value, var_type):
- """更改自定义环境变量"""
- if not name:
- return "❌ 环境变量名不能为空", None
-
- # 检查环境变量是否存在于自定义环境变量组中
- found = False
- for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
- if var["name"] == name:
- # 更新类型
- ENV_GROUPS["自定义环境变量"][i]["type"] = var_type
- found = True
- break
-
- if not found:
- return f"❌ 自定义环境变量 {name} 不存在", None
-
- # 保存环境变量值
- env_vars = {name: value}
- save_env_vars(env_vars)
-
- # 返回成功消息和更新后的环境变量组
- return f"✅ 已更新环境变量 {name}", ENV_GROUPS["自定义环境变量"]
-
-
-def delete_custom_env_var(name):
- """删除自定义环境变量"""
- if not name:
- return "❌ 环境变量名不能为空", None
-
- # 检查环境变量是否存在于自定义环境变量组中
- found = False
- for i, var in enumerate(ENV_GROUPS["自定义环境变量"]):
- if var["name"] == name:
- # 从自定义环境变量组中删除
- del ENV_GROUPS["自定义环境变量"][i]
- found = True
- break
-
- if not found:
- return f"❌ 自定义环境变量 {name} 不存在", None
-
- # 从.env文件中删除该环境变量
- env_path = Path(".env")
- if env_path.exists():
- try:
- with open(env_path, "r", encoding="utf-8") as f:
- lines = f.readlines()
-
- with open(env_path, "w", encoding="utf-8") as f:
- for line in lines:
- try:
- # 更精确地匹配环境变量行
- line_stripped = line.strip()
- # 检查是否为注释行或空行
- if not line_stripped or line_stripped.startswith("#"):
- f.write(line) # 保留注释行和空行
- continue
-
- # 检查是否包含等号
- if "=" not in line_stripped:
- f.write(line) # 保留不包含等号的行
- continue
-
- # 提取变量名并检查是否与要删除的变量匹配
- var_name = line_stripped.split("=", 1)[0].strip()
- if var_name != name:
- f.write(line) # 保留不匹配的变量
- except Exception as e:
- print(f"处理.env文件行时出错: {line}, 错误: {str(e)}")
- # 出错时保留原行
- f.write(line)
- except Exception as e:
- print(f"删除环境变量时出错: {str(e)}")
- return f"❌ 删除环境变量失败: {str(e)}", None
-
- # 从当前进程的环境变量中删除
- if name in os.environ:
- del os.environ[name]
-
- # 返回成功消息和更新后的环境变量组
- return f"✅ 已删除环境变量 {name}", ENV_GROUPS["自定义环境变量"]
-
-
-def terminate_process():
- """终止当前运行的进程"""
- global current_process
-
- with process_lock:
- if current_process is not None and current_process.poll() is None:
- try:
- # 在Windows上使用taskkill强制终止进程树
- if os.name == "nt":
- # 获取进程ID
- pid = current_process.pid
- # 使用taskkill命令终止进程及其子进程 - 避免使用shell=True以提高安全性
- try:
- subprocess.run(
- ["taskkill", "/F", "/T", "/PID", str(pid)], check=False
- )
- except subprocess.SubprocessError as e:
- log_queue.put(f"终止进程时出错: {str(e)}\n")
- return f"❌ 终止进程时出错: {str(e)}"
- else:
- # 在Unix上使用SIGTERM和SIGKILL
- current_process.terminate()
- try:
- current_process.wait(timeout=3)
- except subprocess.TimeoutExpired:
- current_process.kill()
-
- # 等待进程终止
- try:
- current_process.wait(timeout=2)
- except subprocess.TimeoutExpired:
- pass # 已经尝试强制终止,忽略超时
-
- log_queue.put("进程已终止\n")
- return "✅ 进程已终止"
- except Exception as e:
- log_queue.put(f"终止进程时出错: {str(e)}\n")
- return f"❌ 终止进程时出错: {str(e)}"
- else:
- return "❌ 没有正在运行的进程"
-
-
-def run_script(script_dropdown, question, progress=gr.Progress()):
- """运行选定的脚本并返回输出"""
- global current_process
-
- script_name = SCRIPTS.get(script_dropdown)
- if not script_name:
- return "❌ 无效的脚本选择", "", "", "", None
-
- if not question.strip():
- return "请输入问题!", "", "", "", None
-
- # 清空日志队列
- while not log_queue.empty():
- log_queue.get()
-
- # 创建日志目录
- log_dir = Path("logs")
- log_dir.mkdir(exist_ok=True)
-
- # 创建带时间戳的日志文件
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
-
- # 构建命令
- # 获取当前脚本所在的基础路径
- base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-
- cmd = [
- sys.executable,
- os.path.join(base_path, "owl", "script_adapter.py"),
- os.path.join(base_path, "owl", script_name),
- ]
-
- # 创建环境变量副本并添加问题
- env = os.environ.copy()
- # 确保问题是字符串类型
- if not isinstance(question, str):
- question = str(question)
- # 保留换行符,但确保是有效的字符串
- env["OWL_QUESTION"] = question
-
- # 启动进程
- with process_lock:
- current_process = subprocess.Popen(
- cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True,
- bufsize=1,
- env=env,
- encoding="utf-8",
- )
-
- # 创建线程来读取输出
- def read_output():
- try:
- # 使用唯一的时间戳确保日志文件名不重复
- timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
- unique_log_file = (
- log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
- )
-
- # 使用这个唯一的文件名写入日志
- with open(unique_log_file, "w", encoding="utf-8") as f:
- # 更新全局日志文件路径
- nonlocal log_file
- log_file = unique_log_file
-
- for line in iter(current_process.stdout.readline, ""):
- if line:
- # 写入日志文件
- f.write(line)
- f.flush()
- # 添加到队列
- log_queue.put(line)
- except Exception as e:
- log_queue.put(f"读取输出时出错: {str(e)}\n")
-
- # 启动读取线程
- threading.Thread(target=read_output, daemon=True).start()
-
- # 收集日志
- logs = []
- progress(0, desc="正在运行...")
-
- # 等待进程完成或超时
- start_time = time.time()
- timeout = 1800 # 30分钟超时
-
- while current_process.poll() is None:
- # 检查是否超时
- if time.time() - start_time > timeout:
- with process_lock:
- if current_process.poll() is None:
- if os.name == "nt":
- current_process.send_signal(signal.CTRL_BREAK_EVENT)
- else:
- current_process.terminate()
- log_queue.put("执行超时,已终止进程\n")
- break
-
- # 从队列获取日志
- while not log_queue.empty():
- log = log_queue.get()
- logs.append(log)
-
- # 更新进度
- elapsed = time.time() - start_time
- progress(min(elapsed / 300, 0.99), desc="正在运行...")
-
- # 短暂休眠以减少CPU使用
- time.sleep(0.1)
-
- # 每秒更新一次日志显示
- yield (
- status_message(current_process),
- extract_answer(logs),
- "".join(logs),
- str(log_file),
- None,
- )
-
- # 获取剩余日志
- while not log_queue.empty():
- logs.append(log_queue.get())
-
- # 提取聊天历史(如果有)
- chat_history = extract_chat_history(logs)
-
- # 返回最终状态和日志
- return (
- status_message(current_process),
- extract_answer(logs),
- "".join(logs),
- str(log_file),
- chat_history,
- )
-
-
-def status_message(process):
- """根据进程状态返回状态消息"""
- if process.poll() is None:
- return "⏳ 正在运行..."
- elif process.returncode == 0:
- return "✅ 执行成功"
- else:
- return f"❌ 执行失败 (返回码: {process.returncode})"
-
-
-def extract_answer(logs):
- """从日志中提取答案"""
- answer = ""
- for log in logs:
- if "Answer:" in log:
- answer = log.split("Answer:", 1)[1].strip()
- break
- return answer
-
-
-def extract_chat_history(logs):
- """尝试从日志中提取聊天历史"""
- try:
- chat_json_str = ""
- capture_json = False
-
- for log in logs:
- if "chat_history" in log:
- # 开始捕获JSON
- start_idx = log.find("[")
- if start_idx != -1:
- capture_json = True
- chat_json_str = log[start_idx:]
- elif capture_json:
- # 继续捕获JSON直到找到匹配的结束括号
- chat_json_str += log
- if "]" in log:
- # 找到结束括号,尝试解析JSON
- end_idx = chat_json_str.rfind("]") + 1
- if end_idx > 0:
- try:
- # 清理可能的额外文本
- json_str = chat_json_str[:end_idx].strip()
- chat_data = json.loads(json_str)
-
- # 格式化为Gradio聊天组件可用的格式
- formatted_chat = []
- for msg in chat_data:
- if "role" in msg and "content" in msg:
- role = "用户" if msg["role"] == "user" else "助手"
- formatted_chat.append([role, msg["content"]])
- return formatted_chat
- except json.JSONDecodeError:
- # 如果解析失败,继续捕获
- pass
- except Exception:
- # 其他错误,停止捕获
- capture_json = False
- except Exception:
- pass
- return None
-
-
-def create_ui():
- """创建Gradio界面"""
- # 加载环境变量
- env_vars = load_env_vars()
-
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
- gr.Markdown(
- """
- # 🦉 OWL 智能助手运行平台
-
- 选择一个模型并输入您的问题,系统将运行相应的脚本并显示结果。
- """
- )
-
- with gr.Tabs():
- with gr.TabItem("运行模式"):
- with gr.Row():
- with gr.Column(scale=1):
- # 确保默认值是SCRIPTS中存在的键
- default_script = list(SCRIPTS.keys())[0] if SCRIPTS else None
- script_dropdown = gr.Dropdown(
- choices=list(SCRIPTS.keys()),
- value=default_script,
- label="选择模式",
- )
-
- script_info = gr.Textbox(
- value=get_script_info(default_script)
- if default_script
- else "",
- label="模型描述",
- interactive=False,
- )
-
- script_dropdown.change(
- fn=lambda x: get_script_info(x),
- inputs=script_dropdown,
- outputs=script_info,
- )
-
- question_input = gr.Textbox(
- lines=8,
- placeholder="请输入您的问题...",
- label="问题",
- elem_id="question_input",
- show_copy_button=True,
- )
-
- gr.Markdown(
- """
- > **注意**: 您输入的问题将替换脚本中的默认问题。系统会自动处理问题的替换,确保您的问题被正确使用。
- > 支持多行输入,换行将被保留。
- """
- )
-
- with gr.Row():
- run_button = gr.Button("运行", variant="primary")
- stop_button = gr.Button("终止", variant="stop")
-
- with gr.Column(scale=2):
- with gr.Tabs():
- with gr.TabItem("结果"):
- status_output = gr.Textbox(label="状态")
- answer_output = gr.Textbox(label="回答", lines=10)
- log_file_output = gr.Textbox(label="日志文件路径")
-
- with gr.TabItem("运行日志"):
- log_output = gr.Textbox(label="完整日志", lines=25)
-
- with gr.TabItem("聊天历史"):
- chat_output = gr.Chatbot(label="对话历史")
-
- # 示例问题
- examples = [
- [
- "Qwen Mini (中文)",
- "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
- ],
- [
- "DeepSeek (中文)",
- "请分析GitHub上CAMEL-AI项目的最新统计数据。找出该项目的星标数量、贡献者数量和最近的活跃度。然后,创建一个简单的Excel表格来展示这些数据,并生成一个柱状图来可视化这些指标。最后,总结CAMEL项目的受欢迎程度和发展趋势。",
- ],
- [
- "Default",
- "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer.",
- ],
- ]
-
- gr.Examples(examples=examples, inputs=[script_dropdown, question_input])
-
- with gr.TabItem("环境变量配置"):
- env_inputs = {}
- save_status = gr.Textbox(label="保存状态", interactive=False)
-
- # 添加自定义环境变量部分
- with gr.Accordion("添加自定义环境变量", open=True):
- with gr.Row():
- new_var_name = gr.Textbox(
- label="环境变量名", placeholder="例如:MY_CUSTOM_API_KEY"
- )
- new_var_value = gr.Textbox(
- label="环境变量值", placeholder="输入值"
- )
- new_var_type = gr.Dropdown(
- choices=["text", "password"], value="text", label="类型"
- )
-
- add_var_button = gr.Button("添加环境变量", variant="primary")
- add_var_status = gr.Textbox(label="添加状态", interactive=False)
-
- # 自定义环境变量列表
- custom_vars_list = gr.JSON(
- value=ENV_GROUPS["自定义环境变量"],
- label="已添加的自定义环境变量",
- visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
- )
-
- # 更改和删除自定义环境变量部分
- with gr.Accordion(
- "更改或删除自定义环境变量",
- open=True,
- visible=len(ENV_GROUPS["自定义环境变量"]) > 0,
- ) as update_delete_accordion:
- with gr.Row():
- # 创建下拉菜单,显示所有自定义环境变量
- custom_var_dropdown = gr.Dropdown(
- choices=[
- var["name"] for var in ENV_GROUPS["自定义环境变量"]
- ],
- label="选择环境变量",
- interactive=True,
- )
- update_var_value = gr.Textbox(
- label="新的环境变量值", placeholder="输入新值"
- )
- update_var_type = gr.Dropdown(
- choices=["text", "password"], value="text", label="类型"
- )
-
- with gr.Row():
- update_var_button = gr.Button("更新环境变量", variant="primary")
- delete_var_button = gr.Button("删除环境变量", variant="stop")
-
- update_var_status = gr.Textbox(label="操作状态", interactive=False)
-
- # 添加环境变量按钮点击事件
- add_var_button.click(
- fn=add_custom_env_var,
- inputs=[new_var_name, new_var_value, new_var_type],
- outputs=[add_var_status, custom_vars_list],
- ).then(
- fn=lambda vars: {"visible": len(vars) > 0},
- inputs=[custom_vars_list],
- outputs=[update_delete_accordion],
- )
-
- # 更新环境变量按钮点击事件
- update_var_button.click(
- fn=update_custom_env_var,
- inputs=[custom_var_dropdown, update_var_value, update_var_type],
- outputs=[update_var_status, custom_vars_list],
- )
-
- # 删除环境变量按钮点击事件
- delete_var_button.click(
- fn=delete_custom_env_var,
- inputs=[custom_var_dropdown],
- outputs=[update_var_status, custom_vars_list],
- ).then(
- fn=lambda vars: {"visible": len(vars) > 0},
- inputs=[custom_vars_list],
- outputs=[update_delete_accordion],
- )
-
- # 当自定义环境变量列表更新时,更新下拉菜单选项
- custom_vars_list.change(
- fn=lambda vars: {
- "choices": [var["name"] for var in vars],
- "value": None,
- },
- inputs=[custom_vars_list],
- outputs=[custom_var_dropdown],
- )
-
- # 现有环境变量配置
- for group_name, vars in ENV_GROUPS.items():
- if (
- group_name != "自定义环境变量" or len(vars) > 0
- ): # 只显示非空的自定义环境变量组
- with gr.Accordion(
- group_name, open=(group_name != "自定义环境变量")
- ):
- for var in vars:
- # 添加帮助信息
- gr.Markdown(f"**{var['help']}**")
-
- if var["type"] == "password":
- env_inputs[var["name"]] = gr.Textbox(
- value=env_vars.get(var["name"], ""),
- label=var["label"],
- placeholder=f"请输入{var['label']}",
- type="password",
- )
- else:
- env_inputs[var["name"]] = gr.Textbox(
- value=env_vars.get(var["name"], ""),
- label=var["label"],
- placeholder=f"请输入{var['label']}",
- )
-
- save_button = gr.Button("保存环境变量", variant="primary")
-
- # 保存环境变量
- save_inputs = [
- env_inputs[var_name]
- for group in ENV_GROUPS.values()
- for var in group
- for var_name in [var["name"]]
- if var_name in env_inputs
- ]
- save_button.click(
- fn=lambda *values: save_env_vars(
- dict(
- zip(
- [
- var["name"]
- for group in ENV_GROUPS.values()
- for var in group
- if var["name"] in env_inputs
- ],
- values,
- )
- )
- ),
- inputs=save_inputs,
- outputs=save_status,
- )
-
- # 运行脚本
- run_button.click(
- fn=run_script,
- inputs=[script_dropdown, question_input],
- outputs=[
- status_output,
- answer_output,
- log_output,
- log_file_output,
- chat_output,
- ],
- show_progress=True,
- )
-
- # 终止运行
- stop_button.click(fn=terminate_process, inputs=[], outputs=[status_output])
-
- # 添加页脚
- gr.Markdown(
- """
- ### 📝 使用说明
-
- - 选择一个模型并输入您的问题
- - 点击"运行"按钮开始执行
- - 如需终止运行,点击"终止"按钮
- - 在"结果"标签页查看执行状态和回答
- - 在"运行日志"标签页查看完整日志
- - 在"聊天历史"标签页查看对话历史(如果有)
- - 在"环境变量配置"标签页配置API密钥和其他环境变量
- - 您可以添加自定义环境变量,满足特殊需求
-
- ### ⚠️ 注意事项
-
- - 运行某些模型可能需要API密钥,请确保在"环境变量配置"标签页中设置了相应的环境变量
- - 某些脚本可能需要较长时间运行,请耐心等待
- - 如果运行超过30分钟,进程将自动终止
- - 您输入的问题将替换脚本中的默认问题,确保问题与所选模型兼容
- """
- )
-
- return app
-
-
-if __name__ == "__main__":
- # 创建并启动应用
- app = create_ui()
- app.queue().launch(share=True)
diff --git a/owl/app_en.py b/owl/app_en.py
deleted file mode 100644
index 094c1f5..0000000
--- a/owl/app_en.py
+++ /dev/null
@@ -1,918 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-import os
-import sys
-import gradio as gr
-import subprocess
-import threading
-import time
-from datetime import datetime
-import queue
-from pathlib import Path
-import json
-import signal
-import dotenv
-
-# Set up log queue
-log_queue: queue.Queue[str] = queue.Queue()
-
-# Currently running process
-current_process = None
-process_lock = threading.Lock()
-
-# Script options
-SCRIPTS = {
- "Qwen Mini (Chinese)": "run_qwen_mini_zh.py",
- "Qwen (Chinese)": "run_qwen_zh.py",
- "Mini": "run_mini.py",
- "DeepSeek (Chinese)": "run_deepseek_zh.py",
- "Default": "run.py",
- "GAIA Roleplaying": "run_gaia_roleplaying.py",
- "OpenAI Compatible": "run_openai_compatiable_model.py",
- "Ollama": "run_ollama.py",
- "Terminal": "run_terminal.py",
-}
-
-# Script descriptions
-SCRIPT_DESCRIPTIONS = {
- "Qwen Mini (Chinese)": "Uses the Chinese version of Alibaba Cloud's Qwen model, suitable for Chinese Q&A and tasks",
- "Qwen (Chinese)": "Uses Alibaba Cloud's Qwen model, supports various tools and functions",
- "Mini": "Lightweight version, uses OpenAI GPT-4o model",
- "DeepSeek (Chinese)": "Uses DeepSeek model, suitable for non-multimodal tasks",
- "Default": "Default OWL implementation, uses OpenAI GPT-4o model and full set of tools",
- "GAIA Roleplaying": "GAIA benchmark implementation, used to evaluate model capabilities",
- "OpenAI Compatible": "Uses third-party models compatible with OpenAI API, supports custom API endpoints",
- "Ollama": "Uses Ollama API",
- "Terminal": "Uses local terminal to execute python files",
-}
-
-# Environment variable groups
-ENV_GROUPS = {
- "Model API": [
- {
- "name": "OPENAI_API_KEY",
- "label": "OpenAI API Key",
- "type": "password",
- "required": False,
- "help": "OpenAI API key for accessing GPT models. Get it from: https://platform.openai.com/api-keys",
- },
- {
- "name": "OPENAI_API_BASE_URL",
- "label": "OpenAI API Base URL",
- "type": "text",
- "required": False,
- "help": "Base URL for OpenAI API, optional. Set this if using a proxy or custom endpoint.",
- },
- {
- "name": "QWEN_API_KEY",
- "label": "Alibaba Cloud Qwen API Key",
- "type": "password",
- "required": False,
- "help": "Alibaba Cloud Qwen API key for accessing Qwen models. Get it from: https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
- },
- {
- "name": "DEEPSEEK_API_KEY",
- "label": "DeepSeek API Key",
- "type": "password",
- "required": False,
- "help": "DeepSeek API key for accessing DeepSeek models. Get it from: https://platform.deepseek.com/api_keys",
- },
- ],
- "Search Tools": [
- {
- "name": "GOOGLE_API_KEY",
- "label": "Google API Key",
- "type": "password",
- "required": False,
- "help": "Google Search API key for web search functionality. Get it from: https://developers.google.com/custom-search/v1/overview",
- },
- {
- "name": "SEARCH_ENGINE_ID",
- "label": "Search Engine ID",
- "type": "text",
- "required": False,
- "help": "Google Custom Search Engine ID, used with Google API key. Get it from: https://developers.google.com/custom-search/v1/overview",
- },
- ],
- "Other Tools": [
- {
- "name": "HF_TOKEN",
- "label": "Hugging Face Token",
- "type": "password",
- "required": False,
- "help": "Hugging Face API token for accessing Hugging Face models and datasets. Get it from: https://huggingface.co/join",
- },
- {
- "name": "CHUNKR_API_KEY",
- "label": "Chunkr API Key",
- "type": "password",
- "required": False,
- "help": "Chunkr API key for document processing functionality. Get it from: https://chunkr.ai/",
- },
- {
- "name": "FIRECRAWL_API_KEY",
- "label": "Firecrawl API Key",
- "type": "password",
- "required": False,
- "help": "Firecrawl API key for web crawling functionality. Get it from: https://www.firecrawl.dev/",
- },
- ],
- "Custom Environment Variables": [], # User-defined environment variables will be stored here
-}
-
-
-def get_script_info(script_name):
- """Get detailed information about the script"""
- return SCRIPT_DESCRIPTIONS.get(script_name, "No description available")
-
-
-def load_env_vars():
- """Load environment variables"""
- env_vars = {}
- # Try to load from .env file
- dotenv.load_dotenv()
-
- # Get all environment variables
- for group in ENV_GROUPS.values():
- for var in group:
- env_vars[var["name"]] = os.environ.get(var["name"], "")
-
- # Load other environment variables that may exist in the .env file
- if Path(".env").exists():
- try:
- with open(".env", "r", encoding="utf-8") as f:
- for line in f:
- line = line.strip()
- if line and not line.startswith("#") and "=" in line:
- try:
- key, value = line.split("=", 1)
- key = key.strip()
- value = value.strip()
-
- # Handle quoted values
- if (value.startswith('"') and value.endswith('"')) or (
- value.startswith("'") and value.endswith("'")
- ):
- value = value[
- 1:-1
- ] # Remove quotes at the beginning and end
-
- # Check if it's a known environment variable
- known_var = False
- for group in ENV_GROUPS.values():
- if any(var["name"] == key for var in group):
- known_var = True
- break
-
- # If it's not a known environment variable, add it to the custom environment variables group
- if not known_var and key not in env_vars:
- ENV_GROUPS["Custom Environment Variables"].append(
- {
- "name": key,
- "label": key,
- "type": "text",
- "required": False,
- "help": "User-defined environment variable",
- }
- )
- env_vars[key] = value
- except Exception as e:
- print(
- f"Error parsing environment variable line: {line}, error: {str(e)}"
- )
- except Exception as e:
- print(f"Error loading .env file: {str(e)}")
-
- return env_vars
-
-
-def save_env_vars(env_vars):
- """Save environment variables to .env file"""
- # Read existing .env file content
- env_path = Path(".env")
- existing_content = {}
-
- if env_path.exists():
- try:
- with open(env_path, "r", encoding="utf-8") as f:
- for line in f:
- line = line.strip()
- if line and not line.startswith("#") and "=" in line:
- try:
- key, value = line.split("=", 1)
- existing_content[key.strip()] = value.strip()
- except Exception as e:
- print(
- f"Error parsing environment variable line: {line}, error: {str(e)}"
- )
- except Exception as e:
- print(f"Error reading .env file: {str(e)}")
-
- # Update environment variables
- for key, value in env_vars.items():
- if value is not None: # Allow empty string values, but not None
- # Ensure the value is a string
- value = str(value) # Ensure the value is a string
-
- # Check if the value is already wrapped in quotes
- if (value.startswith('"') and value.endswith('"')) or (
- value.startswith("'") and value.endswith("'")
- ):
- # Already wrapped in quotes, keep as is
- existing_content[key] = value
- # Update environment variable by removing quotes
- os.environ[key] = value[1:-1]
- else:
- # Not wrapped in quotes, add double quotes
- # Wrap the value in double quotes to ensure special characters are handled correctly
- quoted_value = f'"{value}"'
- existing_content[key] = quoted_value
- # Also update the environment variable for the current process (using the unquoted value)
- os.environ[key] = value
-
- # Write to .env file
- try:
- with open(env_path, "w", encoding="utf-8") as f:
- for key, value in existing_content.items():
- f.write(f"{key}={value}\n")
- except Exception as e:
- print(f"Error writing to .env file: {str(e)}")
- return f"❌ Failed to save environment variables: {str(e)}"
-
- return "✅ Environment variables saved"
-
-
-def add_custom_env_var(name, value, var_type):
- """Add custom environment variable"""
- if not name:
- return "❌ Environment variable name cannot be empty", None
-
- # Check if an environment variable with the same name already exists
- for group in ENV_GROUPS.values():
- if any(var["name"] == name for var in group):
- return f"❌ Environment variable {name} already exists", None
-
- # Add to custom environment variables group
- ENV_GROUPS["Custom Environment Variables"].append(
- {
- "name": name,
- "label": name,
- "type": var_type,
- "required": False,
- "help": "User-defined environment variable",
- }
- )
-
- # Save environment variables
- env_vars = {name: value}
- save_env_vars(env_vars)
-
- # Return success message and updated environment variable group
- return f"✅ Added environment variable {name}", ENV_GROUPS[
- "Custom Environment Variables"
- ]
-
-
-def update_custom_env_var(name, value, var_type):
- """Update custom environment variable"""
- if not name:
- return "❌ Environment variable name cannot be empty", None
-
- # Check if the environment variable exists in the custom environment variables group
- found = False
- for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
- if var["name"] == name:
- # Update type
- ENV_GROUPS["Custom Environment Variables"][i]["type"] = var_type
- found = True
- break
-
- if not found:
- return f"❌ Custom environment variable {name} does not exist", None
-
- # Save environment variable value
- env_vars = {name: value}
- save_env_vars(env_vars)
-
- # Return success message and updated environment variable group
- return f"✅ Updated environment variable {name}", ENV_GROUPS[
- "Custom Environment Variables"
- ]
-
-
-def delete_custom_env_var(name):
- """Delete custom environment variable"""
- if not name:
- return "❌ Environment variable name cannot be empty", None
-
- # Check if the environment variable exists in the custom environment variables group
- found = False
- for i, var in enumerate(ENV_GROUPS["Custom Environment Variables"]):
- if var["name"] == name:
- # Delete from custom environment variables group
- del ENV_GROUPS["Custom Environment Variables"][i]
- found = True
- break
-
- if not found:
- return f"❌ Custom environment variable {name} does not exist", None
-
- # Delete the environment variable from .env file
- env_path = Path(".env")
- if env_path.exists():
- try:
- with open(env_path, "r", encoding="utf-8") as f:
- lines = f.readlines()
-
- with open(env_path, "w", encoding="utf-8") as f:
- for line in lines:
- try:
- # More precisely match environment variable lines
- line_stripped = line.strip()
- # Check if it's a comment line or empty line
- if not line_stripped or line_stripped.startswith("#"):
- f.write(line) # Keep comment lines and empty lines
- continue
-
- # Check if it contains an equals sign
- if "=" not in line_stripped:
- f.write(line) # Keep lines without equals sign
- continue
-
- # Extract variable name and check if it matches the variable to be deleted
- var_name = line_stripped.split("=", 1)[0].strip()
- if var_name != name:
- f.write(line) # Keep variables that don't match
- except Exception as e:
- print(
- f"Error processing .env file line: {line}, error: {str(e)}"
- )
- # Keep the original line when an error occurs
- f.write(line)
- except Exception as e:
- print(f"Error deleting environment variable: {str(e)}")
- return f"❌ Failed to delete environment variable: {str(e)}", None
-
- # Delete from current process environment variables
- if name in os.environ:
- del os.environ[name]
-
- # Return success message and updated environment variable group
- return f"✅ Deleted environment variable {name}", ENV_GROUPS[
- "Custom Environment Variables"
- ]
-
-
-def terminate_process():
- """Terminate the currently running process"""
- global current_process
-
- with process_lock:
- if current_process is not None and current_process.poll() is None:
- try:
- # On Windows, use taskkill to forcibly terminate the process tree
- if os.name == "nt":
- # Get process ID
- pid = current_process.pid
- # Use taskkill command to terminate the process and its children - avoid using shell=True for better security
- try:
- subprocess.run(
- ["taskkill", "/F", "/T", "/PID", str(pid)], check=False
- )
- except subprocess.SubprocessError as e:
- log_queue.put(f"Error terminating process: {str(e)}\n")
- return f"❌ Error terminating process: {str(e)}"
- else:
- # On Unix, use SIGTERM and SIGKILL
- current_process.terminate()
- try:
- current_process.wait(timeout=3)
- except subprocess.TimeoutExpired:
- current_process.kill()
-
- # Wait for process to terminate
- try:
- current_process.wait(timeout=2)
- except subprocess.TimeoutExpired:
- pass # Already tried to force terminate, ignore timeout
-
- log_queue.put("Process terminated\n")
- return "✅ Process terminated"
- except Exception as e:
- log_queue.put(f"Error terminating process: {str(e)}\n")
- return f"❌ Error terminating process: {str(e)}"
- else:
- return "❌ No process is currently running"
-
-
-def run_script(script_dropdown, question, progress=gr.Progress()):
- """Run the selected script and return the output"""
- global current_process
-
- script_name = SCRIPTS.get(script_dropdown)
- if not script_name:
- return "❌ Invalid script selection", "", "", "", None
-
- if not question.strip():
- return "Please enter a question!", "", "", "", None
-
- # Clear the log queue
- while not log_queue.empty():
- log_queue.get()
-
- # Create log directory
- log_dir = Path("logs")
- log_dir.mkdir(exist_ok=True)
-
- # Create log file with timestamp
- timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
- log_file = log_dir / f"{script_name.replace('.py', '')}_{timestamp}.log"
-
- # Build command
- base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
- cmd = [
- sys.executable,
- os.path.join(base_path, "owl", "script_adapter.py"),
- os.path.join(base_path, "owl", script_name),
- ]
-
- # Create a copy of environment variables and add the question
- env = os.environ.copy()
- # Ensure question is a string type
- if not isinstance(question, str):
- question = str(question)
- # Preserve newlines, but ensure it's a valid string
- env["OWL_QUESTION"] = question
-
- # Start the process
- with process_lock:
- current_process = subprocess.Popen(
- cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.STDOUT,
- text=True,
- bufsize=1,
- env=env,
- encoding="utf-8",
- )
-
- # Create thread to read output
- def read_output():
- try:
- # Use a unique timestamp to ensure log filename is not duplicated
- timestamp_unique = datetime.now().strftime("%Y%m%d_%H%M%S_%f")
- unique_log_file = (
- log_dir / f"{script_name.replace('.py', '')}_{timestamp_unique}.log"
- )
-
- # Use this unique filename to write logs
- with open(unique_log_file, "w", encoding="utf-8") as f:
- # Update global log file path
- nonlocal log_file
- log_file = unique_log_file
-
- for line in iter(current_process.stdout.readline, ""):
- if line:
- # Write to log file
- f.write(line)
- f.flush()
- # Add to queue
- log_queue.put(line)
- except Exception as e:
- log_queue.put(f"Error reading output: {str(e)}\n")
-
- # Start the reading thread
- threading.Thread(target=read_output, daemon=True).start()
-
- # Collect logs
- logs = []
- progress(0, desc="Running...")
-
- # Wait for process to complete or timeout
- start_time = time.time()
- timeout = 1800 # 30 minutes timeout
-
- while current_process.poll() is None:
- # Check if timeout
- if time.time() - start_time > timeout:
- with process_lock:
- if current_process.poll() is None:
- if os.name == "nt":
- current_process.send_signal(signal.CTRL_BREAK_EVENT)
- else:
- current_process.terminate()
- log_queue.put("Execution timeout, process terminated\n")
- break
-
- # Get logs from queue
- while not log_queue.empty():
- log = log_queue.get()
- logs.append(log)
-
- # Update progress
- elapsed = time.time() - start_time
- progress(min(elapsed / 300, 0.99), desc="Running...")
-
- # Short sleep to reduce CPU usage
- time.sleep(0.1)
-
- # Update log display once per second
- yield (
- status_message(current_process),
- extract_answer(logs),
- "".join(logs),
- str(log_file),
- None,
- )
-
- # Get remaining logs
- while not log_queue.empty():
- logs.append(log_queue.get())
-
- # Extract chat history (if any)
- chat_history = extract_chat_history(logs)
-
- # Return final status and logs
- return (
- status_message(current_process),
- extract_answer(logs),
- "".join(logs),
- str(log_file),
- chat_history,
- )
-
-
-def status_message(process):
- """Return status message based on process status"""
- if process.poll() is None:
- return "⏳ Running..."
- elif process.returncode == 0:
- return "✅ Execution successful"
- else:
- return f"❌ Execution failed (return code: {process.returncode})"
-
-
-def extract_answer(logs):
- """Extract answer from logs"""
- answer = ""
- for log in logs:
- if "Answer:" in log:
- answer = log.split("Answer:", 1)[1].strip()
- break
- return answer
-
-
-def extract_chat_history(logs):
- """Try to extract chat history from logs"""
- try:
- chat_json_str = ""
- capture_json = False
-
- for log in logs:
- if "chat_history" in log:
- # Start capturing JSON
- start_idx = log.find("[")
- if start_idx != -1:
- capture_json = True
- chat_json_str = log[start_idx:]
- elif capture_json:
- # Continue capturing JSON until finding the matching closing bracket
- chat_json_str += log
- if "]" in log:
- # Found closing bracket, try to parse JSON
- end_idx = chat_json_str.rfind("]") + 1
- if end_idx > 0:
- try:
- # Clean up possible extra text
- json_str = chat_json_str[:end_idx].strip()
- chat_data = json.loads(json_str)
-
- # Format for use with Gradio chat component
- formatted_chat = []
- for msg in chat_data:
- if "role" in msg and "content" in msg:
- role = (
- "User" if msg["role"] == "user" else "Assistant"
- )
- formatted_chat.append([role, msg["content"]])
- return formatted_chat
- except json.JSONDecodeError:
- # If parsing fails, continue capturing
- pass
- except Exception:
- # Other errors, stop capturing
- capture_json = False
- except Exception:
- pass
- return None
-
-
-def create_ui():
- """Create Gradio interface"""
- # Load environment variables
- env_vars = load_env_vars()
-
- with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
- gr.Markdown(
- """
- # 🦉 OWL Intelligent Assistant Platform
-
- Select a model and enter your question, the system will run the corresponding script and display the results.
- """
- )
-
- with gr.Tabs():
- with gr.TabItem("Run Mode"):
- with gr.Row():
- with gr.Column(scale=1):
- # Ensure default value is a key that exists in SCRIPTS
- default_script = list(SCRIPTS.keys())[0] if SCRIPTS else None
- script_dropdown = gr.Dropdown(
- choices=list(SCRIPTS.keys()),
- value=default_script,
- label="Select Mode",
- )
-
- script_info = gr.Textbox(
- value=get_script_info(default_script)
- if default_script
- else "",
- label="Model Description",
- interactive=False,
- )
-
- script_dropdown.change(
- fn=lambda x: get_script_info(x),
- inputs=script_dropdown,
- outputs=script_info,
- )
-
- question_input = gr.Textbox(
- lines=8,
- placeholder="Please enter your question...",
- label="Question",
- elem_id="question_input",
- show_copy_button=True,
- )
-
- gr.Markdown(
- """
- > **Note**: Your question will replace the default question in the script. The system will automatically handle the replacement, ensuring your question is used correctly.
- > Multi-line input is supported, line breaks will be preserved.
- """
- )
-
- with gr.Row():
- run_button = gr.Button("Run", variant="primary")
- stop_button = gr.Button("Stop", variant="stop")
-
- with gr.Column(scale=2):
- with gr.Tabs():
- with gr.TabItem("Results"):
- status_output = gr.Textbox(label="Status")
- answer_output = gr.Textbox(label="Answer", lines=10)
- log_file_output = gr.Textbox(label="Log File Path")
-
- with gr.TabItem("Run Logs"):
- log_output = gr.Textbox(label="Complete Logs", lines=25)
-
- with gr.TabItem("Chat History"):
- chat_output = gr.Chatbot(label="Conversation History")
-
- # Example questions
- examples = [
- [
- "Qwen Mini (Chinese)",
- "Browse Amazon and find a product that is attractive to programmers. Please provide the product name and price.",
- ],
- [
- "DeepSeek (Chinese)",
- "Please analyze the latest statistics of the CAMEL-AI project on GitHub. Find out the number of stars, number of contributors, and recent activity of the project. Then, create a simple Excel spreadsheet to display this data and generate a bar chart to visualize these metrics. Finally, summarize the popularity and development trends of the CAMEL project.",
- ],
- [
- "Default",
- "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer.",
- ],
- ]
-
- gr.Examples(examples=examples, inputs=[script_dropdown, question_input])
-
- with gr.TabItem("Environment Variable Configuration"):
- env_inputs = {}
- save_status = gr.Textbox(label="Save Status", interactive=False)
-
- # Add custom environment variables section
- with gr.Accordion("Add Custom Environment Variables", open=True):
- with gr.Row():
- new_var_name = gr.Textbox(
- label="Environment Variable Name",
- placeholder="Example: MY_CUSTOM_API_KEY",
- )
- new_var_value = gr.Textbox(
- label="Environment Variable Value",
- placeholder="Enter value",
- )
- new_var_type = gr.Dropdown(
- choices=["text", "password"], value="text", label="Type"
- )
-
- add_var_button = gr.Button(
- "Add Environment Variable", variant="primary"
- )
- add_var_status = gr.Textbox(label="Add Status", interactive=False)
-
- # Custom environment variables list
- custom_vars_list = gr.JSON(
- value=ENV_GROUPS["Custom Environment Variables"],
- label="Added Custom Environment Variables",
- visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
- )
-
- # Update and delete custom environment variables section
- with gr.Accordion(
- "Update or Delete Custom Environment Variables",
- open=True,
- visible=len(ENV_GROUPS["Custom Environment Variables"]) > 0,
- ) as update_delete_accordion:
- with gr.Row():
- # Create dropdown menu to display all custom environment variables
- custom_var_dropdown = gr.Dropdown(
- choices=[
- var["name"]
- for var in ENV_GROUPS["Custom Environment Variables"]
- ],
- label="Select Environment Variable",
- interactive=True,
- )
- update_var_value = gr.Textbox(
- label="New Environment Variable Value",
- placeholder="Enter new value",
- )
- update_var_type = gr.Dropdown(
- choices=["text", "password"], value="text", label="Type"
- )
-
- with gr.Row():
- update_var_button = gr.Button(
- "Update Environment Variable", variant="primary"
- )
- delete_var_button = gr.Button(
- "Delete Environment Variable", variant="stop"
- )
-
- update_var_status = gr.Textbox(
- label="Operation Status", interactive=False
- )
-
- # Add environment variable button click event
- add_var_button.click(
- fn=add_custom_env_var,
- inputs=[new_var_name, new_var_value, new_var_type],
- outputs=[add_var_status, custom_vars_list],
- ).then(
- fn=lambda vars: {"visible": len(vars) > 0},
- inputs=[custom_vars_list],
- outputs=[update_delete_accordion],
- )
-
- # Update environment variable button click event
- update_var_button.click(
- fn=update_custom_env_var,
- inputs=[custom_var_dropdown, update_var_value, update_var_type],
- outputs=[update_var_status, custom_vars_list],
- )
-
- # Delete environment variable button click event
- delete_var_button.click(
- fn=delete_custom_env_var,
- inputs=[custom_var_dropdown],
- outputs=[update_var_status, custom_vars_list],
- ).then(
- fn=lambda vars: {"visible": len(vars) > 0},
- inputs=[custom_vars_list],
- outputs=[update_delete_accordion],
- )
-
- # When custom environment variables list is updated, update dropdown menu options
- custom_vars_list.change(
- fn=lambda vars: {
- "choices": [var["name"] for var in vars],
- "value": None,
- },
- inputs=[custom_vars_list],
- outputs=[custom_var_dropdown],
- )
-
- # Existing environment variable configuration
- for group_name, vars in ENV_GROUPS.items():
- if (
- group_name != "Custom Environment Variables" or len(vars) > 0
- ): # Only show non-empty custom environment variable groups
- with gr.Accordion(
- group_name,
- open=(group_name != "Custom Environment Variables"),
- ):
- for var in vars:
- # Add help information
- gr.Markdown(f"**{var['help']}**")
-
- if var["type"] == "password":
- env_inputs[var["name"]] = gr.Textbox(
- value=env_vars.get(var["name"], ""),
- label=var["label"],
- placeholder=f"Please enter {var['label']}",
- type="password",
- )
- else:
- env_inputs[var["name"]] = gr.Textbox(
- value=env_vars.get(var["name"], ""),
- label=var["label"],
- placeholder=f"Please enter {var['label']}",
- )
-
- save_button = gr.Button("Save Environment Variables", variant="primary")
-
- # Save environment variables
- save_inputs = [
- env_inputs[var_name]
- for group in ENV_GROUPS.values()
- for var in group
- for var_name in [var["name"]]
- if var_name in env_inputs
- ]
- save_button.click(
- fn=lambda *values: save_env_vars(
- dict(
- zip(
- [
- var["name"]
- for group in ENV_GROUPS.values()
- for var in group
- if var["name"] in env_inputs
- ],
- values,
- )
- )
- ),
- inputs=save_inputs,
- outputs=save_status,
- )
-
- # Run script
- run_button.click(
- fn=run_script,
- inputs=[script_dropdown, question_input],
- outputs=[
- status_output,
- answer_output,
- log_output,
- log_file_output,
- chat_output,
- ],
- show_progress=True,
- )
-
- # Terminate execution
- stop_button.click(fn=terminate_process, inputs=[], outputs=[status_output])
-
- # Add footer
- gr.Markdown(
- """
- ### 📝 Instructions
-
- - Select a model and enter your question
- - Click the "Run" button to start execution
- - To stop execution, click the "Stop" button
- - View execution status and answers in the "Results" tab
- - View complete logs in the "Run Logs" tab
- - View conversation history in the "Chat History" tab (if available)
- - Configure API keys and other environment variables in the "Environment Variable Configuration" tab
- - You can add custom environment variables to meet special requirements
-
- ### ⚠️ Notes
-
- - Running some models may require API keys, please make sure you have set the corresponding environment variables in the "Environment Variable Configuration" tab
- - Some scripts may take a long time to run, please be patient
- - If execution exceeds 30 minutes, the process will automatically terminate
- - Your question will replace the default question in the script, ensure the question is compatible with the selected model
- """
- )
-
- return app
-
-
-if __name__ == "__main__":
- # Create and launch the application
- app = create_ui()
- app.queue().launch(share=True)
diff --git a/owl/run.py b/owl/examples/run.py
similarity index 100%
rename from owl/run.py
rename to owl/examples/run.py
diff --git a/owl/run_deepseek_zh.py b/owl/examples/run_deepseek_zh.py
similarity index 100%
rename from owl/run_deepseek_zh.py
rename to owl/examples/run_deepseek_zh.py
diff --git a/owl/run_gaia_roleplaying.py b/owl/examples/run_gaia_roleplaying.py
similarity index 100%
rename from owl/run_gaia_roleplaying.py
rename to owl/examples/run_gaia_roleplaying.py
diff --git a/owl/run_mini.py b/owl/examples/run_mini.py
similarity index 100%
rename from owl/run_mini.py
rename to owl/examples/run_mini.py
diff --git a/owl/run_ollama.py b/owl/examples/run_ollama.py
similarity index 100%
rename from owl/run_ollama.py
rename to owl/examples/run_ollama.py
diff --git a/owl/run_openai_compatiable_model.py b/owl/examples/run_openai_compatiable_model.py
similarity index 100%
rename from owl/run_openai_compatiable_model.py
rename to owl/examples/run_openai_compatiable_model.py
diff --git a/owl/run_qwen_mini_zh.py b/owl/examples/run_qwen_mini_zh.py
similarity index 100%
rename from owl/run_qwen_mini_zh.py
rename to owl/examples/run_qwen_mini_zh.py
diff --git a/owl/run_qwen_zh.py b/owl/examples/run_qwen_zh.py
similarity index 100%
rename from owl/run_qwen_zh.py
rename to owl/examples/run_qwen_zh.py
diff --git a/owl/run_terminal.py b/owl/examples/run_terminal.py
similarity index 100%
rename from owl/run_terminal.py
rename to owl/examples/run_terminal.py
diff --git a/owl/run_terminal_zh.py b/owl/examples/run_terminal_zh.py
similarity index 100%
rename from owl/run_terminal_zh.py
rename to owl/examples/run_terminal_zh.py
diff --git a/owl/script_adapter.py b/owl/script_adapter.py
deleted file mode 100644
index fff8ddb..0000000
--- a/owl/script_adapter.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-import os
-import sys
-import importlib.util
-import re
-from pathlib import Path
-import traceback
-
-
-def load_module_from_path(module_name, file_path):
- """从文件路径加载Python模块"""
- try:
- spec = importlib.util.spec_from_file_location(module_name, file_path)
- if spec is None:
- print(f"错误: 无法从 {file_path} 创建模块规范")
- return None
-
- module = importlib.util.module_from_spec(spec)
- sys.modules[module_name] = module
- spec.loader.exec_module(module)
- return module
- except Exception as e:
- print(f"加载模块时出错: {e}")
- traceback.print_exc()
- return None
-
-
-def run_script_with_env_question(script_name):
- """使用环境变量中的问题运行脚本"""
- # 获取环境变量中的问题
- question = os.environ.get("OWL_QUESTION")
- if not question:
- print("错误: 未设置OWL_QUESTION环境变量")
- sys.exit(1)
-
- # 脚本路径
- script_path = Path(script_name).resolve()
- if not script_path.exists():
- print(f"错误: 脚本 {script_path} 不存在")
- sys.exit(1)
-
- # 创建临时文件路径
- temp_script_path = script_path.with_name(f"temp_{script_path.name}")
-
- try:
- # 读取脚本内容
- try:
- with open(script_path, "r", encoding="utf-8") as f:
- content = f.read()
- except Exception as e:
- print(f"读取脚本文件时出错: {e}")
- sys.exit(1)
-
- # 检查脚本是否有main函数
- has_main = re.search(r"def\s+main\s*\(\s*\)\s*:", content) is not None
-
- # 转义问题中的特殊字符
- escaped_question = (
- question.replace("\\", "\\\\")
- .replace('"', '\\"')
- .replace("'", "\\'")
- .replace("\n", "\\n") # 转义换行符
- .replace("\r", "\\r") # 转义回车符
- )
-
- # 查找脚本中所有的question赋值 - 改进的正则表达式
- # 匹配单行和多行字符串赋值
- question_assignments = re.findall(
- r'question\s*=\s*(?:["\'].*?["\']|""".*?"""|\'\'\'.*?\'\'\'|\(.*?\))',
- content,
- re.DOTALL,
- )
- print(f"在脚本中找到 {len(question_assignments)} 个question赋值")
-
- # 修改脚本内容,替换所有的question赋值
- modified_content = content
-
- # 如果脚本中有question赋值,替换所有的赋值
- if question_assignments:
- for assignment in question_assignments:
- modified_content = modified_content.replace(
- assignment, f'question = "{escaped_question}"'
- )
- print(f"已替换脚本中的所有question赋值为: {question}")
- else:
- # 如果没有找到question赋值,尝试在main函数前插入
- if has_main:
- main_match = re.search(r"def\s+main\s*\(\s*\)\s*:", content)
- if main_match:
- insert_pos = main_match.start()
- modified_content = (
- content[:insert_pos]
- + f'\n# 用户输入的问题\nquestion = "{escaped_question}"\n\n'
- + content[insert_pos:]
- )
- print(f"已在main函数前插入问题: {question}")
- else:
- # 如果没有main函数,在文件开头插入
- modified_content = (
- f'# 用户输入的问题\nquestion = "{escaped_question}"\n\n' + content
- )
- print(f"已在文件开头插入问题: {question}")
-
- # 添加monkey patch代码,确保construct_society函数使用用户的问题
- monkey_patch_code = f"""
-# 确保construct_society函数使用用户的问题
-original_construct_society = globals().get('construct_society')
-if original_construct_society:
- def patched_construct_society(*args, **kwargs):
- # 忽略传入的参数,始终使用用户的问题
- return original_construct_society("{escaped_question}")
-
- # 替换原始函数
- globals()['construct_society'] = patched_construct_society
- print("已修补construct_society函数,确保使用用户问题")
-"""
-
- # 在文件末尾添加monkey patch代码
- modified_content += monkey_patch_code
-
- # 如果脚本没有调用main函数,添加调用代码
- if has_main and "__main__" not in content:
- modified_content += """
-
-# 确保调用main函数
-if __name__ == "__main__":
- main()
-"""
- print("已添加main函数调用代码")
-
- # 如果脚本没有construct_society调用,添加调用代码
- if (
- "construct_society" in content
- and "run_society" in content
- and "Answer:" not in content
- ):
- modified_content += f"""
-
-# 确保执行construct_society和run_society
-if "construct_society" in globals() and "run_society" in globals():
- try:
- society = construct_society("{escaped_question}")
- from utils import run_society
- answer, chat_history, token_count = run_society(society)
- print(f"Answer: {{answer}}")
- except Exception as e:
- print(f"运行时出错: {{e}}")
- import traceback
- traceback.print_exc()
-"""
- print("已添加construct_society和run_society调用代码")
-
- # 执行修改后的脚本
- try:
- # 将脚本目录添加到sys.path
- script_dir = script_path.parent
- if str(script_dir) not in sys.path:
- sys.path.insert(0, str(script_dir))
-
- # 创建临时文件
- try:
- with open(temp_script_path, "w", encoding="utf-8") as f:
- f.write(modified_content)
- print(f"已创建临时脚本文件: {temp_script_path}")
- except Exception as e:
- print(f"创建临时脚本文件时出错: {e}")
- sys.exit(1)
-
- try:
- # 直接执行临时脚本
- print("开始执行脚本...")
-
- # 如果有main函数,加载模块并调用main
- if has_main:
- # 加载临时模块
- module_name = f"temp_{script_path.stem}"
- module = load_module_from_path(module_name, temp_script_path)
-
- if module is None:
- print(f"错误: 无法加载模块 {module_name}")
- sys.exit(1)
-
- # 确保模块中有question变量,并且值是用户输入的问题
- setattr(module, "question", question)
-
- # 如果模块中有construct_society函数,修补它
- if hasattr(module, "construct_society"):
- original_func = module.construct_society
-
- def patched_func(*args, **kwargs):
- return original_func(question)
-
- module.construct_society = patched_func
- print("已在模块级别修补construct_society函数")
-
- # 调用main函数
- if hasattr(module, "main"):
- print("调用main函数...")
- module.main()
- else:
- print(f"错误: 脚本 {script_path} 中没有main函数")
- sys.exit(1)
- else:
- # 如果没有main函数,直接执行修改后的脚本
- print("直接执行脚本内容...")
- # 使用更安全的方式执行脚本
- with open(temp_script_path, "r", encoding="utf-8") as f:
- script_code = f.read()
-
- # 创建一个安全的全局命名空间
- safe_globals = {
- "__file__": str(temp_script_path),
- "__name__": "__main__",
- }
- # 添加内置函数
- safe_globals.update(
- {k: v for k, v in globals().items() if k in ["__builtins__"]}
- )
-
- # 执行脚本
- exec(script_code, safe_globals)
-
- except Exception as e:
- print(f"执行脚本时出错: {e}")
- traceback.print_exc()
- sys.exit(1)
-
- except Exception as e:
- print(f"处理脚本时出错: {e}")
- traceback.print_exc()
- sys.exit(1)
-
- except Exception as e:
- print(f"处理脚本时出错: {e}")
- traceback.print_exc()
- sys.exit(1)
-
- finally:
- # 删除临时文件
- if temp_script_path.exists():
- try:
- temp_script_path.unlink()
- print(f"已删除临时脚本文件: {temp_script_path}")
- except Exception as e:
- print(f"删除临时脚本文件时出错: {e}")
-
-
-if __name__ == "__main__":
- # 检查命令行参数
- if len(sys.argv) < 2:
- print("用法: python script_adapter.py ")
- sys.exit(1)
-
- # 运行指定的脚本
- run_script_with_env_question(sys.argv[1])
diff --git a/owl/webapp.py b/owl/webapp.py
new file mode 100644
index 0000000..3187e7c
--- /dev/null
+++ b/owl/webapp.py
@@ -0,0 +1,400 @@
+# Import from the correct module path
+from owl.utils import run_society
+import os
+import gradio as gr
+import time
+import json
+from typing import Tuple, List, Dict, Any
+import importlib
+
+# Enhanced CSS with navigation bar and additional styling
+custom_css = """
+:root {
+ --primary-color: #1e3c72;
+ --secondary-color: #2a5298;
+ --accent-color: #4776E6;
+ --light-bg: #f8f9fa;
+ --border-color: #dee2e6;
+ --text-muted: #6c757d;
+}
+
+.container {
+ max-width: 1200px;
+ margin: 0 auto;
+}
+
+.navbar {
+ display: flex;
+ justify-content: space-between;
+ align-items: center;
+ padding: 15px 30px;
+ background: linear-gradient(90deg, var(--primary-color), var(--secondary-color));
+ color: white;
+ border-radius: 10px 10px 0 0;
+ margin-bottom: 0;
+}
+
+.navbar-logo {
+ display: flex;
+ align-items: center;
+ gap: 10px;
+ font-size: 1.5em;
+ font-weight: bold;
+}
+
+.navbar-menu {
+ display: flex;
+ gap: 20px;
+}
+
+.navbar-menu a {
+ color: white;
+ text-decoration: none;
+ padding: 5px 10px;
+ border-radius: 5px;
+ transition: background-color 0.3s;
+}
+
+.navbar-menu a:hover {
+ background-color: rgba(255, 255, 255, 0.1);
+}
+
+.header {
+ text-align: center;
+ margin-bottom: 20px;
+ background: linear-gradient(180deg, var(--secondary-color), var(--accent-color));
+ color: white;
+ padding: 40px 20px;
+ border-radius: 0 0 10px 10px;
+ box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+}
+
+.module-info {
+ background-color: var(--light-bg);
+ border-left: 5px solid var(--primary-color);
+ padding: 10px 15px;
+ margin-top: 10px;
+ border-radius: 5px;
+ font-size: 0.9em;
+}
+
+.answer-box {
+ background-color: var(--light-bg);
+ border-left: 5px solid var(--secondary-color);
+ padding: 15px;
+ margin-bottom: 20px;
+ border-radius: 5px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+}
+
+.token-count {
+ background-color: #e9ecef;
+ padding: 10px;
+ border-radius: 5px;
+ text-align: center;
+ font-weight: bold;
+ margin-bottom: 20px;
+}
+
+.chat-container {
+ border: 1px solid var(--border-color);
+ border-radius: 5px;
+ max-height: 500px;
+ overflow-y: auto;
+ margin-bottom: 20px;
+}
+
+.footer {
+ text-align: center;
+ margin-top: 20px;
+ color: var(--text-muted);
+ font-size: 0.9em;
+ padding: 20px;
+ border-top: 1px solid var(--border-color);
+}
+
+.features-section {
+ display: grid;
+ grid-template-columns: repeat(auto-fit, minmax(300px, 1fr));
+ gap: 20px;
+ margin: 20px 0;
+}
+
+.feature-card {
+ background-color: white;
+ border-radius: 8px;
+ padding: 20px;
+ box-shadow: 0 2px 4px rgba(0, 0, 0, 0.05);
+ transition: transform 0.3s, box-shadow 0.3s;
+}
+
+.feature-card:hover {
+ transform: translateY(-5px);
+ box-shadow: 0 5px 15px rgba(0, 0, 0, 0.1);
+}
+
+.feature-icon {
+ font-size: 2em;
+ color: var(--primary-color);
+ margin-bottom: 10px;
+}
+
+/* Improved button and input styles */
+button.primary {
+ background: linear-gradient(90deg, var(--primary-color), var(--secondary-color));
+ transition: all 0.3s;
+}
+
+button.primary:hover {
+ background: linear-gradient(90deg, var(--secondary-color), var(--primary-color));
+ transform: translateY(-2px);
+ box-shadow: 0 4px 8px rgba(0, 0, 0, 0.15);
+}
+"""
+
+# Dictionary containing module descriptions
+MODULE_DESCRIPTIONS = {
+ "run": "默认模式:使用默认的智能体协作模式,适合大多数任务。",
+    "run_terminal": "终端模式(英文):可执行命令行操作,支持网络搜索、文件处理等功能。",
+ "run_deepseek_zh":"使用deepseek模型处理中文任务",
+ "run_terminal_zh": "终端模式:可执行命令行操作,支持网络搜索、文件处理等功能。适合需要系统交互的任务。",
+ "run_mini": "精简模式:轻量级智能体协作,适合快速回答和简单任务处理,响应速度更快。",
+ "run_gaia_roleplaying":"GAIA基准测试实现,用于评估模型能力",
+ "run_openai_compatiable_model":"使用openai兼容模型处理任务",
+ "run_ollama":"使用本地ollama模型处理任务",
+ "run_qwen_mini_zh":"使用qwen模型处理中文任务",
+ "run_qwen_zh":"使用qwen模型处理任务",
+
+
+
+}
+
+def format_chat_history(chat_history: List[Dict[str, str]]) -> List[List[str]]:
+ """将聊天历史格式化为Gradio聊天组件可接受的格式
+
+ Args:
+ chat_history: 原始聊天历史
+
+ Returns:
+ List[List[str]]: 格式化后的聊天历史
+ """
+ formatted_history = []
+ for message in chat_history:
+ user_msg = message.get("user", "")
+ assistant_msg = message.get("assistant", "")
+
+ if user_msg:
+ formatted_history.append([user_msg, None])
+ if assistant_msg and formatted_history:
+ formatted_history[-1][1] = assistant_msg
+ elif assistant_msg:
+ formatted_history.append([None, assistant_msg])
+
+ return formatted_history
+
+def run_owl(question: str, example_module: str) -> Tuple[str, List[List[str]], str, str]:
+ """运行OWL系统并返回结果
+
+ Args:
+ question: 用户问题
+        example_module: 要导入的示例模块名(如 "run_terminal_zh" 或 "run_deepseek_zh")
+
+ Returns:
+ Tuple[...]: 回答、聊天历史、令牌计数、状态
+ """
+ try:
+ # 动态导入目标模块
+ module_path = f"owl.examples.{example_module}"
+ module = importlib.import_module(module_path)
+
+ # 检查是否包含construct_society函数
+ if not hasattr(module, "construct_society"):
+ raise AttributeError(f"模块 {module_path} 中未找到 construct_society 函数")
+
+ # 构建社会模拟
+ society = module.construct_society(question)
+
+ # 运行社会模拟(假设run_society兼容不同模块)
+ answer, chat_history, token_info = run_society(society)
+
+ # 格式化和令牌计数(与原逻辑一致)
+ formatted_chat_history = format_chat_history(chat_history)
+ total_tokens = token_info["completion_token_count"] + token_info["prompt_token_count"]
+
+ return (
+ answer,
+ formatted_chat_history,
+ f"完成令牌: {token_info['completion_token_count']:,} | 提示令牌: {token_info['prompt_token_count']:,} | 总计: {total_tokens:,}",
+ "✅ 成功完成"
+ )
+
+ except Exception as e:
+ return (
+ f"发生错误: {str(e)}",
+ [],
+ "0",
+ f"❌ 错误: {str(e)}"
+ )
+
+def update_module_description(module_name: str) -> str:
+ """返回所选模块的描述"""
+ return MODULE_DESCRIPTIONS.get(module_name, "无可用描述")
+
+def create_ui():
+ """创建增强版Gradio界面"""
+ with gr.Blocks(css=custom_css, theme=gr.themes.Soft(primary_hue="blue")) as app:
+ with gr.Column(elem_classes="container"):
+ gr.HTML("""
+
+
+ """)
+
+ with gr.Row(elem_id="features"):
+ gr.HTML("""
+
+
+
🔍
+
智能信息获取
+
自动化网络搜索和数据收集,提供精准信息
+
+
+
🤖
+
多智能体协作
+
多个专家智能体协同工作,解决复杂问题
+
+
+
📊
+
数据分析与可视化
+
强大的数据分析能力,生成直观的可视化结果
+
+
+ """)
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ question_input = gr.Textbox(
+ lines=5,
+ placeholder="请输入您的问题...",
+ label="问题",
+ elem_id="question_input",
+ show_copy_button=True,
+ )
+
+ # 增强版模块选择下拉菜单
+ module_dropdown = gr.Dropdown(
+ choices=["run", "run_mini","run_terminal_zh","run_gaia_roleplaying",
+ "run_openai_compatiable_model","run_ollama","run_qwen_zh","run_qwen_mini_zh","run_deepseek_zh","run_terminal"],
+ value="run_terminal_zh",
+ label="选择功能模块",
+ interactive=True
+ )
+
+ # 模块描述文本框
+ module_description = gr.Textbox(
+ value=MODULE_DESCRIPTIONS["run_terminal_zh"],
+ label="模块描述",
+ interactive=False,
+ elem_classes="module-info"
+ )
+
+ run_button = gr.Button("运行", variant="primary", elem_classes="primary")
+
+ with gr.Column(scale=1):
+ gr.Markdown("""
+ ### 使用指南
+
+ 1. **选择适合的模块**:根据您的任务需求选择合适的功能模块
+ 2. **详细描述您的需求**:在输入框中清晰描述您的问题或任务
+ 3. **启动智能处理**:点击"运行"按钮开始多智能体协作处理
+ 4. **查看结果**:在下方标签页查看回答和完整对话历史
+
+ > **高级提示**: 对于复杂任务,可以尝试指定具体步骤和预期结果
+ """)
+
+ status_output = gr.Textbox(label="状态", interactive=False)
+
+ with gr.Tabs():
+ with gr.TabItem("回答"):
+ answer_output = gr.Textbox(
+ label="回答",
+ lines=10,
+ elem_classes="answer-box"
+ )
+
+ with gr.TabItem("对话历史"):
+ chat_output = gr.Chatbot(
+ label="完整对话记录",
+ elem_classes="chat-container",
+ height=500
+ )
+
+
+
+ token_count_output = gr.Textbox(
+ label="令牌计数",
+ interactive=False,
+ elem_classes="token-count"
+ )
+
+ # 示例问题
+ examples = [
+ "打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,用本地终端执行python文件显示图出来给我",
+ "请分析GitHub上CAMEL-AI项目的最新统计数据。找出该项目的星标数量、贡献者数量和最近的活跃度。",
+ "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
+ "写一个hello world的python文件,保存到本地",
+
+ ]
+
+ gr.Examples(
+ examples=examples,
+ inputs=question_input
+ )
+
+ gr.HTML("""
+
+ """)
+
+ # 设置事件处理
+ run_button.click(
+ fn=run_owl,
+ inputs=[question_input, module_dropdown],
+ outputs=[answer_output, chat_output, token_count_output, status_output]
+ )
+
+ # 模块选择更新描述
+ module_dropdown.change(
+ fn=update_module_description,
+ inputs=module_dropdown,
+ outputs=module_description
+ )
+
+ return app
+
+# 主函数
+def main():
+ app = create_ui()
+ app.launch(share=False)
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/run_app.py b/run_app.py
deleted file mode 100644
index ccea485..0000000
--- a/run_app.py
+++ /dev/null
@@ -1,62 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-OWL Intelligent Assistant Platform Launch Script
-"""
-
-import os
-import sys
-from pathlib import Path
-
-os.environ['PYTHONIOENCODING'] = 'utf-8'
-
-def main():
- """Main function to launch the OWL Intelligent Assistant Platform"""
- # Ensure the current directory is the project root
- project_root = Path(__file__).resolve().parent
- os.chdir(project_root)
-
- # Create log directory
- log_dir = project_root / "logs"
- log_dir.mkdir(exist_ok=True)
-
- # Add project root to Python path
- sys.path.insert(0, str(project_root))
-
- try:
- from owl.app_en import create_ui
-
- # Create and launch the application
- app = create_ui()
- app.queue().launch(share=False)
-
- except ImportError as e:
- print(
- f"Error: Unable to import necessary modules. Please ensure all dependencies are installed: {e}"
- )
- print("Tip: Run 'pip install -r requirements.txt' to install all dependencies")
- sys.exit(1)
- except Exception as e:
- print(f"Error occurred while starting the application: {e}")
- import traceback
-
- traceback.print_exc()
- sys.exit(1)
-
-
-if __name__ == "__main__":
- main()
diff --git a/run_app_zh.py b/run_app_zh.py
deleted file mode 100644
index 0ec4e7b..0000000
--- a/run_app_zh.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
-#!/usr/bin/env python
-# -*- coding: utf-8 -*-
-
-"""
-OWL 智能助手运行平台启动脚本
-"""
-
-import os
-import sys
-from pathlib import Path
-
-os.environ['PYTHONIOENCODING'] = 'utf-8'
-
-def main():
- """主函数,启动OWL智能助手运行平台"""
- # 确保当前目录是项目根目录
- project_root = Path(__file__).resolve().parent
- os.chdir(project_root)
-
- # 创建日志目录
- log_dir = project_root / "logs"
- log_dir.mkdir(exist_ok=True)
-
- # 导入并运行应用
- sys.path.insert(0, str(project_root))
-
- try:
- from owl.app import create_ui
-
- # 创建并启动应用
- app = create_ui()
- app.queue().launch(share=False)
-
- except ImportError as e:
- print(f"错误: 无法导入必要的模块。请确保已安装所有依赖项: {e}")
- print("提示: 运行 'pip install -r requirements.txt' 安装所有依赖项")
- sys.exit(1)
- except Exception as e:
- print(f"启动应用程序时出错: {e}")
- import traceback
-
- traceback.print_exc()
- sys.exit(1)
-
-
-if __name__ == "__main__":
- main()
From 6d3b024e78dce4cf4db5e1f91c4e45f537391389 Mon Sep 17 00:00:00 2001
From: "yifeng.wang" <3038880699@qq.com>
Date: Thu, 13 Mar 2025 12:27:57 +0800
Subject: [PATCH 02/38] add webdemo_zh and update readme
---
README.md | 4 ++--
owl/{webapp.py => webapp_zh.py} | 0
2 files changed, 2 insertions(+), 2 deletions(-)
rename owl/{webapp.py => webapp_zh.py} (100%)
diff --git a/README.md b/README.md
index 687f03f..23e8a28 100644
--- a/README.md
+++ b/README.md
@@ -422,10 +422,10 @@ OWL includes an intuitive web-based user interface that makes it easier to inter
```bash
# Start the Chinese version
-python run_app_zh.py
+python owl/webapp_zh.py
# Start the English version
-python run_app.py
+python owl/webapp.py
```
## Features
diff --git a/owl/webapp.py b/owl/webapp_zh.py
similarity index 100%
rename from owl/webapp.py
rename to owl/webapp_zh.py
From fd2617ca73022480718bedf0ff9b6f2d6881254a Mon Sep 17 00:00:00 2001
From: "yifeng.wang" <3038880699@qq.com>
Date: Thu, 13 Mar 2025 12:29:40 +0800
Subject: [PATCH 03/38] update readme_zh
---
README_zh.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README_zh.md b/README_zh.md
index ac40fed..fb19750 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -414,10 +414,10 @@ OWL 现在包含一个基于网页的用户界面,使与系统交互变得更
```bash
# 中文版本
-python run_app_zh.py
+python owl/webapp_zh.py
# 英文版本
-python run_app.py
+python owl/webapp.py
```
网页界面提供以下功能:
From e31c39f9d880e0c55beac5bb84b04be9f568711a Mon Sep 17 00:00:00 2001
From: "yifeng.wang" <3038880699@qq.com>
Date: Thu, 13 Mar 2025 12:39:08 +0800
Subject: [PATCH 04/38] update run readme and module import
---
README.md | 10 +++++-----
README_zh.md | 8 ++++----
owl/examples/run.py | 2 +-
owl/examples/run_deepseek_zh.py | 2 +-
owl/examples/run_gaia_roleplaying.py | 2 +-
owl/examples/run_mini.py | 2 +-
owl/examples/run_ollama.py | 2 +-
owl/examples/run_openai_compatiable_model.py | 2 +-
owl/examples/run_qwen_mini_zh.py | 2 +-
owl/examples/run_qwen_zh.py | 2 +-
owl/examples/run_terminal.py | 2 +-
owl/examples/run_terminal_zh.py | 2 +-
12 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/README.md b/README.md
index 23e8a28..5928184 100644
--- a/README.md
+++ b/README.md
@@ -299,22 +299,22 @@ OWL supports various LLM backends, though capabilities may vary depending on the
```bash
# Run with Qwen model
-python owl/run_qwen_zh.py
+python owl/examples/run_qwen_zh.py
# Run with Deepseek model
-python owl/run_deepseek_zh.py
+python owl/examples/run_deepseek_zh.py
# Run with other OpenAI-compatible models
-python owl/run_openai_compatiable_model.py
+python owl/examples/run_openai_compatiable_model.py
# Run with Ollama
-python owl/run_ollama.py
+python owl/examples/run_ollama.py
```
For a simpler version that only requires an LLM API key, you can try our minimal example:
```bash
-python owl/run_mini.py
+python owl/examples/run_mini.py
```
You can run OWL agent with your own task by modifying the `run.py` script:
diff --git a/README_zh.md b/README_zh.md
index fb19750..3885736 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -303,16 +303,16 @@ OWL 支持多种 LLM 后端,但功能可能因模型的工具调用和多模
```bash
# 使用 Qwen 模型运行
-python owl/run_qwen_zh.py
+python owl/examples/run_qwen_zh.py
# 使用 Deepseek 模型运行
-python owl/run_deepseek_zh.py
+python owl/examples/run_deepseek_zh.py
# 使用其他 OpenAI 兼容模型运行
-python owl/run_openai_compatiable_model.py
+python owl/examples/run_openai_compatiable_model.py
# 使用 Ollama 运行
-python owl/run_ollama.py
+python owl/examples/run_ollama.py
```
你可以通过修改 `run.py` 脚本来运行自己的任务:
diff --git a/owl/examples/run.py b/owl/examples/run.py
index 2d562ea..9f1cb32 100644
--- a/owl/examples/run.py
+++ b/owl/examples/run.py
@@ -26,7 +26,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
-from utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
+from owl.utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
load_dotenv()
diff --git a/owl/examples/run_deepseek_zh.py b/owl/examples/run_deepseek_zh.py
index 52b4c34..f109722 100644
--- a/owl/examples/run_deepseek_zh.py
+++ b/owl/examples/run_deepseek_zh.py
@@ -31,7 +31,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
-from utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
+from owl.utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
from camel.logger import set_log_level
diff --git a/owl/examples/run_gaia_roleplaying.py b/owl/examples/run_gaia_roleplaying.py
index ff4b90b..d08c669 100644
--- a/owl/examples/run_gaia_roleplaying.py
+++ b/owl/examples/run_gaia_roleplaying.py
@@ -32,7 +32,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
from camel.configs import ChatGPTConfig
-from utils import GAIABenchmark
+from owl.utils import GAIABenchmark
from camel.logger import set_log_level
set_log_level(level="DEBUG")
diff --git a/owl/examples/run_mini.py b/owl/examples/run_mini.py
index a20849e..59fac6e 100644
--- a/owl/examples/run_mini.py
+++ b/owl/examples/run_mini.py
@@ -22,7 +22,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
load_dotenv()
set_log_level(level="DEBUG")
diff --git a/owl/examples/run_ollama.py b/owl/examples/run_ollama.py
index 55f1b8a..8da8a64 100644
--- a/owl/examples/run_ollama.py
+++ b/owl/examples/run_ollama.py
@@ -25,7 +25,7 @@ from camel.toolkits import (
)
from camel.types import ModelPlatformType
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
diff --git a/owl/examples/run_openai_compatiable_model.py b/owl/examples/run_openai_compatiable_model.py
index fd271ce..029059b 100644
--- a/owl/examples/run_openai_compatiable_model.py
+++ b/owl/examples/run_openai_compatiable_model.py
@@ -25,7 +25,7 @@ from camel.toolkits import (
)
from camel.types import ModelPlatformType
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
diff --git a/owl/examples/run_qwen_mini_zh.py b/owl/examples/run_qwen_mini_zh.py
index d602315..f6782ee 100644
--- a/owl/examples/run_qwen_mini_zh.py
+++ b/owl/examples/run_qwen_mini_zh.py
@@ -22,7 +22,7 @@ from camel.models import ModelFactory
from camel.toolkits import BrowserToolkit, SearchToolkit, FileWriteToolkit
from camel.types import ModelPlatformType, ModelType
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
diff --git a/owl/examples/run_qwen_zh.py b/owl/examples/run_qwen_zh.py
index dceb851..e7c36df 100644
--- a/owl/examples/run_qwen_zh.py
+++ b/owl/examples/run_qwen_zh.py
@@ -29,7 +29,7 @@ from camel.toolkits import (
)
from camel.types import ModelPlatformType, ModelType
-from utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
+from owl.utils import OwlRolePlaying, run_society, DocumentProcessingToolkit
from camel.logger import set_log_level
diff --git a/owl/examples/run_terminal.py b/owl/examples/run_terminal.py
index 3741a7e..576f29c 100644
--- a/owl/examples/run_terminal.py
+++ b/owl/examples/run_terminal.py
@@ -23,7 +23,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
load_dotenv()
set_log_level(level="DEBUG")
diff --git a/owl/examples/run_terminal_zh.py b/owl/examples/run_terminal_zh.py
index 2582c24..946a0ed 100644
--- a/owl/examples/run_terminal_zh.py
+++ b/owl/examples/run_terminal_zh.py
@@ -23,7 +23,7 @@ from camel.toolkits import (
from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
load_dotenv()
set_log_level(level="DEBUG")
From 0b0fc0bc56b14349587836c2f61ac44b0f7ef237 Mon Sep 17 00:00:00 2001
From: "yifeng.wang" <3038880699@qq.com>
Date: Thu, 13 Mar 2025 12:46:04 +0800
Subject: [PATCH 05/38] update webapp navigate url and text
---
owl/webapp_zh.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/owl/webapp_zh.py b/owl/webapp_zh.py
index 3187e7c..4b6595e 100644
--- a/owl/webapp_zh.py
+++ b/owl/webapp_zh.py
@@ -251,10 +251,10 @@ def create_ui():
""")
-
+
with gr.Row():
with gr.Column(scale=2):
question_input = gr.Textbox(
@@ -620,26 +630,28 @@ def create_ui():
elem_id="question_input",
show_copy_button=True,
)
-
+
# 增强版模块选择下拉菜单
# 只包含MODULE_DESCRIPTIONS中定义的模块
module_dropdown = gr.Dropdown(
choices=list(MODULE_DESCRIPTIONS.keys()),
value="run_terminal_zh",
label="选择功能模块",
- interactive=True
+ interactive=True,
)
-
+
# 模块描述文本框
module_description = gr.Textbox(
value=MODULE_DESCRIPTIONS["run_terminal_zh"],
label="模块描述",
interactive=False,
- elem_classes="module-info"
+ elem_classes="module-info",
)
-
- run_button = gr.Button("运行", variant="primary", elem_classes="primary")
-
+
+ run_button = gr.Button(
+ "运行", variant="primary", elem_classes="primary"
+ )
+
with gr.Column(scale=1):
gr.Markdown("""
### 使用指南
@@ -651,127 +663,103 @@ def create_ui():
> **高级提示**: 对于复杂任务,可以尝试指定具体步骤和预期结果
""")
-
+
status_output = gr.Textbox(label="状态", interactive=False)
-
+
with gr.Tabs():
with gr.TabItem("回答"):
answer_output = gr.Textbox(
- label="回答",
- lines=10,
- elem_classes="answer-box"
+ label="回答", lines=10, elem_classes="answer-box"
)
-
+
with gr.TabItem("对话历史"):
chat_output = gr.Chatbot(
- label="完整对话记录",
- elem_classes="chat-container",
- height=500
+ label="完整对话记录", elem_classes="chat-container", height=500
)
-
-
-
+
token_count_output = gr.Textbox(
- label="令牌计数",
- interactive=False,
- elem_classes="token-count"
+ label="令牌计数", interactive=False, elem_classes="token-count"
)
-
+
# 示例问题
examples = [
"打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,用本地终端执行python文件显示图出来给我",
"请分析GitHub上CAMEL-AI项目的最新统计数据。找出该项目的星标数量、贡献者数量和最近的活跃度。",
"浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
"写一个hello world的python文件,保存到本地",
-
]
-
- gr.Examples(
- examples=examples,
- inputs=question_input
- )
+
+ gr.Examples(examples=examples, inputs=question_input)
# 新增: 环境变量管理选项卡
with gr.TabItem("环境变量管理", id="env-settings"):
- gr.Markdown("""
+ gr.Markdown("""
## 环境变量管理
在此处设置模型API密钥和其他服务凭证。这些信息将保存在本地的`.env`文件中,确保您的API密钥安全存储且不会上传到网络。
""")
-
- # 环境变量表格
- env_table = gr.Dataframe(
- headers=["变量名", "值"],
- datatype=["str", "str"],
- row_count=10,
- col_count=(2, "fixed"),
- value=update_env_table,
- label="当前环境变量",
- interactive=False
- )
-
- with gr.Row():
- with gr.Column(scale=1):
- new_env_key = gr.Textbox(label="变量名", placeholder="例如: OPENAI_API_KEY")
- with gr.Column(scale=2):
- new_env_value = gr.Textbox(label="值", placeholder="输入API密钥或其他配置值")
-
- with gr.Row():
- add_env_button = gr.Button("添加/更新变量", variant="primary")
- refresh_button = gr.Button("刷新变量列表")
- delete_env_button = gr.Button("删除选定变量", variant="stop")
-
- env_status = gr.Textbox(label="状态", interactive=False)
-
- # 变量选择器(用于删除)
- env_var_to_delete = gr.Dropdown(
- choices=[],
- label="选择要删除的变量",
- interactive=True
- )
-
- # 更新变量选择器的选项
- def update_delete_dropdown():
- env_vars = load_env_vars()
- return gr.Dropdown.update(choices=list(env_vars.keys()))
-
- # 连接事件处理函数
- add_env_button.click(
- fn=lambda k, v: add_env_var(k, v),
- inputs=[new_env_key, new_env_value],
- outputs=[env_status]
- ).then(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- ).then(
- fn=lambda: ("", ""), # 修改为返回两个空字符串的元组
- outputs=[new_env_key, new_env_value]
- )
-
- refresh_button.click(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- )
-
- delete_env_button.click(
- fn=lambda k: delete_env_var(k),
- inputs=[env_var_to_delete],
- outputs=[env_status]
- ).then(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- )
+ # 环境变量表格
+ env_table = gr.Dataframe(
+ headers=["变量名", "值"],
+ datatype=["str", "str"],
+ row_count=10,
+ col_count=(2, "fixed"),
+ value=update_env_table,
+ label="当前环境变量",
+ interactive=False,
+ )
+
+ with gr.Row():
+ with gr.Column(scale=1):
+ new_env_key = gr.Textbox(
+ label="变量名", placeholder="例如: OPENAI_API_KEY"
+ )
+ with gr.Column(scale=2):
+ new_env_value = gr.Textbox(
+ label="值", placeholder="输入API密钥或其他配置值"
+ )
+
+ with gr.Row():
+ add_env_button = gr.Button("添加/更新变量", variant="primary")
+ refresh_button = gr.Button("刷新变量列表")
+ delete_env_button = gr.Button("删除选定变量", variant="stop")
+
+ env_status = gr.Textbox(label="状态", interactive=False)
+
+ # 变量选择器(用于删除)
+ env_var_to_delete = gr.Dropdown(
+ choices=[], label="选择要删除的变量", interactive=True
+ )
+
+ # 更新变量选择器的选项
+ def update_delete_dropdown():
+ env_vars = load_env_vars()
+ return gr.Dropdown.update(choices=list(env_vars.keys()))
+
+ # 连接事件处理函数
+ add_env_button.click(
+ fn=lambda k, v: add_env_var(k, v),
+ inputs=[new_env_key, new_env_value],
+ outputs=[env_status],
+ ).then(fn=update_env_table, outputs=[env_table]).then(
+ fn=update_delete_dropdown, outputs=[env_var_to_delete]
+ ).then(
+ fn=lambda: ("", ""), # 修改为返回两个空字符串的元组
+ outputs=[new_env_key, new_env_value],
+ )
+
+ refresh_button.click(fn=update_env_table, outputs=[env_table]).then(
+ fn=update_delete_dropdown, outputs=[env_var_to_delete]
+ )
+
+ delete_env_button.click(
+ fn=lambda k: delete_env_var(k),
+ inputs=[env_var_to_delete],
+ outputs=[env_status],
+ ).then(fn=update_env_table, outputs=[env_table]).then(
+ fn=update_delete_dropdown, outputs=[env_var_to_delete]
+ )
-
gr.HTML("""
""")
-
+
# 设置事件处理
run_button.click(
fn=run_owl,
- inputs=[question_input, module_dropdown],
- outputs=[answer_output, chat_output, token_count_output, status_output]
+ inputs=[question_input, module_dropdown],
+ outputs=[answer_output, chat_output, token_count_output, status_output],
)
-
+
# 模块选择更新描述
module_dropdown.change(
fn=update_module_description,
inputs=module_dropdown,
- outputs=module_description
+ outputs=module_description,
)
-
+
return app
+
# 主函数
def main():
try:
@@ -807,7 +796,9 @@ def main():
except Exception as e:
print(f"启动应用程序时发生错误: {str(e)}")
import traceback
+
traceback.print_exc()
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
diff --git a/owl/utils/enhanced_role_playing.py b/owl/utils/enhanced_role_playing.py
index 0cbc2c9..bf1e5bb 100644
--- a/owl/utils/enhanced_role_playing.py
+++ b/owl/utils/enhanced_role_playing.py
@@ -381,6 +381,12 @@ Now please give me instructions to solve over overall task step by step. If the
"""
input_msg = society.init_chat(init_prompt)
for _round in range(round_limit):
+ # Check if previous user response had TASK_DONE before getting next assistant response
+ if _round > 0 and (
+ "TASK_DONE" in input_msg.content or "任务已完成" in input_msg.content
+ ):
+ break
+
assistant_response, user_response = society.step(input_msg)
overall_completion_token_count += (
assistant_response.info["usage"]["completion_tokens"]
@@ -408,10 +414,12 @@ Now please give me instructions to solve over overall task step by step. If the
f"Round #{_round} assistant_response:\n {assistant_response.msgs[0].content}"
)
+ # Check other termination conditions
if (
assistant_response.terminated
or user_response.terminated
or "TASK_DONE" in user_response.msg.content
+ or "任务已完成" in user_response.msg.content
):
break
diff --git a/owl/webapp_zh.py b/owl/webapp_zh.py
index 9b58cbe..834508d 100644
--- a/owl/webapp_zh.py
+++ b/owl/webapp_zh.py
@@ -1,3 +1,16 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
# Import from the correct module path
from owl.utils import run_society
import os
@@ -6,19 +19,15 @@ import time
import json
import logging
import datetime
-from typing import Tuple, List, Dict, Any
+from typing import Tuple
import importlib
from dotenv import load_dotenv, set_key, find_dotenv, unset_key
import threading
import queue
-import time
-import signal
-import sys
-import subprocess
-import platform
-import re
+import re # For regular expression operations
+
+os.environ["PYTHONIOENCODING"] = "utf-8"
-os.environ['PYTHONIOENCODING'] = 'utf-8'
# 配置日志系统
def setup_logging():
@@ -26,94 +35,112 @@ def setup_logging():
# 创建logs目录(如果不存在)
logs_dir = os.path.join(os.path.dirname(__file__), "logs")
os.makedirs(logs_dir, exist_ok=True)
-
+
# 生成日志文件名(使用当前日期)
current_date = datetime.datetime.now().strftime("%Y-%m-%d")
log_file = os.path.join(logs_dir, f"gradio_log_{current_date}.txt")
-
+
# 配置根日志记录器(捕获所有日志)
root_logger = logging.getLogger()
-
+
# 清除现有的处理器,避免重复日志
for handler in root_logger.handlers[:]:
root_logger.removeHandler(handler)
-
+
root_logger.setLevel(logging.INFO)
-
+
# 创建文件处理器
- file_handler = logging.FileHandler(log_file, encoding='utf-8', mode='a')
+ file_handler = logging.FileHandler(log_file, encoding="utf-8", mode="a")
file_handler.setLevel(logging.INFO)
-
+
# 创建控制台处理器
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
-
+
# 创建格式化器
- formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+ formatter = logging.Formatter(
+ "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+ )
file_handler.setFormatter(formatter)
console_handler.setFormatter(formatter)
-
+
# 添加处理器到根日志记录器
root_logger.addHandler(file_handler)
root_logger.addHandler(console_handler)
-
+
logging.info("日志系统已初始化,日志文件: %s", log_file)
return log_file
+
# 全局变量
LOG_FILE = None
-LOG_QUEUE = queue.Queue()
-LOG_QUEUE2 = queue.Queue() # 对话记录的队列
+LOG_QUEUE: queue.Queue = queue.Queue() # 日志队列
STOP_LOG_THREAD = threading.Event()
CURRENT_PROCESS = None # 用于跟踪当前运行的进程
STOP_REQUESTED = threading.Event() # 用于标记是否请求停止
+
# 日志读取和更新函数
def log_reader_thread(log_file):
"""后台线程,持续读取日志文件并将新行添加到队列中"""
try:
- with open(log_file, 'r', encoding='utf-8') as f:
+ with open(log_file, "r", encoding="utf-8") as f:
# 移动到文件末尾
f.seek(0, 2)
-
+
while not STOP_LOG_THREAD.is_set():
line = f.readline()
if line:
- LOG_QUEUE.put(line)
- LOG_QUEUE2.put(line) # 同时添加到第二个队列
+ LOG_QUEUE.put(line) # 添加到对话记录队列
else:
# 没有新行,等待一小段时间
time.sleep(0.1)
except Exception as e:
logging.error(f"日志读取线程出错: {str(e)}")
+
def get_latest_logs(max_lines=100, queue_source=None):
"""从队列中获取最新的日志行,如果队列为空则直接从文件读取
-
+
Args:
max_lines: 最大返回行数
queue_source: 指定使用哪个队列,默认为LOG_QUEUE
-
+
Returns:
str: 日志内容
"""
logs = []
log_queue = queue_source if queue_source else LOG_QUEUE
+
+ # 创建一个临时队列来存储日志,以便我们可以处理它们而不会从原始队列中删除它们
+ temp_queue = queue.Queue()
+ temp_logs = []
+
try:
# 尝试从队列中获取所有可用的日志行
- while not log_queue.empty() and len(logs) < max_lines:
- logs.append(log_queue.get_nowait())
+ while not log_queue.empty() and len(temp_logs) < max_lines:
+ log = log_queue.get_nowait()
+ temp_logs.append(log)
+ temp_queue.put(log) # 将日志放回临时队列
except queue.Empty:
pass
-
+
+ # 处理对话记录
+ logs = temp_logs
+
# 如果没有新日志或日志不足,尝试直接从文件读取最后几行
if len(logs) < max_lines and LOG_FILE and os.path.exists(LOG_FILE):
try:
- with open(LOG_FILE, 'r', encoding='utf-8') as f:
+ with open(LOG_FILE, "r", encoding="utf-8") as f:
all_lines = f.readlines()
# 如果队列中已有一些日志,只读取剩余需要的行数
remaining_lines = max_lines - len(logs)
- file_logs = all_lines[-remaining_lines:] if len(all_lines) > remaining_lines else all_lines
+ file_logs = (
+ all_lines[-remaining_lines:]
+ if len(all_lines) > remaining_lines
+ else all_lines
+ )
+
# 将文件日志添加到队列日志之前
logs = file_logs + logs
except Exception as e:
@@ -121,51 +148,106 @@ def get_latest_logs(max_lines=100, queue_source=None):
logging.error(error_msg)
if not logs: # 只有在没有任何日志的情况下才添加错误消息
logs = [error_msg]
-
+
# 如果仍然没有日志,返回提示信息
if not logs:
- return "暂无日志记录或日志系统未正确初始化。"
-
- # 格式化日志输出,确保每个日志条目有适当的换行和分隔
- formatted_logs = []
+ return "暂无对话记录。"
+
+ # 过滤日志,只保留 camel.agents.chat_agent - INFO 的日志
+ filtered_logs = []
for log in logs:
+ if "camel.agents.chat_agent - INFO" in log:
+ filtered_logs.append(log)
+
+ # 如果过滤后没有日志,返回提示信息
+ if not filtered_logs:
+ return "暂无对话记录。"
+
+ # 处理日志内容,提取最新的用户和助手消息
+ simplified_logs = []
+
+ # 使用集合来跟踪已经处理过的消息,避免重复
+ processed_messages = set()
+
+ def process_message(role, content):
+ # 创建一个唯一标识符来跟踪消息
+ msg_id = f"{role}:{content}"
+ if msg_id in processed_messages:
+ return None
+
+ processed_messages.add(msg_id)
+ content = content.replace("\\n", "\n")
+ lines = [line.strip() for line in content.split("\n")]
+ content = "\n".join(lines)
+
+ return f"[{role.title()} Agent]: {content}"
+
+ for log in filtered_logs:
+ formatted_messages = []
+ # 尝试提取消息数组
+ messages_match = re.search(
+ r"Model (.*?), index (\d+), processed these messages: (\[.*\])", log
+ )
+
+ if messages_match:
+ try:
+ messages = json.loads(messages_match.group(3))
+ for msg in messages:
+ if msg.get("role") in ["user", "assistant"]:
+ formatted_msg = process_message(
+ msg.get("role"), msg.get("content", "")
+ )
+ if formatted_msg:
+ formatted_messages.append(formatted_msg)
+ except json.JSONDecodeError:
+ pass
+
+ # 如果JSON解析失败或没有找到消息数组,尝试直接提取对话内容
+ if not formatted_messages:
+ user_pattern = re.compile(r"\{'role': 'user', 'content': '(.*?)'\}")
+ assistant_pattern = re.compile(
+ r"\{'role': 'assistant', 'content': '(.*?)'\}"
+ )
+
+ for content in user_pattern.findall(log):
+ formatted_msg = process_message("user", content)
+ if formatted_msg:
+ formatted_messages.append(formatted_msg)
+
+ for content in assistant_pattern.findall(log):
+ formatted_msg = process_message("assistant", content)
+ if formatted_msg:
+ formatted_messages.append(formatted_msg)
+
+ if formatted_messages:
+ simplified_logs.append("\n\n".join(formatted_messages))
+
+ # 格式化日志输出,确保每个对话记录之间有适当的分隔
+ formatted_logs = []
+ for i, log in enumerate(simplified_logs):
# 移除开头和结尾的多余空白字符
log = log.strip()
-
- # 处理包含JSON或代码片段的日志,确保它们有正确的换行和缩进
- if '"]"\n}' in log or '\n}\n\n' in log:
- # 替换不合理的换行为更清晰的格式
- log = log.replace('"]"\n}', '"]" }').replace('\n}\n\n', ' }\n')
-
- # 检测日期时间格式的开头,这通常表示一个新的日志条目
- # 例如:2025-03-14 18:49:31,008 - httpx - INFO
- if re.match(r'^\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2},\d{3}', log):
- # 在新的日志条目前添加一个空行,使日志更易读
- formatted_logs.append('\n')
-
- # 确保每个日志条目以换行符结束
- if not log.endswith('\n'):
- log += '\n'
-
+
formatted_logs.append(log)
-
- # 移除第一个可能的额外空行
- if formatted_logs and formatted_logs[0] == '\n':
- formatted_logs.pop(0)
-
+
+ # 确保每个对话记录以换行符结束
+ if not log.endswith("\n"):
+ formatted_logs.append("\n")
+
return "".join(formatted_logs)
+
# Dictionary containing module descriptions
MODULE_DESCRIPTIONS = {
"run": "默认模式:使用OpenAI模型的默认的智能体协作模式,适合大多数任务。",
- "run_mini":"使用使用OpenAI模型最小化配置处理任务",
- "run_deepseek_zh":"使用deepseek模型处理中文任务",
+ "run_mini": "使用使用OpenAI模型最小化配置处理任务",
+ "run_deepseek_zh": "使用deepseek模型处理中文任务",
"run_terminal_zh": "终端模式:可执行命令行操作,支持网络搜索、文件处理等功能。适合需要系统交互的任务,使用OpenAI模型",
- "run_gaia_roleplaying":"GAIA基准测试实现,用于评估Agent能力",
- "run_openai_compatiable_model":"使用openai兼容模型处理任务",
- "run_ollama":"使用本地ollama模型处理任务",
- "run_qwen_mini_zh":"使用qwen模型最小化配置处理任务",
- "run_qwen_zh":"使用qwen模型处理任务",
+ "run_gaia_roleplaying": "GAIA基准测试实现,用于评估Agent能力",
+ "run_openai_compatiable_model": "使用openai兼容模型处理任务",
+ "run_ollama": "使用本地ollama模型处理任务",
+ "run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
+ "run_qwen_zh": "使用qwen模型处理任务",
}
# API帮助信息
@@ -173,43 +255,43 @@ API_HELP_INFO = {
"OPENAI_API_KEY": {
"name": "OpenAI API",
"desc": "OpenAI API密钥,用于访问GPT系列模型",
- "url": "https://platform.openai.com/api-keys"
+ "url": "https://platform.openai.com/api-keys",
},
"QWEN_API_KEY": {
"name": "通义千问 API",
"desc": "阿里云通义千问API密钥",
- "url": "https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key"
+ "url": "https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
},
"DEEPSEEK_API_KEY": {
"name": "DeepSeek API",
"desc": "DeepSeek API密钥",
- "url": "https://platform.deepseek.com/api_keys"
+ "url": "https://platform.deepseek.com/api_keys",
},
"GOOGLE_API_KEY": {
"name": "Google Search API",
"desc": "Google自定义搜索API密钥",
- "url": "https://developers.google.com/custom-search/v1/overview"
+ "url": "https://developers.google.com/custom-search/v1/overview",
},
"SEARCH_ENGINE_ID": {
"name": "Google Search Engine ID",
"desc": "Google自定义搜索引擎ID",
- "url": "https://developers.google.com/custom-search/v1/overview"
+ "url": "https://developers.google.com/custom-search/v1/overview",
},
"HF_TOKEN": {
"name": "Hugging Face API",
"desc": "Hugging Face API令牌",
- "url": "https://huggingface.co/join"
+ "url": "https://huggingface.co/join",
},
"CHUNKR_API_KEY": {
"name": "Chunkr API",
"desc": "Chunkr API密钥",
- "url": "https://chunkr.ai/"
+ "url": "https://chunkr.ai/",
},
"FIRECRAWL_API_KEY": {
"name": "Firecrawl API",
"desc": "Firecrawl API密钥",
- "url": "https://www.firecrawl.dev/"
- }
+ "url": "https://www.firecrawl.dev/",
+ },
}
# 默认环境变量模板
@@ -245,13 +327,12 @@ FIRECRAWL_API_KEY=""
"""
-
def validate_input(question: str) -> bool:
"""验证用户输入是否有效
-
+
Args:
question: 用户问题
-
+
Returns:
bool: 输入是否有效
"""
@@ -260,41 +341,38 @@ def validate_input(question: str) -> bool:
return False
return True
+
def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
"""运行OWL系统并返回结果
-
+
Args:
question: 用户问题
example_module: 要导入的示例模块名(如 "run_terminal_zh" 或 "run_deep")
-
+
Returns:
Tuple[...]: 回答、令牌计数、状态
"""
global CURRENT_PROCESS
-
+
# 验证输入
if not validate_input(question):
logging.warning("用户提交了无效的输入")
- return (
- "请输入有效的问题",
- "0",
- "❌ 错误: 输入无效"
- )
-
+ return ("请输入有效的问题", "0", "❌ 错误: 输入无效")
+
try:
# 确保环境变量已加载
load_dotenv(find_dotenv(), override=True)
logging.info(f"处理问题: '{question}', 使用模块: {example_module}")
-
+
# 检查模块是否在MODULE_DESCRIPTIONS中
if example_module not in MODULE_DESCRIPTIONS:
logging.error(f"用户选择了不支持的模块: {example_module}")
return (
- f"所选模块 '{example_module}' 不受支持",
- "0",
- f"❌ 错误: 不支持的模块"
+ f"所选模块 '{example_module}' 不受支持",
+ "0",
+ "❌ 错误: 不支持的模块",
)
-
+
# 动态导入目标模块
module_path = f"owl.examples.{example_module}"
try:
@@ -303,41 +381,36 @@ def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
except ImportError as ie:
logging.error(f"无法导入模块 {module_path}: {str(ie)}")
return (
- f"无法导入模块: {module_path}",
- "0",
- f"❌ 错误: 模块 {example_module} 不存在或无法加载 - {str(ie)}"
+ f"无法导入模块: {module_path}",
+ "0",
+ f"❌ 错误: 模块 {example_module} 不存在或无法加载 - {str(ie)}",
)
except Exception as e:
logging.error(f"导入模块 {module_path} 时发生错误: {str(e)}")
- return (
- f"导入模块时发生错误: {module_path}",
- "0",
- f"❌ 错误: {str(e)}"
- )
-
+ return (f"导入模块时发生错误: {module_path}", "0", f"❌ 错误: {str(e)}")
+
# 检查是否包含construct_society函数
if not hasattr(module, "construct_society"):
logging.error(f"模块 {module_path} 中未找到 construct_society 函数")
return (
- f"模块 {module_path} 中未找到 construct_society 函数",
- "0",
- f"❌ 错误: 模块接口不兼容"
+ f"模块 {module_path} 中未找到 construct_society 函数",
+ "0",
+ "❌ 错误: 模块接口不兼容",
)
-
+
# 构建社会模拟
try:
logging.info("正在构建社会模拟...")
society = module.construct_society(question)
-
except Exception as e:
logging.error(f"构建社会模拟时发生错误: {str(e)}")
return (
- f"构建社会模拟时发生错误: {str(e)}",
- "0",
- f"❌ 错误: 构建失败 - {str(e)}"
+ f"构建社会模拟时发生错误: {str(e)}",
+ "0",
+ f"❌ 错误: 构建失败 - {str(e)}",
)
-
+
# 运行社会模拟
try:
logging.info("正在运行社会模拟...")
@@ -346,42 +419,45 @@ def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
except Exception as e:
logging.error(f"运行社会模拟时发生错误: {str(e)}")
return (
- f"运行社会模拟时发生错误: {str(e)}",
- "0",
- f"❌ 错误: 运行失败 - {str(e)}"
+ f"运行社会模拟时发生错误: {str(e)}",
+ "0",
+ f"❌ 错误: 运行失败 - {str(e)}",
)
-
-
# 安全地获取令牌计数
if not isinstance(token_info, dict):
token_info = {}
-
+
completion_tokens = token_info.get("completion_token_count", 0)
prompt_tokens = token_info.get("prompt_token_count", 0)
total_tokens = completion_tokens + prompt_tokens
-
- logging.info(f"处理完成,令牌使用: 完成={completion_tokens}, 提示={prompt_tokens}, 总计={total_tokens}")
-
- return (
- answer,
- f"完成令牌: {completion_tokens:,} | 提示令牌: {prompt_tokens:,} | 总计: {total_tokens:,}",
- "✅ 成功完成"
+
+ logging.info(
+ f"处理完成,令牌使用: 完成={completion_tokens}, 提示={prompt_tokens}, 总计={total_tokens}"
)
-
+
+ return (
+ answer,
+ f"完成令牌: {completion_tokens:,} | 提示令牌: {prompt_tokens:,} | 总计: {total_tokens:,}",
+ "✅ 成功完成",
+ )
+
except Exception as e:
logging.error(f"处理问题时发生未捕获的错误: {str(e)}")
- return (
- f"发生错误: {str(e)}",
- "0",
- f"❌ 错误: {str(e)}"
- )
+ return (f"发生错误: {str(e)}", "0", f"❌ 错误: {str(e)}")
+
def update_module_description(module_name: str) -> str:
"""返回所选模块的描述"""
return MODULE_DESCRIPTIONS.get(module_name, "无可用描述")
+
# 环境变量管理功能
+
+# 存储前端配置的环境变量
+WEB_FRONTEND_ENV_VARS: dict[str, str] = {}
+
+
def init_env_file():
"""初始化.env文件如果不存在"""
dotenv_path = find_dotenv()
@@ -391,116 +467,332 @@ def init_env_file():
dotenv_path = find_dotenv()
return dotenv_path
+
def load_env_vars():
- """加载环境变量并返回字典格式"""
+ """加载环境变量并返回字典格式
+
+ Returns:
+ dict: 环境变量字典,每个值为一个包含值和来源的元组 (value, source)
+ """
dotenv_path = init_env_file()
load_dotenv(dotenv_path, override=True)
-
- env_vars = {}
+
+ # 从.env文件读取环境变量
+ env_file_vars = {}
with open(dotenv_path, "r") as f:
for line in f:
line = line.strip()
if line and not line.startswith("#"):
if "=" in line:
key, value = line.split("=", 1)
- env_vars[key.strip()] = value.strip().strip('"\'')
-
+ env_file_vars[key.strip()] = value.strip().strip("\"'")
+
+ # 从系统环境变量中获取
+ system_env_vars = {
+ k: v
+ for k, v in os.environ.items()
+ if k not in env_file_vars and k not in WEB_FRONTEND_ENV_VARS
+ }
+
+ # 合并环境变量,并标记来源
+ env_vars = {}
+
+ # 添加系统环境变量(最低优先级)
+ for key, value in system_env_vars.items():
+ env_vars[key] = (value, "系统")
+
+ # 添加.env文件环境变量(中等优先级)
+ for key, value in env_file_vars.items():
+ env_vars[key] = (value, ".env文件")
+
+ # 添加前端配置的环境变量(最高优先级)
+ for key, value in WEB_FRONTEND_ENV_VARS.items():
+ env_vars[key] = (value, "前端配置")
+ # 确保操作系统环境变量也被更新
+ os.environ[key] = value
+
return env_vars
+
def save_env_vars(env_vars):
- """保存环境变量到.env文件"""
+ """保存环境变量到.env文件
+
+ Args:
+ env_vars: 字典,键为环境变量名,值可以是字符串或(值,来源)元组
+ """
try:
dotenv_path = init_env_file()
-
+
# 保存每个环境变量
- for key, value in env_vars.items():
+ for key, value_data in env_vars.items():
if key and key.strip(): # 确保键不为空
+ # 处理值可能是元组的情况
+ if isinstance(value_data, tuple):
+ value = value_data[0]
+ else:
+ value = value_data
+
set_key(dotenv_path, key.strip(), value.strip())
-
+
# 重新加载环境变量以确保生效
load_dotenv(dotenv_path, override=True)
-
+
return True, "环境变量已成功保存!"
except Exception as e:
return False, f"保存环境变量时出错: {str(e)}"
-def add_env_var(key, value):
- """添加或更新单个环境变量"""
+
+def add_env_var(key, value, from_frontend=True):
+ """添加或更新单个环境变量
+
+ Args:
+ key: 环境变量名
+ value: 环境变量值
+ from_frontend: 是否来自前端配置,默认为True
+ """
try:
if not key or not key.strip():
return False, "变量名不能为空"
-
+
+ key = key.strip()
+ value = value.strip()
+
+ # 如果来自前端,则添加到前端环境变量字典
+ if from_frontend:
+ WEB_FRONTEND_ENV_VARS[key] = value
+ # 直接更新系统环境变量
+ os.environ[key] = value
+
+ # 同时更新.env文件
dotenv_path = init_env_file()
- set_key(dotenv_path, key.strip(), value.strip())
+ set_key(dotenv_path, key, value)
load_dotenv(dotenv_path, override=True)
-
+
return True, f"环境变量 {key} 已成功添加/更新!"
except Exception as e:
return False, f"添加环境变量时出错: {str(e)}"
+
def delete_env_var(key):
"""删除环境变量"""
try:
if not key or not key.strip():
return False, "变量名不能为空"
-
+
+ key = key.strip()
+
+ # 从.env文件中删除
dotenv_path = init_env_file()
- unset_key(dotenv_path, key.strip())
-
+ unset_key(dotenv_path, key)
+
+ # 从前端环境变量字典中删除
+ if key in WEB_FRONTEND_ENV_VARS:
+ del WEB_FRONTEND_ENV_VARS[key]
+
# 从当前进程环境中也删除
if key in os.environ:
del os.environ[key]
-
+
return True, f"环境变量 {key} 已成功删除!"
except Exception as e:
return False, f"删除环境变量时出错: {str(e)}"
-def mask_sensitive_value(key: str, value: str) -> str:
- """对敏感信息进行掩码处理
-
+
+def is_api_related(key: str) -> bool:
+ """判断环境变量是否与API相关
+
Args:
key: 环境变量名
- value: 环境变量值
-
+
Returns:
- str: 处理后的值
+ bool: 是否与API相关
"""
- # 定义需要掩码的敏感关键词
- sensitive_keywords = ['key', 'token', 'secret', 'password', 'api']
-
- # 检查是否包含敏感关键词(不区分大小写)
- is_sensitive = any(keyword in key.lower() for keyword in sensitive_keywords)
-
- if is_sensitive and value:
- # 如果是敏感信息且有值,则显示掩码
- return '*' * 8
- return value
+ # API相关的关键词
+ api_keywords = [
+ "api",
+ "key",
+ "token",
+ "secret",
+ "password",
+ "openai",
+ "qwen",
+ "deepseek",
+ "google",
+ "search",
+ "hf",
+ "hugging",
+ "chunkr",
+ "firecrawl",
+ ]
+
+ # 检查是否包含API相关关键词(不区分大小写)
+ return any(keyword in key.lower() for keyword in api_keywords)
+
+
+def get_api_guide(key: str) -> str:
+ """根据环境变量名返回对应的API获取指南
+
+ Args:
+ key: 环境变量名
+
+ Returns:
+ str: API获取指南链接或说明
+ """
+ key_lower = key.lower()
+ if "openai" in key_lower:
+ return "https://platform.openai.com/api-keys"
+ elif "qwen" in key_lower or "dashscope" in key_lower:
+ return "https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key"
+ elif "deepseek" in key_lower:
+ return "https://platform.deepseek.com/api_keys"
+ elif "google" in key_lower:
+ return "https://developers.google.com/custom-search/v1/overview"
+ elif "chunkr" in key_lower:
+ return "https://chunkr.ai/"
+ elif "firecrawl" in key_lower:
+ return "https://www.firecrawl.dev/"
+ else:
+ return ""
+
def update_env_table():
- """更新环境变量表格显示,对敏感信息进行掩码处理"""
+ """更新环境变量表格显示,只显示API相关的环境变量"""
env_vars = load_env_vars()
- # 对敏感值进行掩码处理
- masked_env_vars = [[k, mask_sensitive_value(k, v)] for k, v in env_vars.items()]
- return masked_env_vars
+ # 过滤出API相关的环境变量
+ api_env_vars = {k: v for k, v in env_vars.items() if is_api_related(k)}
+ # 转换为列表格式,以符合Gradio Dataframe的要求
+ # 格式: [变量名, 变量值, 获取指南链接]
+ result = []
+ for k, v in api_env_vars.items():
+ guide = get_api_guide(k)
+ # 如果有指南链接,创建一个可点击的链接
+ guide_link = (
+ f"🔗 获取"
+ if guide
+ else ""
+ )
+ result.append([k, v[0], guide_link])
+ return result
+
+
+def save_env_table_changes(data):
+ """保存环境变量表格的更改
+
+ Args:
+ data: Dataframe数据,可能是pandas DataFrame对象
+
+ Returns:
+ str: 操作状态信息,包含HTML格式的状态消息
+ """
+ try:
+ logging.info(f"开始处理环境变量表格数据,类型: {type(data)}")
+
+ # 获取当前所有环境变量
+ current_env_vars = load_env_vars()
+ processed_keys = set() # 记录已处理的键,用于检测删除的变量
+
+ # 处理pandas DataFrame对象
+ import pandas as pd
+
+ if isinstance(data, pd.DataFrame):
+ # 获取列名信息
+ columns = data.columns.tolist()
+ logging.info(f"DataFrame列名: {columns}")
+
+ # 遍历DataFrame的每一行
+ for index, row in data.iterrows():
+ # 使用列名或索引访问数据
+ if len(columns) >= 3:
+ # 如果有列名,使用列名访问
+ key = row.iloc[1] if hasattr(row, "iloc") else row[1]
+ value = row.iloc[2] if hasattr(row, "iloc") else row[2]
+
+ # 检查是否为空行或已删除的变量
+ if key and str(key).strip(): # 如果键名不为空,则添加或更新
+ logging.info(f"处理环境变量: {key} = {value}")
+ add_env_var(key, str(value))
+ processed_keys.add(key)
+ # 处理其他格式
+ elif isinstance(data, dict):
+ logging.info(f"字典格式数据的键: {list(data.keys())}")
+ # 如果是字典格式,尝试不同的键
+ if "data" in data:
+ rows = data["data"]
+ elif "values" in data:
+ rows = data["values"]
+ elif "value" in data:
+ rows = data["value"]
+ else:
+ # 尝试直接使用字典作为行数据
+ rows = []
+ for key, value in data.items():
+ if key not in ["headers", "types", "columns"]:
+ rows.append([key, value])
+
+ if isinstance(rows, list):
+ for row in rows:
+ if isinstance(row, list) and len(row) >= 2:
+ key, value = row[0], row[1]
+ if key and str(key).strip():
+ add_env_var(key, str(value))
+ processed_keys.add(key)
+ elif isinstance(data, list):
+ # 列表格式
+ for row in data:
+ if isinstance(row, list) and len(row) >= 2:
+ key, value = row[0], row[1]
+ if key and str(key).strip():
+ add_env_var(key, str(value))
+ processed_keys.add(key)
+ else:
+ logging.error(f"未知的数据格式: {type(data)}")
+ return f"❌ 保存失败: 未知的数据格式 {type(data)}"
+
+ # 处理删除的变量 - 检查当前环境变量中是否有未在表格中出现的变量
+ api_related_keys = {k for k in current_env_vars.keys() if is_api_related(k)}
+ keys_to_delete = api_related_keys - processed_keys
+
+ # 删除不再表格中的变量
+ for key in keys_to_delete:
+ logging.info(f"删除环境变量: {key}")
+ delete_env_var(key)
+
+ return "✅ 环境变量已成功保存"
+ except Exception as e:
+ import traceback
+
+ error_details = traceback.format_exc()
+ logging.error(f"保存环境变量时出错: {str(e)}\n{error_details}")
+ return f"❌ 保存失败: {str(e)}"
+
+
+def get_env_var_value(key):
+ """获取环境变量的实际值
+
+ 优先级:前端配置 > .env文件 > 系统环境变量
+ """
+ # 检查前端配置的环境变量
+ if key in WEB_FRONTEND_ENV_VARS:
+ return WEB_FRONTEND_ENV_VARS[key]
+
+ # 检查系统环境变量(包括从.env加载的)
+ return os.environ.get(key, "")
+
def create_ui():
"""创建增强版Gradio界面"""
-
- # 定义日志更新函数
- def update_logs():
- """获取最新日志并返回给前端显示"""
- return get_latest_logs(100)
-
+
+ # 定义对话记录更新函数
def update_logs2():
"""获取最新对话记录并返回给前端显示"""
- return get_latest_logs(100, LOG_QUEUE2)
-
+ return get_latest_logs(100, LOG_QUEUE)
+
def clear_log_file():
"""清空日志文件内容"""
try:
if LOG_FILE and os.path.exists(LOG_FILE):
# 清空日志文件内容而不是删除文件
- open(LOG_FILE, 'w').close()
+ open(LOG_FILE, "w").close()
logging.info("日志文件已清空")
# 清空日志队列
while not LOG_QUEUE.empty():
@@ -508,82 +800,85 @@ def create_ui():
LOG_QUEUE.get_nowait()
except queue.Empty:
break
- # 清空第二个日志队列
- while not LOG_QUEUE2.empty():
- try:
- LOG_QUEUE2.get_nowait()
- except queue.Empty:
- break
- return "日志文件已清空"
+ return ""
else:
- return "日志文件不存在或未设置"
+ return ""
except Exception as e:
logging.error(f"清空日志文件时出错: {str(e)}")
- return f"清空日志文件时出错: {str(e)}"
-
+ return ""
+
# 创建一个实时日志更新函数
def process_with_live_logs(question, module_name):
"""处理问题并实时更新日志"""
global CURRENT_PROCESS
-
+
# 创建一个后台线程来处理问题
result_queue = queue.Queue()
-
+
def process_in_background():
try:
result = run_owl(question, module_name)
result_queue.put(result)
except Exception as e:
result_queue.put((f"发生错误: {str(e)}", "0", f"❌ 错误: {str(e)}"))
-
+
# 启动后台处理线程
bg_thread = threading.Thread(target=process_in_background)
CURRENT_PROCESS = bg_thread # 记录当前进程
bg_thread.start()
-
+
# 在等待处理完成的同时,每秒更新一次日志
while bg_thread.is_alive():
- # 更新日志显示
- logs = get_latest_logs(100)
- logs2 = get_latest_logs(100, LOG_QUEUE2)
-
+ # 更新对话记录显示
+ logs2 = get_latest_logs(100, LOG_QUEUE)
+
# 始终更新状态
- yield None, "0", " 处理中...", logs, logs2
-
+ yield (
+ "0",
+ " 处理中...",
+ logs2,
+ )
+
time.sleep(1)
-
+
# 处理完成,获取结果
if not result_queue.empty():
result = result_queue.get()
answer, token_count, status = result
-
- # 最后一次更新日志
- logs = get_latest_logs(100)
- logs2 = get_latest_logs(100, LOG_QUEUE2)
-
+
+ # 最后一次更新对话记录
+ logs2 = get_latest_logs(100, LOG_QUEUE)
+
# 根据状态设置不同的指示器
if "错误" in status:
- status_with_indicator = f" {status}"
+ status_with_indicator = (
+ f" {status}"
+ )
else:
- status_with_indicator = f" {status}"
-
- yield answer, token_count, status_with_indicator, logs, logs2
+ status_with_indicator = (
+ f" {status}"
+ )
+
+ yield token_count, status_with_indicator, logs2
else:
- logs = get_latest_logs(100)
- logs2 = get_latest_logs(100, LOG_QUEUE2)
- yield "操作未完成", "0", " 已终止", logs, logs2
-
+ logs2 = get_latest_logs(100, LOG_QUEUE)
+ yield (
+ "0",
+ " 已终止",
+ logs2,
+ )
+
with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
- gr.Markdown(
- """
+ gr.Markdown(
+ """
# 🦉 OWL 多智能体协作系统
基于CAMEL框架开发的先进多智能体协作系统,旨在通过智能体协作解决复杂问题。
"""
- )
-
- # 添加自定义CSS
- gr.HTML("""
+ )
+
+ # 添加自定义CSS
+ gr.HTML("""
""")
-
- with gr.Row():
- with gr.Column(scale=1):
- question_input = gr.Textbox(
- lines=5,
- placeholder="请输入您的问题...",
- label="问题",
- elem_id="question_input",
+
+ with gr.Row():
+ with gr.Column(scale=1):
+ question_input = gr.Textbox(
+ lines=5,
+ placeholder="请输入您的问题...",
+ label="问题",
+ elem_id="question_input",
+ show_copy_button=True,
+ )
+
+ # 增强版模块选择下拉菜单
+ # 只包含MODULE_DESCRIPTIONS中定义的模块
+ module_dropdown = gr.Dropdown(
+ choices=list(MODULE_DESCRIPTIONS.keys()),
+ value="run_qwen_zh",
+ label="选择功能模块",
+ interactive=True,
+ )
+
+ # 模块描述文本框
+ module_description = gr.Textbox(
+ value=MODULE_DESCRIPTIONS["run_qwen_zh"],
+ label="模块描述",
+ interactive=False,
+ elem_classes="module-info",
+ )
+
+ with gr.Row():
+ run_button = gr.Button(
+ "运行", variant="primary", elem_classes="primary"
+ )
+
+ status_output = gr.HTML(
+ value=" 已就绪",
+ label="状态",
+ )
+ token_count_output = gr.Textbox(
+ label="令牌计数", interactive=False, elem_classes="token-count"
+ )
+
+ with gr.Tabs(): # 设置对话记录为默认选中的标签页
+ with gr.TabItem("对话记录"):
+ # 添加对话记录显示区域
+ log_display2 = gr.Textbox(
+ label="对话记录",
+ lines=25,
+ max_lines=100,
+ interactive=False,
+ autoscroll=True,
show_copy_button=True,
+ elem_classes="log-display",
+ container=True,
+ value="",
)
-
- # 增强版模块选择下拉菜单
- # 只包含MODULE_DESCRIPTIONS中定义的模块
- module_dropdown = gr.Dropdown(
- choices=list(MODULE_DESCRIPTIONS.keys()),
- value="run",
- label="选择功能模块",
- interactive=True
- )
-
- # 模块描述文本框
- module_description = gr.Textbox(
- value=MODULE_DESCRIPTIONS["run"],
- label="模块描述",
- interactive=False,
- elem_classes="module-info"
- )
-
+
with gr.Row():
- run_button = gr.Button("运行", variant="primary", elem_classes="primary")
-
- status_output = gr.HTML(
- value=" 已就绪",
- label="状态"
- )
- token_count_output = gr.Textbox(
- label="令牌计数",
- interactive=False,
- elem_classes="token-count"
- )
-
-
-
- with gr.Tabs(): # 设置对话记录为默认选中的标签页
- with gr.TabItem("对话记录"):
- # 添加对话记录显示区域
- log_display2 = gr.Textbox(
- label="对话记录",
- lines=25,
- max_lines=100,
- interactive=False,
- autoscroll=True,
- show_copy_button=True,
- elem_classes="log-display",
- container=True
+ refresh_logs_button2 = gr.Button("刷新记录")
+ auto_refresh_checkbox2 = gr.Checkbox(
+ label="自动刷新", value=True, interactive=True
)
-
- with gr.Row():
- refresh_logs_button2 = gr.Button("刷新记录")
- auto_refresh_checkbox2 = gr.Checkbox(
- label="自动刷新",
- value=True,
- interactive=True
- )
- clear_logs_button2 = gr.Button("清空记录", variant="secondary")
-
- with gr.TabItem("系统日志"):
- # 添加日志显示区域
- log_display = gr.Textbox(
- label="系统日志",
- lines=25,
- max_lines=100,
- interactive=False,
- autoscroll=True,
- show_copy_button=True,
- elem_classes="log-display",
- container=True
- )
-
- with gr.Row():
- refresh_logs_button = gr.Button("刷新日志")
- auto_refresh_checkbox = gr.Checkbox(
- label="自动刷新",
- value=True,
- interactive=True
- )
- clear_logs_button = gr.Button("清空日志", variant="secondary")
- with gr.TabItem("回答"):
- answer_output = gr.Textbox(
- label="回答",
- lines=10,
- elem_classes="answer-box"
- )
-
+ clear_logs_button2 = gr.Button("清空记录", variant="secondary")
-
-
-
- with gr.TabItem("环境变量管理", id="env-settings"):
+ with gr.TabItem("环境变量管理", id="env-settings"):
+ with gr.Box(elem_classes="env-manager-container"):
gr.Markdown("""
- ## 环境变量管理
-
- 在此处设置模型API密钥和其他服务凭证。这些信息将保存在本地的`.env`文件中,确保您的API密钥安全存储且不会上传到网络。
- """)
-
- # 添加API密钥获取指南
- gr.Markdown("### API密钥获取指南")
-
- for key, info in API_HELP_INFO.items():
- with gr.Accordion(f"{info['name']} ({key})", open=False):
- gr.Markdown(f"""
- - **说明**: {info['desc']}
- - **获取地址**: [{info['url']}]({info['url']})
- """)
-
- gr.Markdown("---")
-
- # 环境变量表格
- env_table = gr.Dataframe(
- headers=["变量名", "值"],
- datatype=["str", "str"],
- row_count=10,
- col_count=(2, "fixed"),
- value=update_env_table,
- label="当前环境变量",
- interactive=False
- )
-
- with gr.Row():
- with gr.Column(scale=1):
- new_env_key = gr.Textbox(label="变量名", placeholder="例如: OPENAI_API_KEY")
- with gr.Column(scale=2):
- new_env_value = gr.Textbox(label="值", placeholder="输入API密钥或其他配置值")
-
- with gr.Row():
- add_env_button = gr.Button("添加/更新变量", variant="primary")
- refresh_button = gr.Button("刷新变量列表")
- delete_env_button = gr.Button("删除选定变量", variant="stop")
-
- env_status = gr.Textbox(label="状态", interactive=False)
-
- # 变量选择器(用于删除)
- env_var_to_delete = gr.Dropdown(
- choices=[],
- label="选择要删除的变量",
- interactive=True
- )
-
- # 更新变量选择器的选项
- def update_delete_dropdown():
- env_vars = load_env_vars()
- return gr.Dropdown.update(choices=list(env_vars.keys()))
-
- # 连接事件处理函数
- add_env_button.click(
- fn=lambda k, v: add_env_var(k, v),
- inputs=[new_env_key, new_env_value],
- outputs=[env_status]
- ).then(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- ).then(
- fn=lambda: ("", ""), # 修改为返回两个空字符串的元组
- outputs=[new_env_key, new_env_value]
- )
-
- refresh_button.click(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- )
-
- delete_env_button.click(
- fn=lambda k: delete_env_var(k),
- inputs=[env_var_to_delete],
- outputs=[env_status]
- ).then(
- fn=update_env_table,
- outputs=[env_table]
- ).then(
- fn=update_delete_dropdown,
- outputs=[env_var_to_delete]
- )
+ ## 环境变量管理
-
-
-
-
- # 示例问题
- examples = [
- "打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,用本地终端执行python文件显示图出来给我",
- "请分析GitHub上CAMEL-AI项目的最新统计数据。找出该项目的星标数量、贡献者名称,把内容整理成一个markdown文件保存到本地",
- "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
- "写一个hello world的python文件,保存到本地",
-
- ]
-
- gr.Examples(
- examples=examples,
- inputs=question_input
- )
-
+ 在此处设置模型API密钥和其他服务凭证。这些信息将保存在本地的`.env`文件中,确保您的API密钥安全存储且不会上传到网络。
+ """)
+ # 主要内容分为两列布局
+ with gr.Row():
+ # 左侧列:环境变量管理控件
+ with gr.Column(scale=3):
+ with gr.Box(elem_classes="env-controls"):
+ # 环境变量表格 - 设置为可交互以直接编辑
+ gr.Markdown("### 环境变量管理")
+ gr.Markdown("""
+ 管理您的API密钥和其他环境变量。正确设置API密钥对于OWL系统的功能至关重要。
+
+
+ 提示: 请确保正确设置API密钥以确保系统功能正常
+
+ """)
+ # 增强版环境变量表格,支持添加和删除行
+ env_table = gr.Dataframe(
+ headers=["变量名", "值", "获取指南"],
+ datatype=[
+ "str",
+ "str",
+ "html",
+ ], # 将最后一列设置为html类型以支持链接
+ row_count=10, # 增加行数,以便添加新变量
+ col_count=(3, "fixed"),
+ value=update_env_table,
+ label="API密钥和环境变量",
+ interactive=True, # 设置为可交互,允许直接编辑
+ elem_classes="env-table",
+ )
-
- gr.HTML("""
+ # 操作说明
+ gr.Markdown(
+ """
+
+
操作指南:
+
+ - 编辑变量: 直接点击表格中的"值"单元格进行编辑
+ - 添加变量: 在空白行中输入新的变量名和值
+ - 删除变量: 清空变量名即可删除该行
+ - 获取API密钥: 点击"获取指南"列中的链接获取相应API密钥
+
+
注意: 所有API密钥都安全地存储在本地,不会上传到网络
+
+ """,
+ elem_classes="env-instructions",
+ )
+
+ # 环境变量操作按钮
+ with gr.Row(elem_classes="env-buttons"):
+ save_env_button = gr.Button(
+ "💾 保存更改",
+ variant="primary",
+ elem_classes="env-button",
+ )
+ refresh_button = gr.Button(
+ "🔄 刷新列表", elem_classes="env-button"
+ )
+
+ # 状态显示
+ env_status = gr.HTML(
+ label="操作状态",
+ value="",
+ elem_classes="env-status",
+ )
+
+ # 连接事件处理函数
+ save_env_button.click(
+ fn=save_env_table_changes,
+ inputs=[env_table],
+ outputs=[env_status],
+ ).then(fn=update_env_table, outputs=[env_table])
+
+ refresh_button.click(fn=update_env_table, outputs=[env_table])
+
+ # 示例问题
+ examples = [
+ "打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,用本地终端执行python文件显示图出来给我",
+ "浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
+ "写一个hello world的python文件,保存到本地",
+ ]
+
+ gr.Examples(examples=examples, inputs=question_input)
+
+ gr.HTML("""
""")
-
- # 设置事件处理
- run_button.click(
- fn=process_with_live_logs,
- inputs=[question_input, module_dropdown],
- outputs=[answer_output, token_count_output, status_output, log_display, log_display2]
- )
-
- # 模块选择更新描述
- module_dropdown.change(
- fn=update_module_description,
- inputs=module_dropdown,
- outputs=module_description
- )
-
- # 日志相关事件处理
- refresh_logs_button.click(
- fn=update_logs,
- outputs=[log_display]
- )
-
- refresh_logs_button2.click(
- fn=update_logs2,
- outputs=[log_display2]
- )
-
- clear_logs_button.click(
- fn=clear_log_file,
- outputs=[log_display]
- )
-
- clear_logs_button2.click(
- fn=clear_log_file,
- outputs=[log_display2]
- )
-
- # 自动刷新控制
- def toggle_auto_refresh(enabled):
- if enabled:
- return gr.update(every=3)
- else:
- return gr.update(every=0)
-
- auto_refresh_checkbox.change(
- fn=toggle_auto_refresh,
- inputs=[auto_refresh_checkbox],
- outputs=[log_display]
- )
-
- auto_refresh_checkbox2.change(
- fn=toggle_auto_refresh,
- inputs=[auto_refresh_checkbox2],
- outputs=[log_display2]
- )
-
- # 设置自动刷新(默认每3秒刷新一次)
- if auto_refresh_checkbox.value:
- app.load(
- fn=update_logs,
- outputs=[log_display],
- every=2
- )
-
- if auto_refresh_checkbox2.value:
- app.load(
- fn=update_logs2,
- outputs=[log_display2],
- every=2
- )
-
+
+ # 设置事件处理
+ run_button.click(
+ fn=process_with_live_logs,
+ inputs=[question_input, module_dropdown],
+ outputs=[token_count_output, status_output, log_display2],
+ )
+
+ # 模块选择更新描述
+ module_dropdown.change(
+ fn=update_module_description,
+ inputs=module_dropdown,
+ outputs=module_description,
+ )
+
+ # 对话记录相关事件处理
+ refresh_logs_button2.click(
+ fn=lambda: get_latest_logs(100, LOG_QUEUE), outputs=[log_display2]
+ )
+
+ clear_logs_button2.click(fn=clear_log_file, outputs=[log_display2])
+
+ # 自动刷新控制
+ def toggle_auto_refresh(enabled):
+ if enabled:
+ return gr.update(every=3)
+ else:
+ return gr.update(every=0)
+
+ auto_refresh_checkbox2.change(
+ fn=toggle_auto_refresh,
+ inputs=[auto_refresh_checkbox2],
+ outputs=[log_display2],
+ )
+
+ # 不再默认自动刷新日志
+
return app
+
# 主函数
def main():
try:
@@ -952,35 +1283,40 @@ def main():
global LOG_FILE
LOG_FILE = setup_logging()
logging.info("OWL Web应用程序启动")
-
+
# 启动日志读取线程
- log_thread = threading.Thread(target=log_reader_thread, args=(LOG_FILE,), daemon=True)
+ log_thread = threading.Thread(
+ target=log_reader_thread, args=(LOG_FILE,), daemon=True
+ )
log_thread.start()
logging.info("日志读取线程已启动")
-
+
# 初始化.env文件(如果不存在)
init_env_file()
app = create_ui()
-
+
# 注册应用关闭时的清理函数
def cleanup():
global STOP_LOG_THREAD, STOP_REQUESTED
STOP_LOG_THREAD.set()
STOP_REQUESTED.set()
logging.info("应用程序关闭,停止日志线程")
- app.queue()
- app.launch(share=False,server_name="127.0.0.1",server_port=7860)
+
+ app.queue()
+ app.launch(share=False, server_name="127.0.0.1", server_port=7860)
except Exception as e:
logging.error(f"启动应用程序时发生错误: {str(e)}")
print(f"启动应用程序时发生错误: {str(e)}")
import traceback
+
traceback.print_exc()
-
+
finally:
# 确保日志线程停止
STOP_LOG_THREAD.set()
STOP_REQUESTED.set()
logging.info("应用程序关闭")
+
if __name__ == "__main__":
- main()
\ No newline at end of file
+ main()
diff --git a/pyproject.toml b/pyproject.toml
index 2fa6908..b69c1f4 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -21,7 +21,7 @@ keywords = [
"learning-systems"
]
dependencies = [
- "camel-ai[all]==0.2.27",
+ "camel-ai[all]==0.2.30",
"chunkr-ai>=0.0.41",
"docx2markdown>=0.1.1",
"gradio>=3.50.2",
diff --git a/requirements.txt b/requirements.txt
index d73c9c6..056bbac 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
-camel-ai[all]==0.2.27
+camel-ai[all]==0.2.30
chunkr-ai>=0.0.41
docx2markdown>=0.1.1
gradio>=3.50.2
diff --git a/uv.lock b/uv.lock
index 5bb2dee..d0990b6 100644
--- a/uv.lock
+++ b/uv.lock
@@ -482,7 +482,7 @@ wheels = [
[[package]]
name = "camel-ai"
-version = "0.2.27"
+version = "0.2.30"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "colorama" },
@@ -499,9 +499,9 @@ dependencies = [
{ name = "pyyaml" },
{ name = "tiktoken" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/ff/27/2bce666ae7f7d0db276d037b3afe84a460e782438e5cacc08de20417233b/camel_ai-0.2.27.tar.gz", hash = "sha256:4689245ad48f51e5e602d2651cf463afe212bcf046633a19c2189574c1f3481a", size = 441363 }
+sdist = { url = "https://files.pythonhosted.org/packages/ef/86/57cbcae86d2d60dab0aad31b5302525c75f45ff5edc3c3819a378fa9e12c/camel_ai-0.2.30.tar.gz", hash = "sha256:e1639376e70e9cf1477eca88d1bdc1813855cbd1db683528e1f93027b6aa0b0a", size = 442842 }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b0/fa/94f5b41cb6babc81aac00494b170ec2bea058b6c00f477ceb3e886c49177/camel_ai-0.2.27-py3-none-any.whl", hash = "sha256:c4a6597791faf2f2161c56c2579e60850557b126135b29af77ebd08fa0774e0b", size = 746387 },
+ { url = "https://files.pythonhosted.org/packages/85/fe/8f1d17896aedbc9e0dfa1bff40d560e5a6808d9b727e04c293be6be5954f/camel_ai-0.2.30-py3-none-any.whl", hash = "sha256:e09eec860331cdb4da4e49f46f5d45345a81820c5847556fdf9e7827dd9bbfa9", size = 752672 },
]
[package.optional-dependencies]
@@ -3575,7 +3575,7 @@ dependencies = [
[package.metadata]
requires-dist = [
- { name = "camel-ai", extras = ["all"], specifier = "==0.2.27" },
+ { name = "camel-ai", extras = ["all"], specifier = "==0.2.30" },
{ name = "chunkr-ai", specifier = ">=0.0.41" },
{ name = "docx2markdown", specifier = ">=0.1.1" },
{ name = "gradio", specifier = ">=3.50.2" },
From 05aebff0282717b174c7e6b83101cfef712eb571 Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:24:31 +0800
Subject: [PATCH 30/38] update wendong
---
README.md | 32 +-
README_zh.md | 30 +-
{owl/examples => examples}/run.py | 0
{owl => examples}/run_azure_openai.py | 0
{owl/examples => examples}/run_deepseek_zh.py | 0
.../run_gaia_roleplaying.py | 0
{owl => examples}/run_mcp.py | 0
{owl/examples => examples}/run_mini.py | 0
{owl/examples => examples}/run_ollama.py | 0
.../run_openai_compatiable_model.py | 0
.../examples => examples}/run_qwen_mini_zh.py | 0
{owl/examples => examples}/run_qwen_zh.py | 0
{owl/examples => examples}/run_terminal.py | 0
{owl/examples => examples}/run_terminal_zh.py | 1 -
owl/.env_template | 16 +-
owl/utils/enhanced_role_playing.py | 4 +
owl/webapp.py | 1316 +++++++++++++++++
owl/webapp_zh.py | 114 +-
18 files changed, 1398 insertions(+), 115 deletions(-)
rename {owl/examples => examples}/run.py (100%)
rename {owl => examples}/run_azure_openai.py (100%)
rename {owl/examples => examples}/run_deepseek_zh.py (100%)
rename {owl/examples => examples}/run_gaia_roleplaying.py (100%)
rename {owl => examples}/run_mcp.py (100%)
rename {owl/examples => examples}/run_mini.py (100%)
rename {owl/examples => examples}/run_ollama.py (100%)
rename {owl/examples => examples}/run_openai_compatiable_model.py (100%)
rename {owl/examples => examples}/run_qwen_mini_zh.py (100%)
rename {owl/examples => examples}/run_qwen_zh.py (100%)
rename {owl/examples => examples}/run_terminal.py (100%)
rename {owl/examples => examples}/run_terminal_zh.py (99%)
create mode 100644 owl/webapp.py
diff --git a/README.md b/README.md
index d26b1ac..8e563a7 100644
--- a/README.md
+++ b/README.md
@@ -224,7 +224,7 @@ OWL requires various API keys to interact with different services. The `owl/.env
2. **Configure Your API Keys**:
Open the `.env` file in your preferred text editor and insert your API keys in the corresponding fields.
- > **Note**: For the minimal example (`run_mini.py`), you only need to configure the LLM API key (e.g., `OPENAI_API_KEY`).
+ > **Note**: For the minimal example (`examples/run_mini.py`), you only need to configure the LLM API key (e.g., `OPENAI_API_KEY`).
### Option 2: Setting Environment Variables Directly
@@ -275,7 +275,7 @@ cd .. && source .venv/bin/activate && cd owl
playwright install-deps
#run example demo script
-xvfb-python run.py
+xvfb-python examples/run.py
# Option 2: Build and run using the provided scripts
cd .container
@@ -299,17 +299,17 @@ npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup
# Run the MCP example
-python owl/run_mcp.py
+python examples/run_mcp.py
```
-This example showcases how OWL agents can seamlessly interact with file systems, web automation, and information retrieval through the MCP protocol. Check out `owl/run_mcp.py` for the full implementation.
+This example showcases how OWL agents can seamlessly interact with file systems, web automation, and information retrieval through the MCP protocol. Check out `examples/run_mcp.py` for the full implementation.
## Basic Usage
After installation and setting up your environment variables, you can start using OWL right away:
```bash
-python owl/run.py
+python examples/run.py
```
## Running with Different Models
@@ -330,28 +330,28 @@ OWL supports various LLM backends, though capabilities may vary depending on the
```bash
# Run with Qwen model
-python owl/examples/run_qwen_zh.py
+python examples/run_qwen_zh.py
# Run with Deepseek model
-python owl/examples/run_deepseek_zh.py
+python examples/run_deepseek_zh.py
# Run with other OpenAI-compatible models
-python owl/examples/run_openai_compatiable_model.py
+python examples/run_openai_compatiable_model.py
# Run with Azure OpenAI
-python owl/run_azure_openai.py
+python examples/run_azure_openai.py
# Run with Ollama
-python owl/examples/run_ollama.py
+python examples/run_ollama.py
```
For a simpler version that only requires an LLM API key, you can try our minimal example:
```bash
-python owl/examples/run_mini.py
+python examples/run_mini.py
```
-You can run OWL agent with your own task by modifying the `run.py` script:
+You can run OWL agent with your own task by modifying the `examples/run.py` script:
```python
# Define your own task
@@ -393,7 +393,7 @@ Here are some tasks you can try with OWL:
OWL's MCP integration provides a standardized way for AI models to interact with various tools and data sources:
-Try our comprehensive MCP example in `owl/run_mcp.py` to see these capabilities in action!
+Try our comprehensive MCP example in `examples/run_mcp.py` to see these capabilities in action!
## Available Toolkits
@@ -464,10 +464,10 @@ OWL includes an intuitive web-based user interface that makes it easier to inter
```bash
# Start the Chinese version
-python owl/webapp_zh.py
+python examples/webapp_zh.py
# Start the English version
-python owl/webapp.py
+python examples/webapp.py
```
## Features
@@ -545,7 +545,7 @@ Join us ([*Discord*](https://discord.camel-ai.org/) or [*WeChat*](https://ghli.o
Join us for further discussions!
-
+
# ❓ FAQ
diff --git a/README_zh.md b/README_zh.md
index da0622f..b15c114 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -219,7 +219,7 @@ OWL 需要各种 API 密钥来与不同的服务进行交互。`owl/.env_templat
2. **配置你的 API 密钥**:
在你喜欢的文本编辑器中打开 `.env` 文件,并在相应字段中插入你的 API 密钥。
- > **注意**:对于最小示例(`run_mini.py`),你只需要配置 LLM API 密钥(例如,`OPENAI_API_KEY`)。
+ > **注意**:对于最小示例(`examples/run_mini.py`),你只需要配置 LLM API 密钥(例如,`OPENAI_API_KEY`)。
### 选项 2:直接设置环境变量
@@ -269,7 +269,7 @@ cd .. && source .venv/bin/activate && cd owl
playwright install-deps
#运行例子演示脚本
-xvfb-python run.py
+xvfb-python examples/run.py
# 选项2:使用提供的脚本构建和运行
cd .container
@@ -293,23 +293,23 @@ npx -y @smithery/cli install @wonderwhy-er/desktop-commander --client claude
npx @wonderwhy-er/desktop-commander setup
# 运行 MCP 示例
-python owl/run_mcp.py
+python examples/run_mcp.py
```
-这个示例展示了 OWL 智能体如何通过 MCP 协议无缝地与文件系统、网页自动化和信息检索进行交互。查看 `owl/run_mcp.py` 了解完整实现。
+这个示例展示了 OWL 智能体如何通过 MCP 协议无缝地与文件系统、网页自动化和信息检索进行交互。查看 `examples/run_mcp.py` 了解完整实现。
## 基本用法
运行以下示例:
```bash
-python owl/run.py
+python examples/run.py
```
我们还提供了一个最小化示例,只需配置LLM的API密钥即可运行:
```bash
-python owl/run_mini.py
+python examples/run_mini.py
```
## 使用不同的模型
@@ -330,22 +330,22 @@ OWL 支持多种 LLM 后端,但功能可能因模型的工具调用和多模
```bash
# 使用 Qwen 模型运行
-python owl/examples/run_qwen_zh.py
+python examples/run_qwen_zh.py
# 使用 Deepseek 模型运行
-python owl/examples/run_deepseek_zh.py
+python examples/run_deepseek_zh.py
# 使用其他 OpenAI 兼容模型运行
-python owl/examples/run_openai_compatiable_model.py
+python examples/run_openai_compatiable_model.py
# 使用 Azure OpenAI模型运行
-python owl/run_azure_openai.py
+python examples/run_azure_openai.py
# 使用 Ollama 运行
-python owl/examples/run_ollama.py
+python examples/run_ollama.py
```
-你可以通过修改 `run.py` 脚本来运行自己的任务:
+你可以通过修改 `examples/run.py` 脚本来运行自己的任务:
```python
# Define your own task
@@ -383,7 +383,7 @@ OWL 将自动调用与文档相关的工具来处理文件并提取答案。
OWL 的 MCP 集成为 AI 模型与各种工具和数据源的交互提供了标准化的方式。
-查看我们的综合示例 `owl/run_mcp.py` 来体验这些功能!
+查看我们的综合示例 `examples/run_mcp.py` 来体验这些功能!
## 可用工具包
@@ -479,7 +479,7 @@ git checkout gaia58.18
2. 运行评估脚本:
```bash
-python run_gaia_roleplaying.py
+python examples/run_gaia_roleplaying.py
```
# ⏱️ 未来计划
@@ -531,7 +531,7 @@ python run_gaia_roleplaying.py
加入我们,参与更多讨论!
-
+
# ❓ 常见问题
diff --git a/owl/examples/run.py b/examples/run.py
similarity index 100%
rename from owl/examples/run.py
rename to examples/run.py
diff --git a/owl/run_azure_openai.py b/examples/run_azure_openai.py
similarity index 100%
rename from owl/run_azure_openai.py
rename to examples/run_azure_openai.py
diff --git a/owl/examples/run_deepseek_zh.py b/examples/run_deepseek_zh.py
similarity index 100%
rename from owl/examples/run_deepseek_zh.py
rename to examples/run_deepseek_zh.py
diff --git a/owl/examples/run_gaia_roleplaying.py b/examples/run_gaia_roleplaying.py
similarity index 100%
rename from owl/examples/run_gaia_roleplaying.py
rename to examples/run_gaia_roleplaying.py
diff --git a/owl/run_mcp.py b/examples/run_mcp.py
similarity index 100%
rename from owl/run_mcp.py
rename to examples/run_mcp.py
diff --git a/owl/examples/run_mini.py b/examples/run_mini.py
similarity index 100%
rename from owl/examples/run_mini.py
rename to examples/run_mini.py
diff --git a/owl/examples/run_ollama.py b/examples/run_ollama.py
similarity index 100%
rename from owl/examples/run_ollama.py
rename to examples/run_ollama.py
diff --git a/owl/examples/run_openai_compatiable_model.py b/examples/run_openai_compatiable_model.py
similarity index 100%
rename from owl/examples/run_openai_compatiable_model.py
rename to examples/run_openai_compatiable_model.py
diff --git a/owl/examples/run_qwen_mini_zh.py b/examples/run_qwen_mini_zh.py
similarity index 100%
rename from owl/examples/run_qwen_mini_zh.py
rename to examples/run_qwen_mini_zh.py
diff --git a/owl/examples/run_qwen_zh.py b/examples/run_qwen_zh.py
similarity index 100%
rename from owl/examples/run_qwen_zh.py
rename to examples/run_qwen_zh.py
diff --git a/owl/examples/run_terminal.py b/examples/run_terminal.py
similarity index 100%
rename from owl/examples/run_terminal.py
rename to examples/run_terminal.py
diff --git a/owl/examples/run_terminal_zh.py b/examples/run_terminal_zh.py
similarity index 99%
rename from owl/examples/run_terminal_zh.py
rename to examples/run_terminal_zh.py
index 2174bd1..f0a290d 100644
--- a/owl/examples/run_terminal_zh.py
+++ b/examples/run_terminal_zh.py
@@ -25,7 +25,6 @@ from camel.logger import set_log_level
from owl.utils import run_society
from camel.societies import RolePlaying
-import os
load_dotenv()
set_log_level(level="DEBUG")
diff --git a/owl/.env_template b/owl/.env_template
index 9e4328f..c30f21e 100644
--- a/owl/.env_template
+++ b/owl/.env_template
@@ -4,7 +4,7 @@
#===========================================
# OPENAI API (https://platform.openai.com/api-keys)
-# OPENAI_API_KEY= ""
+OPENAI_API_KEY='Your_Key'
# OPENAI_API_BASE_URL=""
# Azure OpenAI API
@@ -15,22 +15,22 @@
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
-# QWEN_API_KEY=""
+QWEN_API_KEY='Your_Key'
# DeepSeek API (https://platform.deepseek.com/api_keys)
-# DEEPSEEK_API_KEY=""
+DEEPSEEK_API_KEY='Your_Key'
#===========================================
# Tools & Services API
#===========================================
-# Google Search API (https://developers.google.com/custom-search/v1/overview)
-# GOOGLE_API_KEY=""
-# SEARCH_ENGINE_ID=""
+# Google Search API (https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3)
+GOOGLE_API_KEY='Your_Key'
+SEARCH_ENGINE_ID='Your_ID'
# Chunkr API (https://chunkr.ai/)
-# CHUNKR_API_KEY=""
+CHUNKR_API_KEY='Your_Key'
# Firecrawl API (https://www.firecrawl.dev/)
-#FIRECRAWL_API_KEY=""
+FIRECRAWL_API_KEY='Your_Key'
#FIRECRAWL_API_URL="https://api.firecrawl.dev"
\ No newline at end of file
diff --git a/owl/utils/enhanced_role_playing.py b/owl/utils/enhanced_role_playing.py
index d50b337..76bf757 100644
--- a/owl/utils/enhanced_role_playing.py
+++ b/owl/utils/enhanced_role_playing.py
@@ -461,6 +461,10 @@ def run_society(
assistant_response.info["usage"]["completion_tokens"]
+ user_response.info["usage"]["completion_tokens"]
)
+ overall_prompt_token_count += (
+ assistant_response.info["usage"]["prompt_tokens"]
+ + user_response.info["usage"]["prompt_tokens"]
+ )
# convert tool call to dict
tool_call_records: List[dict] = []
diff --git a/owl/webapp.py b/owl/webapp.py
new file mode 100644
index 0000000..52b26c6
--- /dev/null
+++ b/owl/webapp.py
@@ -0,0 +1,1316 @@
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
+# Import from the correct module path
+from owl.utils import run_society
+import os
+import gradio as gr
+import time
+import json
+import logging
+import datetime
+from typing import Tuple
+import importlib
+from dotenv import load_dotenv, set_key, find_dotenv, unset_key
+import threading
+import queue
+import re # For regular expression operations
+
+os.environ["PYTHONIOENCODING"] = "utf-8"
+
+
+# Configure logging system
+def setup_logging():
+    """Configure logging system to output logs to file, memory queue, and console.
+
+    Side effect: replaces every handler on the process-wide root logger, so
+    any logging configuration done before this call is discarded.
+
+    Returns:
+        str: Path of the dated log file the file handler appends to.
+    """
+    # Create logs directory (if it doesn't exist)
+    logs_dir = os.path.join(os.path.dirname(__file__), "logs")
+    os.makedirs(logs_dir, exist_ok=True)
+
+    # Generate log filename (using current date)
+    current_date = datetime.datetime.now().strftime("%Y-%m-%d")
+    log_file = os.path.join(logs_dir, f"gradio_log_{current_date}.txt")
+
+    # Configure root logger (captures all logs)
+    root_logger = logging.getLogger()
+
+    # Clear existing handlers to avoid duplicate logs
+    for handler in root_logger.handlers[:]:
+        root_logger.removeHandler(handler)
+
+    root_logger.setLevel(logging.INFO)
+
+    # Create file handler (append mode, so restarts on the same day share a file)
+    file_handler = logging.FileHandler(log_file, encoding="utf-8", mode="a")
+    file_handler.setLevel(logging.INFO)
+
+    # Create console handler
+    console_handler = logging.StreamHandler()
+    console_handler.setLevel(logging.INFO)
+
+    # Create formatter
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+    file_handler.setFormatter(formatter)
+    console_handler.setFormatter(formatter)
+
+    # Add handlers to root logger
+    root_logger.addHandler(file_handler)
+    root_logger.addHandler(console_handler)
+
+    logging.info("Logging system initialized, log file: %s", log_file)
+    return log_file
+
+
+# Global variables
+LOG_FILE = None  # Path of the current log file; assigned by setup_logging() in main()
+LOG_QUEUE: queue.Queue = queue.Queue()  # Log queue
+STOP_LOG_THREAD = threading.Event()  # Signals log_reader_thread to exit
+CURRENT_PROCESS = None  # Used to track the currently running process
+STOP_REQUESTED = threading.Event()  # Used to mark if stop was requested
+
+
+# Log reading and updating functions
+def log_reader_thread(log_file):
+    """Background thread that continuously reads the log file and adds new lines to the queue.
+
+    Tails *log_file* from its current end (content written before startup is
+    skipped) and pushes each new line onto LOG_QUEUE until STOP_LOG_THREAD is set.
+    """
+    try:
+        with open(log_file, "r", encoding="utf-8") as f:
+            # Move to the end of file (seek(0, 2) == seek to EOF)
+            f.seek(0, 2)
+
+            while not STOP_LOG_THREAD.is_set():
+                line = f.readline()
+                if line:
+                    LOG_QUEUE.put(line) # Add to conversation record queue
+                else:
+                    # No new lines, wait for a short time
+                    time.sleep(0.1)
+    except Exception as e:
+        logging.error(f"Log reader thread error: {str(e)}")
+
+
+def get_latest_logs(max_lines=100, queue_source=None):
+    """Get the latest log lines from the queue, or read directly from the file if the queue is empty
+
+    Args:
+        max_lines: Maximum number of lines to return
+        queue_source: Specify which queue to use, default is LOG_QUEUE
+
+    Returns:
+        str: Log content
+    """
+    logs = []
+    log_queue = queue_source if queue_source else LOG_QUEUE
+
+    # Create a temporary queue to store logs so we can process them without removing them from the original queue
+    # NOTE(review): get_nowait() permanently removes entries from the source
+    # queue and temp_queue is never drained back into it, so despite the
+    # comment above the original queue IS consumed here — confirm intent.
+    temp_queue = queue.Queue()
+    temp_logs = []
+
+    try:
+        # Try to get all available log lines from the queue
+        while not log_queue.empty() and len(temp_logs) < max_lines:
+            log = log_queue.get_nowait()
+            temp_logs.append(log)
+            temp_queue.put(log) # Put the log back into the temporary queue
+    except queue.Empty:
+        pass
+
+    # Process conversation records
+    logs = temp_logs
+
+    # If there are no new logs or not enough logs, try to read the last few lines directly from the file
+    if len(logs) < max_lines and LOG_FILE and os.path.exists(LOG_FILE):
+        try:
+            with open(LOG_FILE, "r", encoding="utf-8") as f:
+                all_lines = f.readlines()
+                # If there are already some logs in the queue, only read the remaining needed lines
+                remaining_lines = max_lines - len(logs)
+                file_logs = (
+                    all_lines[-remaining_lines:]
+                    if len(all_lines) > remaining_lines
+                    else all_lines
+                )
+
+                # Add file logs before queue logs
+                logs = file_logs + logs
+        except Exception as e:
+            error_msg = f"Error reading log file: {str(e)}"
+            logging.error(error_msg)
+            if not logs: # Only add error message if there are no logs
+                logs = [error_msg]
+
+    # If there are still no logs, return a prompt message
+    if not logs:
+        return "Initialization in progress..."
+
+    # Filter logs, only keep logs with 'camel.agents.chat_agent - INFO'
+    filtered_logs = []
+    for log in logs:
+        if "camel.agents.chat_agent - INFO" in log:
+            filtered_logs.append(log)
+
+    # If there are no logs after filtering, return a prompt message
+    if not filtered_logs:
+        return "No conversation records yet."
+
+    # Process log content, extract the latest user and assistant messages
+    simplified_logs = []
+
+    # Use a set to track messages that have already been processed, to avoid duplicates
+    processed_messages = set()
+
+    def process_message(role, content):
+        # Create a unique identifier to track this message
+        msg_id = f"{role}:{content}"
+        if msg_id in processed_messages:
+            return None
+
+        processed_messages.add(msg_id)
+        content = content.replace("\\n", "\n")
+        lines = [line.strip() for line in content.split("\n")]
+        content = "\n".join(lines)
+
+        return f"[{role.title()} Agent]: {content}"
+
+    for log in filtered_logs:
+        formatted_messages = []
+        # Try to extract the JSON message array from the log line
+        messages_match = re.search(
+            r"Model (.*?), index (\d+), processed these messages: (\[.*\])", log
+        )
+
+        if messages_match:
+            try:
+                messages = json.loads(messages_match.group(3))
+                for msg in messages:
+                    if msg.get("role") in ["user", "assistant"]:
+                        formatted_msg = process_message(
+                            msg.get("role"), msg.get("content", "")
+                        )
+                        if formatted_msg:
+                            formatted_messages.append(formatted_msg)
+            except json.JSONDecodeError:
+                pass
+
+        # If JSON parsing fails or no message array is found, try to extract conversation content directly
+        if not formatted_messages:
+            user_pattern = re.compile(r"\{'role': 'user', 'content': '(.*?)'\}")
+            assistant_pattern = re.compile(
+                r"\{'role': 'assistant', 'content': '(.*?)'\}"
+            )
+
+            for content in user_pattern.findall(log):
+                formatted_msg = process_message("user", content)
+                if formatted_msg:
+                    formatted_messages.append(formatted_msg)
+
+            for content in assistant_pattern.findall(log):
+                formatted_msg = process_message("assistant", content)
+                if formatted_msg:
+                    formatted_messages.append(formatted_msg)
+
+        if formatted_messages:
+            simplified_logs.append("\n\n".join(formatted_messages))
+
+    # Format log output, ensure appropriate separation between each conversation record
+    formatted_logs = []
+    for i, log in enumerate(simplified_logs):
+        # Remove excess whitespace characters from beginning and end
+        log = log.strip()
+
+        formatted_logs.append(log)
+
+        # Ensure each conversation record ends with a newline
+        if not log.endswith("\n"):
+            formatted_logs.append("\n")
+
+    return "".join(formatted_logs)
+
+
+# Dictionary containing module descriptions
+# NOTE: keys double as module names — run_owl() imports "examples.<key>",
+# so every key listed here must match a script under examples/.
+MODULE_DESCRIPTIONS = {
+    "run": "Default mode: Using OpenAI model's default agent collaboration mode, suitable for most tasks.",
+    "run_mini": "Using OpenAI model with minimal configuration to process tasks",
+    "run_deepseek_zh": "Using deepseek model to process Chinese tasks",
+    "run_openai_compatiable_model": "Using openai compatible model to process tasks",
+    "run_ollama": "Using local ollama model to process tasks",
+    "run_qwen_mini_zh": "Using qwen model with minimal configuration to process tasks",
+    "run_qwen_zh": "Using qwen model to process tasks",
+}
+
+
+# Default environment variable template
+# Written verbatim to a fresh .env by init_env_file() when no .env exists.
+# The 'Your_Key' values are placeholders for the user to replace, not secrets.
+DEFAULT_ENV_TEMPLATE = """#===========================================
+# MODEL & API
+# (See https://docs.camel-ai.org/key_modules/models.html#)
+#===========================================
+
+# OPENAI API (https://platform.openai.com/api-keys)
+OPENAI_API_KEY='Your_Key'
+# OPENAI_API_BASE_URL=""
+
+# Azure OpenAI API
+# AZURE_OPENAI_BASE_URL=""
+# AZURE_API_VERSION=""
+# AZURE_OPENAI_API_KEY=""
+# AZURE_DEPLOYMENT_NAME=""
+
+
+# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
+QWEN_API_KEY='Your_Key'
+
+# DeepSeek API (https://platform.deepseek.com/api_keys)
+DEEPSEEK_API_KEY='Your_Key'
+
+#===========================================
+# Tools & Services API
+#===========================================
+
+# Google Search API (https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3)
+GOOGLE_API_KEY='Your_Key'
+SEARCH_ENGINE_ID='Your_ID'
+
+# Chunkr API (https://chunkr.ai/)
+CHUNKR_API_KEY='Your_Key'
+
+# Firecrawl API (https://www.firecrawl.dev/)
+FIRECRAWL_API_KEY='Your_Key'
+#FIRECRAWL_API_URL="https://api.firecrawl.dev"
+"""
+
+
+def validate_input(question: str) -> bool:
+    """Validate if user input is valid
+
+    Args:
+        question: User question
+
+    Returns:
+        bool: Whether the input is valid
+    """
+    # Check if input is empty or contains only spaces
+    # (strip() rejects whitespace-only strings; `not question` covers None/"")
+    if not question or question.strip() == "":
+        return False
+    return True
+
+
+def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
+    """Run the OWL system and return results
+
+    Args:
+        question: User question
+        example_module: Example module name to import (e.g., "run_terminal_zh" or "run_deep")
+
+    Returns:
+        Tuple[str, str, str]: (answer text, token-count summary, status message);
+        on any failure the first element carries the error description and the
+        token-count field is "0".
+    """
+    global CURRENT_PROCESS
+
+    # Validate input
+    if not validate_input(question):
+        logging.warning("User submitted invalid input")
+        return (
+            "Please enter a valid question",
+            "0",
+            "❌ Error: Invalid input question",
+        )
+
+    try:
+        # Ensure environment variables are loaded
+        load_dotenv(find_dotenv(), override=True)
+        logging.info(
+            f"Processing question: '{question}', using module: {example_module}"
+        )
+
+        # Check if the module is in MODULE_DESCRIPTIONS
+        if example_module not in MODULE_DESCRIPTIONS:
+            logging.error(f"User selected an unsupported module: {example_module}")
+            return (
+                f"Selected module '{example_module}' is not supported",
+                "0",
+                "❌ Error: Unsupported module",
+            )
+
+        # Dynamically import target module (must live under the examples package)
+        module_path = f"examples.{example_module}"
+        try:
+            logging.info(f"Importing module: {module_path}")
+            module = importlib.import_module(module_path)
+        except ImportError as ie:
+            logging.error(f"Unable to import module {module_path}: {str(ie)}")
+            return (
+                f"Unable to import module: {module_path}",
+                "0",
+                f"❌ Error: Module {example_module} does not exist or cannot be loaded - {str(ie)}",
+            )
+        except Exception as e:
+            logging.error(
+                f"Error occurred while importing module {module_path}: {str(e)}"
+            )
+            return (
+                f"Error occurred while importing module: {module_path}",
+                "0",
+                f"❌ Error: {str(e)}",
+            )
+
+        # Check if it contains the construct_society function
+        if not hasattr(module, "construct_society"):
+            logging.error(
+                f"construct_society function not found in module {module_path}"
+            )
+            return (
+                f"construct_society function not found in module {module_path}",
+                "0",
+                "❌ Error: Module interface incompatible",
+            )
+
+        # Build society simulation
+        try:
+            logging.info("Building society simulation...")
+            society = module.construct_society(question)
+
+        except Exception as e:
+            logging.error(f"Error occurred while building society simulation: {str(e)}")
+            return (
+                f"Error occurred while building society simulation: {str(e)}",
+                "0",
+                f"❌ Error: Build failed - {str(e)}",
+            )
+
+        # Run society simulation
+        try:
+            logging.info("Running society simulation...")
+            answer, chat_history, token_info = run_society(society)
+            logging.info("Society simulation completed")
+        except Exception as e:
+            logging.error(f"Error occurred while running society simulation: {str(e)}")
+            return (
+                f"Error occurred while running society simulation: {str(e)}",
+                "0",
+                f"❌ Error: Run failed - {str(e)}",
+            )
+
+        # Safely get token count
+        if not isinstance(token_info, dict):
+            token_info = {}
+
+        completion_tokens = token_info.get("completion_token_count", 0)
+        prompt_tokens = token_info.get("prompt_token_count", 0)
+        total_tokens = completion_tokens + prompt_tokens
+
+        logging.info(
+            f"Processing completed, token usage: completion={completion_tokens}, prompt={prompt_tokens}, total={total_tokens}"
+        )
+
+        return (
+            answer,
+            f"Completion tokens: {completion_tokens:,} | Prompt tokens: {prompt_tokens:,} | Total: {total_tokens:,}",
+            "✅ Successfully completed",
+        )
+
+    except Exception as e:
+        logging.error(
+            f"Uncaught error occurred while processing the question: {str(e)}"
+        )
+        return (f"Error occurred: {str(e)}", "0", f"❌ Error: {str(e)}")
+
+
+def update_module_description(module_name: str) -> str:
+    """Return the description of the selected module (fallback text when unknown)."""
+    return MODULE_DESCRIPTIONS.get(module_name, "No description available")
+
+
+# Store environment variables configured from the frontend
+# (highest-priority source in load_env_vars; values are mirrored into os.environ)
+WEB_FRONTEND_ENV_VARS: dict[str, str] = {}
+
+
+def init_env_file():
+    """Initialize .env file if it doesn't exist.
+
+    When find_dotenv() locates nothing, writes DEFAULT_ENV_TEMPLATE to ./.env
+    (relative to the current working directory, not to this module's directory).
+
+    Returns:
+        str: Path to the .env file as reported by find_dotenv().
+    """
+    dotenv_path = find_dotenv()
+    if not dotenv_path:
+        with open(".env", "w") as f:
+            f.write(DEFAULT_ENV_TEMPLATE)
+        dotenv_path = find_dotenv()
+    return dotenv_path
+
+
+def load_env_vars():
+    """Load environment variables and return as dictionary format
+
+    Priority (later sources overwrite earlier ones):
+    system environment < .env file < frontend configuration.
+
+    Returns:
+        dict: Environment variable dictionary, each value is a tuple containing value and source (value, source)
+    """
+    dotenv_path = init_env_file()
+    load_dotenv(dotenv_path, override=True)
+
+    # Read environment variables from .env file
+    env_file_vars = {}
+    with open(dotenv_path, "r") as f:
+        for line in f:
+            line = line.strip()
+            if line and not line.startswith("#"):
+                if "=" in line:
+                    key, value = line.split("=", 1)
+                    env_file_vars[key.strip()] = value.strip().strip("\"'")
+
+    # Get from system environment variables
+    system_env_vars = {
+        k: v
+        for k, v in os.environ.items()
+        if k not in env_file_vars and k not in WEB_FRONTEND_ENV_VARS
+    }
+
+    # Merge environment variables and mark sources
+    env_vars = {}
+
+    # Add system environment variables (lowest priority)
+    for key, value in system_env_vars.items():
+        env_vars[key] = (value, "System")
+
+    # Add .env file environment variables (medium priority)
+    for key, value in env_file_vars.items():
+        env_vars[key] = (value, ".env file")
+
+    # Add frontend configured environment variables (highest priority)
+    for key, value in WEB_FRONTEND_ENV_VARS.items():
+        env_vars[key] = (value, "Frontend configuration")
+        # Ensure operating system environment variables are also updated
+        os.environ[key] = value
+
+    return env_vars
+
+
+def save_env_vars(env_vars):
+    """Save environment variables to .env file
+
+    Args:
+        env_vars: Dictionary, keys are environment variable names, values can be strings or (value, source) tuples
+
+    Returns:
+        tuple[bool, str]: (success flag, human-readable status message)
+    """
+    try:
+        dotenv_path = init_env_file()
+
+        # Save each environment variable
+        for key, value_data in env_vars.items():
+            if key and key.strip(): # Ensure key is not empty
+                # Handle case where value might be a tuple
+                if isinstance(value_data, tuple):
+                    value = value_data[0]
+                else:
+                    value = value_data
+
+                set_key(dotenv_path, key.strip(), value.strip())
+
+        # Reload environment variables to ensure they take effect
+        load_dotenv(dotenv_path, override=True)
+
+        return True, "Environment variables have been successfully saved!"
+    except Exception as e:
+        return False, f"Error saving environment variables: {str(e)}"
+
+
+def add_env_var(key, value, from_frontend=True):
+    """Add or update a single environment variable
+
+    Args:
+        key: Environment variable name
+        value: Environment variable value
+        from_frontend: Whether it's from frontend configuration, default is True
+
+    Returns:
+        tuple[bool, str]: (success flag, human-readable status message)
+    """
+    try:
+        if not key or not key.strip():
+            return False, "Variable name cannot be empty"
+
+        key = key.strip()
+        value = value.strip()
+
+        # If from frontend, add to frontend environment variable dictionary
+        if from_frontend:
+            WEB_FRONTEND_ENV_VARS[key] = value
+            # Directly update system environment variables
+            os.environ[key] = value
+
+        # Also update .env file (persists the value across restarts)
+        dotenv_path = init_env_file()
+        set_key(dotenv_path, key, value)
+        load_dotenv(dotenv_path, override=True)
+
+        return True, f"Environment variable {key} has been successfully added/updated!"
+    except Exception as e:
+        return False, f"Error adding environment variable: {str(e)}"
+
+
+def delete_env_var(key):
+    """Delete environment variable
+
+    Removes *key* from the .env file, the frontend override dict, and os.environ.
+
+    Returns:
+        tuple[bool, str]: (success flag, human-readable status message)
+    """
+    try:
+        if not key or not key.strip():
+            return False, "Variable name cannot be empty"
+
+        key = key.strip()
+
+        # Delete from .env file
+        dotenv_path = init_env_file()
+        unset_key(dotenv_path, key)
+
+        # Delete from frontend environment variable dictionary
+        if key in WEB_FRONTEND_ENV_VARS:
+            del WEB_FRONTEND_ENV_VARS[key]
+
+        # Also delete from current process environment
+        if key in os.environ:
+            del os.environ[key]
+
+        return True, f"Environment variable {key} has been successfully deleted!"
+    except Exception as e:
+        return False, f"Error deleting environment variable: {str(e)}"
+
+
+def is_api_related(key: str) -> bool:
+    """Determine if an environment variable is API-related
+
+    Args:
+        key: Environment variable name
+
+    Returns:
+        bool: Whether it's API-related
+    """
+    # API-related keywords (substring match, so e.g. "OPENAI_API_BASE_URL"
+    # matches via both "api" and "openai")
+    api_keywords = [
+        "api",
+        "key",
+        "token",
+        "secret",
+        "password",
+        "openai",
+        "qwen",
+        "deepseek",
+        "google",
+        "search",
+        "hf",
+        "hugging",
+        "chunkr",
+        "firecrawl",
+    ]
+
+    # Check if it contains API-related keywords (case insensitive)
+    return any(keyword in key.lower() for keyword in api_keywords)
+
+
+def get_api_guide(key: str) -> str:
+    """Return the corresponding API guide based on the environment variable name
+
+    Args:
+        key: Environment variable name
+
+    Returns:
+        str: API guide link or description (empty string when no guide matches)
+    """
+    key_lower = key.lower()
+    if "openai" in key_lower:
+        return "https://platform.openai.com/api-keys"
+    elif "qwen" in key_lower or "dashscope" in key_lower:
+        return "https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key"
+    elif "deepseek" in key_lower:
+        return "https://platform.deepseek.com/api_keys"
+    elif "google" in key_lower:
+        return "https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3"
+    elif "search_engine_id" in key_lower:
+        return "https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3"
+    elif "chunkr" in key_lower:
+        return "https://chunkr.ai/"
+    elif "firecrawl" in key_lower:
+        return "https://www.firecrawl.dev/"
+    else:
+        return ""
+
+
+def update_env_table():
+    """Update environment variable table display, only showing API-related environment variables"""
+    env_vars = load_env_vars()
+    # Filter out API-related environment variables
+    api_env_vars = {k: v for k, v in env_vars.items() if is_api_related(k)}
+    # Convert to list format to meet Gradio Dataframe requirements
+    # Format: [Variable name, Variable value, Guide link]
+    result = []
+    for k, v in api_env_vars.items():
+        guide = get_api_guide(k)
+        # If there's a guide link, create a clickable link
+        # NOTE(review): this f-string has no placeholder, so every guide cell
+        # renders the literal text "🔗 Get" — the anchor markup that should use
+        # `guide` appears to have been lost; confirm the intended HTML.
+        guide_link = (
+            f"🔗 Get"
+            if guide
+            else ""
+        )
+        result.append([k, v[0], guide_link])
+    return result
+
+
+def save_env_table_changes(data):
+    """Save changes to the environment variable table
+
+    Args:
+        data: Dataframe data, possibly a pandas DataFrame object
+
+    Returns:
+        str: Operation status information, containing HTML-formatted status message
+    """
+    try:
+        logging.info(
+            f"Starting to process environment variable table data, type: {type(data)}"
+        )
+
+        # Get all current environment variables
+        current_env_vars = load_env_vars()
+        processed_keys = set() # Record processed keys to detect deleted variables
+
+        # Handle pandas DataFrame objects
+        import pandas as pd
+
+        if isinstance(data, pd.DataFrame):
+            # Get column name information
+            columns = data.columns.tolist()
+            logging.info(f"DataFrame column names: {columns}")
+
+            # Iterate through each row of the DataFrame
+            for index, row in data.iterrows():
+                # Access the data by column position
+                if len(columns) >= 3:
+                    # Get variable name and value (column 0 is name, column 1 is value)
+                    key = row[0] if isinstance(row, pd.Series) else row.iloc[0]
+                    value = row[1] if isinstance(row, pd.Series) else row.iloc[1]
+
+                    # Check if it's an empty row or deleted variable
+                    if (
+                        key and str(key).strip()
+                    ): # If key name is not empty, add or update
+                        logging.info(
+                            f"Processing environment variable: {key} = {value}"
+                        )
+                        add_env_var(key, str(value))
+                        processed_keys.add(key)
+        # Handle other payload formats
+        elif isinstance(data, dict):
+            logging.info(f"Dictionary format data keys: {list(data.keys())}")
+            # If dict-formatted, try the known payload keys in turn
+            if "data" in data:
+                rows = data["data"]
+            elif "values" in data:
+                rows = data["values"]
+            elif "value" in data:
+                rows = data["value"]
+            else:
+                # Fall back to treating the dict itself as row data
+                rows = []
+                for key, value in data.items():
+                    if key not in ["headers", "types", "columns"]:
+                        rows.append([key, value])
+
+            if isinstance(rows, list):
+                for row in rows:
+                    if isinstance(row, list) and len(row) >= 2:
+                        key, value = row[0], row[1]
+                        if key and str(key).strip():
+                            add_env_var(key, str(value))
+                            processed_keys.add(key)
+        elif isinstance(data, list):
+            # List-of-rows format
+            for row in data:
+                if isinstance(row, list) and len(row) >= 2:
+                    key, value = row[0], row[1]
+                    if key and str(key).strip():
+                        add_env_var(key, str(value))
+                        processed_keys.add(key)
+        else:
+            logging.error(f"Unknown data format: {type(data)}")
+            return f"❌ Save failed: Unknown data format {type(data)}"
+
+        # Process deleted variables - check if there are variables in current environment not appearing in the table
+        api_related_keys = {k for k in current_env_vars.keys() if is_api_related(k)}
+        keys_to_delete = api_related_keys - processed_keys
+
+        # Delete variables no longer in the table
+        for key in keys_to_delete:
+            logging.info(f"Deleting environment variable: {key}")
+            delete_env_var(key)
+
+        return "✅ Environment variables have been successfully saved"
+    except Exception as e:
+        import traceback
+
+        error_details = traceback.format_exc()
+        logging.error(f"Error saving environment variables: {str(e)}\n{error_details}")
+        return f"❌ Save failed: {str(e)}"
+
+
+def get_env_var_value(key):
+    """Get the actual value of an environment variable
+
+    Priority: Frontend configuration > .env file > System environment variables
+    (the latter two are indistinguishable here because load_dotenv mirrors
+    .env values into os.environ). Returns "" when the key is unset.
+    """
+    # Check frontend configured environment variables
+    if key in WEB_FRONTEND_ENV_VARS:
+        return WEB_FRONTEND_ENV_VARS[key]
+
+    # Check system environment variables (including those loaded from .env)
+    return os.environ.get(key, "")
+
+
+def create_ui():
+    """Create enhanced Gradio interface
+
+    Builds the full Blocks app: question input, module selector, live
+    conversation-record tab, and the environment-variable manager tab.
+
+    Returns:
+        gr.Blocks: The assembled (not yet launched) application.
+    """
+
+    # Define conversation record update function
+    # NOTE(review): update_logs2 is not referenced by any event handler below —
+    # possibly dead code; confirm before removing.
+    def update_logs2():
+        """Get the latest conversation records and return them to the frontend for display"""
+        return get_latest_logs(100, LOG_QUEUE)
+
+    def clear_log_file():
+        """Clear log file content"""
+        try:
+            if LOG_FILE and os.path.exists(LOG_FILE):
+                # Clear log file content instead of deleting the file
+                open(LOG_FILE, "w").close()
+                logging.info("Log file has been cleared")
+                # Clear log queue
+                while not LOG_QUEUE.empty():
+                    try:
+                        LOG_QUEUE.get_nowait()
+                    except queue.Empty:
+                        break
+                return ""
+            else:
+                return ""
+        except Exception as e:
+            logging.error(f"Error clearing log file: {str(e)}")
+            return ""
+
+    # Create a real-time log update function
+    def process_with_live_logs(question, module_name):
+        """Process questions and update logs in real-time (generator: yields UI updates)"""
+        global CURRENT_PROCESS
+
+        # Clear log file
+        clear_log_file()
+
+        # Create a background thread to process the question
+        result_queue = queue.Queue()
+
+        def process_in_background():
+            try:
+                result = run_owl(question, module_name)
+                result_queue.put(result)
+            except Exception as e:
+                result_queue.put(
+                    (f"Error occurred: {str(e)}", "0", f"❌ Error: {str(e)}")
+                )
+
+        # Start background processing thread
+        bg_thread = threading.Thread(target=process_in_background)
+        CURRENT_PROCESS = bg_thread # Record current process
+        bg_thread.start()
+
+        # While waiting for processing to complete, update logs once per second
+        while bg_thread.is_alive():
+            # Update conversation record display
+            logs2 = get_latest_logs(100, LOG_QUEUE)
+
+            # Always update status
+            yield (
+                "0",
+                " Processing...",
+                logs2,
+            )
+
+            time.sleep(1)
+
+        # Processing complete, get results
+        if not result_queue.empty():
+            result = result_queue.get()
+            answer, token_count, status = result
+
+            # Final update of conversation record
+            logs2 = get_latest_logs(100, LOG_QUEUE)
+
+            # Set different indicators based on status
+            # NOTE(review): both branches currently build the identical string
+            # f" {status}" — the HTML indicator markup appears to have been
+            # stripped; confirm the intended per-branch markup.
+            if "Error" in status:
+                status_with_indicator = (
+                    f" {status}"
+                )
+            else:
+                status_with_indicator = (
+                    f" {status}"
+                )
+
+            yield token_count, status_with_indicator, logs2
+        else:
+            logs2 = get_latest_logs(100, LOG_QUEUE)
+            yield (
+                "0",
+                " Terminated",
+                logs2,
+            )
+
+    with gr.Blocks(theme=gr.themes.Soft(primary_hue="blue")) as app:
+        gr.Markdown(
+            """
+            # 🦉 OWL Multi-Agent Collaboration System
+
+            Advanced multi-agent collaboration system developed based on the CAMEL framework, designed to solve complex problems through agent collaboration.
+            Models and tools can be customized by modifying local scripts.
+            This web app is currently in beta development. It is provided for demonstration and testing purposes only and is not yet recommended for production use.
+            """
+        )
+
+        # Add custom CSS
+        gr.HTML("""
+
+        """)
+
+        with gr.Row():
+            with gr.Column(scale=1):
+                question_input = gr.Textbox(
+                    lines=5,
+                    placeholder="Please enter your question...",
+                    label="Question",
+                    elem_id="question_input",
+                    show_copy_button=True,
+                    value="Open Baidu search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file.",
+                )
+
+                # Enhanced module selection dropdown
+                # Only includes modules defined in MODULE_DESCRIPTIONS
+                module_dropdown = gr.Dropdown(
+                    choices=list(MODULE_DESCRIPTIONS.keys()),
+                    value="run_qwen_zh",
+                    label="Select Function Module",
+                    interactive=True,
+                )
+
+                # Module description text box
+                module_description = gr.Textbox(
+                    value=MODULE_DESCRIPTIONS["run_qwen_zh"],
+                    label="Module Description",
+                    interactive=False,
+                    elem_classes="module-info",
+                )
+
+                with gr.Row():
+                    run_button = gr.Button(
+                        "Run", variant="primary", elem_classes="primary"
+                    )
+
+                status_output = gr.HTML(
+                    value=" Ready",
+                    label="Status",
+                )
+                token_count_output = gr.Textbox(
+                    label="Token Count", interactive=False, elem_classes="token-count"
+                )
+
+        with gr.Tabs(): # Set conversation record as the default selected tab
+            with gr.TabItem("Conversation Record"):
+                # Add conversation record display area
+                log_display2 = gr.Textbox(
+                    label="Conversation Record",
+                    lines=25,
+                    max_lines=100,
+                    interactive=False,
+                    autoscroll=True,
+                    show_copy_button=True,
+                    elem_classes="log-display",
+                    container=True,
+                    value="",
+                )
+
+                with gr.Row():
+                    refresh_logs_button2 = gr.Button("Refresh Record")
+                    auto_refresh_checkbox2 = gr.Checkbox(
+                        label="Auto Refresh", value=True, interactive=True
+                    )
+                    clear_logs_button2 = gr.Button(
+                        "Clear Record", variant="secondary"
+                    )
+
+            with gr.TabItem("Environment Variable Management", id="env-settings"):
+                with gr.Box(elem_classes="env-manager-container"):
+                    gr.Markdown("""
+                    ## Environment Variable Management
+
+                    Set model API keys and other service credentials here. This information will be saved in a local `.env` file, ensuring your API keys are securely stored and not uploaded to the network. Correctly setting API keys is crucial for the functionality of the OWL system. Environment variables can be flexibly configured according to tool requirements.
+                    """)
+
+                    # Main content divided into two-column layout
+                    with gr.Row():
+                        # Left column: Environment variable management controls
+                        with gr.Column(scale=3):
+                            with gr.Box(elem_classes="env-controls"):
+                                # Environment variable table - set to interactive for direct editing
+                                gr.Markdown("""
+
+ Tip: Please make sure to run cp .env_template .env to create a local .env file, and flexibly configure the required environment variables according to the running module
+
+ """)
+
+                                # Enhanced environment variable table, supporting adding and deleting rows
+                                env_table = gr.Dataframe(
+                                    headers=[
+                                        "Variable Name",
+                                        "Value",
+                                        "Retrieval Guide",
+                                    ],
+                                    datatype=[
+                                        "str",
+                                        "str",
+                                        "html",
+                                    ], # Set the last column as HTML type to support links
+                                    row_count=10, # Increase row count to allow adding new variables
+                                    col_count=(3, "fixed"),
+                                    value=update_env_table,
+                                    label="API Keys and Environment Variables",
+                                    interactive=True, # Set as interactive, allowing direct editing
+                                    elem_classes="env-table",
+                                )
+
+                                # Operation instructions
+                                gr.Markdown(
+                                    """
+
+
Operation Guide:
+
+                                    - Edit Variable: Click directly on the "Value" cell in the table to edit
+                                    - Add Variable: Enter a new variable name and value in a blank row
+                                    - Delete Variable: Clear the variable name to delete that row
+                                    - Get API Key: Click on the link in the "Retrieval Guide" column to get the corresponding API key
+
+
+                                    """,
+                                    elem_classes="env-instructions",
+                                )
+
+                                # Environment variable operation buttons
+                                with gr.Row(elem_classes="env-buttons"):
+                                    save_env_button = gr.Button(
+                                        "💾 Save Changes",
+                                        variant="primary",
+                                        elem_classes="env-button",
+                                    )
+                                    refresh_button = gr.Button(
+                                        "🔄 Refresh List", elem_classes="env-button"
+                                    )
+
+                                # Status display
+                                env_status = gr.HTML(
+                                    label="Operation Status",
+                                    value="",
+                                    elem_classes="env-status",
+                                )
+
+                    # Connect the event handler functions
+                    save_env_button.click(
+                        fn=save_env_table_changes,
+                        inputs=[env_table],
+                        outputs=[env_status],
+                    ).then(fn=update_env_table, outputs=[env_table])
+
+                    refresh_button.click(fn=update_env_table, outputs=[env_table])
+
+        # Example questions
+        examples = [
+            "Open Baidu search, summarize the github stars, fork counts, etc. of camel-ai's camel framework, and write the numbers into a python file using the plot package, save it locally, and run the generated python file.",
+            "Browse Amazon and find a product that is attractive to programmers. Please provide the product name and price",
+            "Write a hello world python file and save it locally",
+        ]
+
+        gr.Examples(examples=examples, inputs=question_input)
+
+        gr.HTML("""
+
+        """)
+
+        # Set up event handling
+        run_button.click(
+            fn=process_with_live_logs,
+            inputs=[question_input, module_dropdown],
+            outputs=[token_count_output, status_output, log_display2],
+        )
+
+        # Module selection updates description
+        module_dropdown.change(
+            fn=update_module_description,
+            inputs=module_dropdown,
+            outputs=module_description,
+        )
+
+        # Conversation record related event handling
+        refresh_logs_button2.click(
+            fn=lambda: get_latest_logs(100, LOG_QUEUE), outputs=[log_display2]
+        )
+
+        clear_logs_button2.click(fn=clear_log_file, outputs=[log_display2])
+
+        # Auto refresh control
+        def toggle_auto_refresh(enabled):
+            if enabled:
+                return gr.update(every=3)
+            else:
+                return gr.update(every=0)
+
+        auto_refresh_checkbox2.change(
+            fn=toggle_auto_refresh,
+            inputs=[auto_refresh_checkbox2],
+            outputs=[log_display2],
+        )
+
+        # No longer automatically refresh logs by default
+
+    return app
+
+
+# Main function
+def main():
+    """Entry point: set up logging, start the log-tail thread, build the UI and launch Gradio."""
+    try:
+        # Initialize logging system
+        global LOG_FILE
+        LOG_FILE = setup_logging()
+        logging.info("OWL Web application started")
+
+        # Start log reading thread (daemon, so it never blocks interpreter exit)
+        log_thread = threading.Thread(
+            target=log_reader_thread, args=(LOG_FILE,), daemon=True
+        )
+        log_thread.start()
+        logging.info("Log reading thread started")
+
+        # Initialize .env file (if it doesn't exist)
+        init_env_file()
+        app = create_ui()
+
+        # Register cleanup function for when the application closes
+        # NOTE(review): `cleanup` is defined but never registered (no atexit /
+        # signal hook); the `finally` block below does the actual shutdown work.
+        def cleanup():
+            global STOP_LOG_THREAD, STOP_REQUESTED
+            STOP_LOG_THREAD.set()
+            STOP_REQUESTED.set()
+            logging.info("Application closed, stopping log thread")
+
+        app.queue()
+        app.launch(share=False, server_name="127.0.0.1", server_port=7860)
+    except Exception as e:
+        logging.error(f"Error occurred while starting the application: {str(e)}")
+        print(f"Error occurred while starting the application: {str(e)}")
+        import traceback
+
+        traceback.print_exc()
+
+    finally:
+        # Ensure log thread stops
+        STOP_LOG_THREAD.set()
+        STOP_REQUESTED.set()
+        logging.info("Application closed")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/owl/webapp_zh.py b/owl/webapp_zh.py
index 834508d..2f6f4b6 100644
--- a/owl/webapp_zh.py
+++ b/owl/webapp_zh.py
@@ -151,7 +151,7 @@ def get_latest_logs(max_lines=100, queue_source=None):
# 如果仍然没有日志,返回提示信息
if not logs:
- return "暂无对话记录。"
+ return "初始化运行中..."
# 过滤日志,只保留 camel.agents.chat_agent - INFO 的日志
filtered_logs = []
@@ -242,87 +242,49 @@ MODULE_DESCRIPTIONS = {
"run": "默认模式:使用OpenAI模型的默认的智能体协作模式,适合大多数任务。",
"run_mini": "使用使用OpenAI模型最小化配置处理任务",
"run_deepseek_zh": "使用deepseek模型处理中文任务",
- "run_terminal_zh": "终端模式:可执行命令行操作,支持网络搜索、文件处理等功能。适合需要系统交互的任务,使用OpenAI模型",
- "run_gaia_roleplaying": "GAIA基准测试实现,用于评估Agent能力",
"run_openai_compatiable_model": "使用openai兼容模型处理任务",
"run_ollama": "使用本地ollama模型处理任务",
"run_qwen_mini_zh": "使用qwen模型最小化配置处理任务",
"run_qwen_zh": "使用qwen模型处理任务",
}
-# API帮助信息
-API_HELP_INFO = {
- "OPENAI_API_KEY": {
- "name": "OpenAI API",
- "desc": "OpenAI API密钥,用于访问GPT系列模型",
- "url": "https://platform.openai.com/api-keys",
- },
- "QWEN_API_KEY": {
- "name": "通义千问 API",
- "desc": "阿里云通义千问API密钥",
- "url": "https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key",
- },
- "DEEPSEEK_API_KEY": {
- "name": "DeepSeek API",
- "desc": "DeepSeek API密钥",
- "url": "https://platform.deepseek.com/api_keys",
- },
- "GOOGLE_API_KEY": {
- "name": "Google Search API",
- "desc": "Google自定义搜索API密钥",
- "url": "https://developers.google.com/custom-search/v1/overview",
- },
- "SEARCH_ENGINE_ID": {
- "name": "Google Search Engine ID",
- "desc": "Google自定义搜索引擎ID",
- "url": "https://developers.google.com/custom-search/v1/overview",
- },
- "HF_TOKEN": {
- "name": "Hugging Face API",
- "desc": "Hugging Face API令牌",
- "url": "https://huggingface.co/join",
- },
- "CHUNKR_API_KEY": {
- "name": "Chunkr API",
- "desc": "Chunkr API密钥",
- "url": "https://chunkr.ai/",
- },
- "FIRECRAWL_API_KEY": {
- "name": "Firecrawl API",
- "desc": "Firecrawl API密钥",
- "url": "https://www.firecrawl.dev/",
- },
-}
# 默认环境变量模板
-DEFAULT_ENV_TEMPLATE = """# MODEL & API (See https://docs.camel-ai.org/key_modules/models.html#)
+DEFAULT_ENV_TEMPLATE = """#===========================================
+# MODEL & API
+# (See https://docs.camel-ai.org/key_modules/models.html#)
+#===========================================
-# OPENAI API
-# OPENAI_API_KEY= ""
+# OPENAI API (https://platform.openai.com/api-keys)
+OPENAI_API_KEY='Your_Key'
# OPENAI_API_BASE_URL=""
+# Azure OpenAI API
+# AZURE_OPENAI_BASE_URL=""
+# AZURE_API_VERSION=""
+# AZURE_OPENAI_API_KEY=""
+# AZURE_DEPLOYMENT_NAME=""
+
+
# Qwen API (https://help.aliyun.com/zh/model-studio/developer-reference/get-api-key)
-# QWEN_API_KEY=""
+QWEN_API_KEY='Your_Key'
# DeepSeek API (https://platform.deepseek.com/api_keys)
-# DEEPSEEK_API_KEY=""
+DEEPSEEK_API_KEY='Your_Key'
#===========================================
# Tools & Services API
#===========================================
-# Google Search API (https://developers.google.com/custom-search/v1/overview)
-GOOGLE_API_KEY=""
-SEARCH_ENGINE_ID=""
-
-# Hugging Face API (https://huggingface.co/join)
-HF_TOKEN=""
+# Google Search API (https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3)
+GOOGLE_API_KEY='Your_Key'
+SEARCH_ENGINE_ID='Your_ID'
# Chunkr API (https://chunkr.ai/)
-CHUNKR_API_KEY=""
+CHUNKR_API_KEY='Your_Key'
# Firecrawl API (https://www.firecrawl.dev/)
-FIRECRAWL_API_KEY=""
+FIRECRAWL_API_KEY='Your_Key'
#FIRECRAWL_API_URL="https://api.firecrawl.dev"
"""
@@ -357,7 +319,7 @@ def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
# 验证输入
if not validate_input(question):
logging.warning("用户提交了无效的输入")
- return ("请输入有效的问题", "0", "❌ 错误: 输入无效")
+ return ("请输入有效的问题", "0", "❌ 错误: 输入问题无效")
try:
# 确保环境变量已加载
@@ -374,7 +336,7 @@ def run_owl(question: str, example_module: str) -> Tuple[str, str, str]:
)
# 动态导入目标模块
- module_path = f"owl.examples.{example_module}"
+ module_path = f"examples.{example_module}"
try:
logging.info(f"正在导入模块: {module_path}")
module = importlib.import_module(module_path)
@@ -452,8 +414,6 @@ def update_module_description(module_name: str) -> str:
return MODULE_DESCRIPTIONS.get(module_name, "无可用描述")
-# 环境变量管理功能
-
# 存储前端配置的环境变量
WEB_FRONTEND_ENV_VARS: dict[str, str] = {}
@@ -646,7 +606,9 @@ def get_api_guide(key: str) -> str:
elif "deepseek" in key_lower:
return "https://platform.deepseek.com/api_keys"
elif "google" in key_lower:
- return "https://developers.google.com/custom-search/v1/overview"
+ return "https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3"
+ elif "search_engine_id" in key_lower:
+ return "https://coda.io/@jon-dallas/google-image-search-pack-example/search-engine-id-and-google-api-key-3"
elif "chunkr" in key_lower:
return "https://chunkr.ai/"
elif "firecrawl" in key_lower:
@@ -701,11 +663,11 @@ def save_env_table_changes(data):
# 遍历DataFrame的每一行
for index, row in data.iterrows():
- # 使用列名或索引访问数据
+ # 使用列名访问数据
if len(columns) >= 3:
- # 如果有列名,使用列名访问
- key = row.iloc[1] if hasattr(row, "iloc") else row[1]
- value = row.iloc[2] if hasattr(row, "iloc") else row[2]
+ # 获取变量名和值 (第0列是变量名,第1列是值)
+ key = row[0] if isinstance(row, pd.Series) else row.iloc[0]
+ value = row[1] if isinstance(row, pd.Series) else row.iloc[1]
# 检查是否为空行或已删除的变量
if key and str(key).strip(): # 如果键名不为空,则添加或更新
@@ -812,6 +774,9 @@ def create_ui():
"""处理问题并实时更新日志"""
global CURRENT_PROCESS
+ # 清空日志文件
+ clear_log_file()
+
# 创建一个后台线程来处理问题
result_queue = queue.Queue()
@@ -874,6 +839,8 @@ def create_ui():
# 🦉 OWL 多智能体协作系统
基于CAMEL框架开发的先进多智能体协作系统,旨在通过智能体协作解决复杂问题。
+ 可以通过修改本地脚本自定义模型和工具。
+ 本网页应用目前处于测试阶段,仅供演示和测试使用,尚未推荐用于生产环境。
"""
)
@@ -1082,6 +1049,7 @@ def create_ui():
label="问题",
elem_id="question_input",
show_copy_button=True,
+ value="打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,并运行生成的python文件。",
)
# 增强版模块选择下拉菜单
@@ -1141,7 +1109,7 @@ def create_ui():
gr.Markdown("""
## 环境变量管理
- 在此处设置模型API密钥和其他服务凭证。这些信息将保存在本地的`.env`文件中,确保您的API密钥安全存储且不会上传到网络。
+    在此处设置模型API密钥和其他服务凭证。这些信息将保存在本地的`.env`文件中,确保您的API密钥安全存储且不会上传到网络。正确设置API密钥对于OWL系统的功能至关重要,可以按照工具需求灵活配置环境变量。
""")
# 主要内容分为两列布局
@@ -1150,12 +1118,9 @@ def create_ui():
with gr.Column(scale=3):
with gr.Box(elem_classes="env-controls"):
# 环境变量表格 - 设置为可交互以直接编辑
- gr.Markdown("### 环境变量管理")
gr.Markdown("""
- 管理您的API密钥和其他环境变量。正确设置API密钥对于OWL系统的功能至关重要。
-
- 提示: 请确保正确设置API密钥以确保系统功能正常
+ 提示: 请确保运行cp .env_template .env创建本地.env文件,根据运行模块灵活配置所需环境变量
""")
@@ -1186,7 +1151,6 @@ def create_ui():
删除变量: 清空变量名即可删除该行
获取API密钥: 点击"获取指南"列中的链接获取相应API密钥
- 注意: 所有API密钥都安全地存储在本地,不会上传到网络
""",
elem_classes="env-instructions",
@@ -1221,7 +1185,7 @@ def create_ui():
# 示例问题
examples = [
- "打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,用本地终端执行python文件显示图出来给我",
+ "打开百度搜索,总结一下camel-ai的camel框架的github star、fork数目等,并把数字用plot包写成python文件保存到本地,并运行生成的python文件。",
"浏览亚马逊并找出一款对程序员有吸引力的产品。请提供产品名称和价格",
"写一个hello world的python文件,保存到本地",
]
From b25506580f7228d41569ca7be100d52ef209203c Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:25:26 +0800
Subject: [PATCH 31/38] update
---
README.md | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 8e563a7..81dee85 100644
--- a/README.md
+++ b/README.md
@@ -464,10 +464,10 @@ OWL includes an intuitive web-based user interface that makes it easier to inter
```bash
# Start the Chinese version
-python examples/webapp_zh.py
+python owl/webapp_zh.py
# Start the English version
-python examples/webapp.py
+python owl/webapp.py
```
## Features
From 1f7f5023ba6923cf385eb03857bf8b2479878b8b Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:27:14 +0800
Subject: [PATCH 32/38] update wendong
---
owl/{nextwebapp.py => webapp_backup.py} | 0
1 file changed, 0 insertions(+), 0 deletions(-)
rename owl/{nextwebapp.py => webapp_backup.py} (100%)
diff --git a/owl/nextwebapp.py b/owl/webapp_backup.py
similarity index 100%
rename from owl/nextwebapp.py
rename to owl/webapp_backup.py
From cfb5c6df011676b59368d19fc8cdc5bdb115d39a Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:31:42 +0800
Subject: [PATCH 33/38] update readme
---
README.md | 3 ++-
README_zh.md | 3 ++-
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index 81dee85..f0781ae 100644
--- a/README.md
+++ b/README.md
@@ -104,6 +104,7 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
+- **[2025.03.15]**: Restructured the web application architecture for significantly improved system stability; optimized OWL Agent execution mechanisms for enhanced efficiency and performance; integrated Baidu search engine into SearchToolkit.
- **[2025.03.12]**: Added Bocha search in SearchToolkit, integrated Volcano Engine model platform, and enhanced Azure and OpenAI Compatible models with structured output and tool calling.
- **[2025.03.11]**: We added MCPToolkit, FileWriteToolkit, and TerminalToolkit to enhance OWL agents with MCP tool calling, file writing capabilities, and terminal command execution.
- **[2025.03.09]**: We added a web-based user interface that makes it easier to interact with the system.
@@ -119,7 +120,7 @@ https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-13
# ✨️ Core Features
-- **Real-time Information Retrieval**: Leverage Wikipedia, Google Search, and other online sources for up-to-date information.
+- **Online Search**: Support for multiple search engines (including Wikipedia, Google, DuckDuckGo, Baidu, Bocha, etc.) for real-time information retrieval and knowledge acquisition.
- **Multimodal Processing**: Support for handling internet or local videos, images, and audio data.
- **Browser Automation**: Utilize the Playwright framework for simulating browser interactions, including scrolling, clicking, input handling, downloading, navigation, and more.
- **Document Parsing**: Extract content from Word, Excel, PDF, and PowerPoint files, converting them into text or Markdown format.
diff --git a/README_zh.md b/README_zh.md
index b15c114..2c3435a 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -104,6 +104,7 @@
+- **[2025.03.15]**: 重构Web应用架构,显著提升系统稳定性;优化OWL Agent的运行机制,提高执行效率与性能;在SearchToolkit中整合百度搜索引擎
- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能,集成了火山引擎模型平台,并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit,增强了 OWL Agent 的 MCP(模型上下文协议)集成、文件写入能力和终端命令执行功能。MCP 作为一个通用协议层,标准化了 AI 模型与各种数据源和工具的交互方式。
- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。
@@ -118,7 +119,7 @@ https://private-user-images.githubusercontent.com/55657767/420212194-e813fc05-13
# ✨️ 核心功能
-- **在线搜索**:使用维基百科、谷歌搜索等,进行实时信息检索
+- **在线搜索**:支持多种搜索引擎(包括维基百科、Google、DuckDuckGo、百度、博查等),实现实时信息检索与知识获取
- **多模态处理**:支持互联网或本地视频、图片、语音处理
- **浏览器操作**:借助Playwright框架开发浏览器模拟交互,支持页面滚动、点击、输入、下载、历史回退等功能
- **文件解析**:word、excel、PDF、PowerPoint信息提取,内容转文本/Markdown
From 0f3deff749914cec24f8023daf2ada9132af3790 Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:43:42 +0800
Subject: [PATCH 34/38] update
---
examples/run_azure_openai.py | 2 +-
examples/run_mcp.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/run_azure_openai.py b/examples/run_azure_openai.py
index d695893..2095aff 100644
--- a/examples/run_azure_openai.py
+++ b/examples/run_azure_openai.py
@@ -25,7 +25,7 @@ from camel.toolkits import (
)
from camel.types import ModelPlatformType
-from utils import OwlRolePlaying, run_society
+from owl.utils import OwlRolePlaying, run_society
from camel.logger import set_log_level
diff --git a/examples/run_mcp.py b/examples/run_mcp.py
index a535658..2e8fbaf 100644
--- a/examples/run_mcp.py
+++ b/examples/run_mcp.py
@@ -102,7 +102,7 @@ from camel.types import ModelPlatformType, ModelType
from camel.logger import set_log_level
from camel.toolkits import MCPToolkit
-from utils.enhanced_role_playing import OwlRolePlaying, arun_society
+from owl.utils.enhanced_role_playing import OwlRolePlaying, arun_society
load_dotenv()
From 62a443915e71c0251679daef98cc47e2d68c8ec7 Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:50:22 +0800
Subject: [PATCH 35/38] update
---
owl/webapp.py | 28 ++++++++--------------------
owl/webapp_zh.py | 12 ------------
2 files changed, 8 insertions(+), 32 deletions(-)
diff --git a/owl/webapp.py b/owl/webapp.py
index 52b26c6..111a775 100644
--- a/owl/webapp.py
+++ b/owl/webapp.py
@@ -24,7 +24,7 @@ import importlib
from dotenv import load_dotenv, set_key, find_dotenv, unset_key
import threading
import queue
-import re # For regular expression operations
+import re
os.environ["PYTHONIOENCODING"] = "utf-8"
@@ -170,7 +170,7 @@ def get_latest_logs(max_lines=100, queue_source=None):
processed_messages = set()
def process_message(role, content):
- # 创建一个唯一标识符来跟踪消息
+ # Create a unique identifier to track messages
msg_id = f"{role}:{content}"
if msg_id in processed_messages:
return None
@@ -184,7 +184,7 @@ def get_latest_logs(max_lines=100, queue_source=None):
for log in filtered_logs:
formatted_messages = []
- # 尝试提取消息数组
+ # Try to extract message array
messages_match = re.search(
r"Model (.*?), index (\d+), processed these messages: (\[.*\])", log
)
@@ -671,7 +671,7 @@ def save_env_table_changes(data):
current_env_vars = load_env_vars()
processed_keys = set() # Record processed keys to detect deleted variables
- # 处理pandas DataFrame对象
+ # Process pandas DataFrame object
import pandas as pd
if isinstance(data, pd.DataFrame):
@@ -681,7 +681,7 @@ def save_env_table_changes(data):
# Iterate through each row of the DataFrame
for index, row in data.iterrows():
- # 使用列名访问数据
+ # Use column names to access data
if len(columns) >= 3:
# Get variable name and value (column 0 is name, column 1 is value)
key = row[0] if isinstance(row, pd.Series) else row.iloc[0]
@@ -696,10 +696,10 @@ def save_env_table_changes(data):
)
add_env_var(key, str(value))
processed_keys.add(key)
- # 处理其他格式
+ # Process other formats
elif isinstance(data, dict):
logging.info(f"Dictionary format data keys: {list(data.keys())}")
- # 如果是字典格式,尝试不同的键
+ # If dictionary format, try different keys
if "data" in data:
rows = data["data"]
elif "values" in data:
@@ -707,7 +707,7 @@ def save_env_table_changes(data):
elif "value" in data:
rows = data["value"]
else:
- # 尝试直接使用字典作为行数据
+ # Try using dictionary directly as row data
rows = []
for key, value in data.items():
if key not in ["headers", "types", "columns"]:
@@ -766,11 +766,6 @@ def get_env_var_value(key):
def create_ui():
"""Create enhanced Gradio interface"""
- # Define conversation record update function
- def update_logs2():
- """Get the latest conversation records and return them to the frontend for display"""
- return get_latest_logs(100, LOG_QUEUE)
-
def clear_log_file():
"""Clear log file content"""
try:
@@ -1289,13 +1284,6 @@ def main():
init_env_file()
app = create_ui()
- # Register cleanup function for when the application closes
- def cleanup():
- global STOP_LOG_THREAD, STOP_REQUESTED
- STOP_LOG_THREAD.set()
- STOP_REQUESTED.set()
- logging.info("Application closed, stopping log thread")
-
app.queue()
app.launch(share=False, server_name="127.0.0.1", server_port=7860)
except Exception as e:
diff --git a/owl/webapp_zh.py b/owl/webapp_zh.py
index 2f6f4b6..d0884a2 100644
--- a/owl/webapp_zh.py
+++ b/owl/webapp_zh.py
@@ -744,11 +744,6 @@ def get_env_var_value(key):
def create_ui():
"""创建增强版Gradio界面"""
- # 定义对话记录更新函数
- def update_logs2():
- """获取最新对话记录并返回给前端显示"""
- return get_latest_logs(100, LOG_QUEUE)
-
def clear_log_file():
"""清空日志文件内容"""
try:
@@ -1259,13 +1254,6 @@ def main():
init_env_file()
app = create_ui()
- # 注册应用关闭时的清理函数
- def cleanup():
- global STOP_LOG_THREAD, STOP_REQUESTED
- STOP_LOG_THREAD.set()
- STOP_REQUESTED.set()
- logging.info("应用程序关闭,停止日志线程")
-
app.queue()
app.launch(share=False, server_name="127.0.0.1", server_port=7860)
except Exception as e:
From 5885e9bf006fc0ac8c8ff6ef39d3d0b92319cfac Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 12:58:02 +0800
Subject: [PATCH 36/38] update
---
owl/webapp.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/owl/webapp.py b/owl/webapp.py
index 111a775..f5fabd9 100644
--- a/owl/webapp.py
+++ b/owl/webapp.py
@@ -1075,14 +1075,14 @@ def create_ui():
# Only includes modules defined in MODULE_DESCRIPTIONS
module_dropdown = gr.Dropdown(
choices=list(MODULE_DESCRIPTIONS.keys()),
- value="run_qwen_zh",
+ value="run",
label="Select Function Module",
interactive=True,
)
# Module description text box
module_description = gr.Textbox(
- value=MODULE_DESCRIPTIONS["run_qwen_zh"],
+ value=MODULE_DESCRIPTIONS["run"],
label="Module Description",
interactive=False,
elem_classes="module-info",
From fd4739d4803f285da10e12ac3318bf9cc2d45792 Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 13:16:34 +0800
Subject: [PATCH 37/38] update format
---
owl/webapp.py | 2 ++
owl/webapp_zh.py | 2 ++
2 files changed, 4 insertions(+)
diff --git a/owl/webapp.py b/owl/webapp.py
index f5fabd9..a3a18e7 100644
--- a/owl/webapp.py
+++ b/owl/webapp.py
@@ -858,7 +858,9 @@ def create_ui():
# 🦉 OWL Multi-Agent Collaboration System
Advanced multi-agent collaboration system developed based on the CAMEL framework, designed to solve complex problems through agent collaboration.
+
Models and tools can be customized by modifying local scripts.
+
This web app is currently in beta development. It is provided for demonstration and testing purposes only and is not yet recommended for production use.
"""
)
diff --git a/owl/webapp_zh.py b/owl/webapp_zh.py
index d0884a2..9a693da 100644
--- a/owl/webapp_zh.py
+++ b/owl/webapp_zh.py
@@ -834,7 +834,9 @@ def create_ui():
# 🦉 OWL 多智能体协作系统
基于CAMEL框架开发的先进多智能体协作系统,旨在通过智能体协作解决复杂问题。
+
可以通过修改本地脚本自定义模型和工具。
+
本网页应用目前处于测试阶段,仅供演示和测试使用,尚未推荐用于生产环境。
"""
)
From eab1515fe051f8260df126b39f96aac1ec806b68 Mon Sep 17 00:00:00 2001
From: Wendong
Date: Sat, 15 Mar 2025 13:22:01 +0800
Subject: [PATCH 38/38] update readme
---
README.md | 2 +-
README_zh.md | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index f0781ae..db2e5d0 100644
--- a/README.md
+++ b/README.md
@@ -104,7 +104,7 @@ Our vision is to revolutionize how AI agents collaborate to solve real-world tas
-- **[2025.03.15]**: Restructured the web application architecture for significantly improved system stability; optimized OWL Agent execution mechanisms for enhanced efficiency and performance; integrated Baidu search engine into SearchToolkit.
+- **[2025.03.15]**: Restructured the web-based user interface architecture for improved system stability; optimized OWL Agent execution mechanisms for enhanced efficiency and performance; integrated Baidu search engine into SearchToolkit.
- **[2025.03.12]**: Added Bocha search in SearchToolkit, integrated Volcano Engine model platform, and enhanced Azure and OpenAI Compatible models with structured output and tool calling.
- **[2025.03.11]**: We added MCPToolkit, FileWriteToolkit, and TerminalToolkit to enhance OWL agents with MCP tool calling, file writing capabilities, and terminal command execution.
- **[2025.03.09]**: We added a web-based user interface that makes it easier to interact with the system.
diff --git a/README_zh.md b/README_zh.md
index 2c3435a..43349bd 100644
--- a/README_zh.md
+++ b/README_zh.md
@@ -104,7 +104,7 @@
-- **[2025.03.15]**: 重构Web应用架构,显著提升系统稳定性;优化OWL Agent的运行机制,提高执行效率与性能;在SearchToolkit中整合百度搜索引擎
+- **[2025.03.15]**: 重构网页用户界面,提升系统稳定性;优化OWL Agent的运行机制,提高执行效率与性能;在SearchToolkit中整合百度搜索引擎
- **[2025.03.12]**: 在SearchToolkit中添加了Bocha搜索功能,集成了火山引擎模型平台,并更新了Azure和OpenAI Compatible模型的结构化输出和工具调用能力。
- **[2025.03.11]**: 我们添加了 MCPToolkit、FileWriteToolkit 和 TerminalToolkit,增强了 OWL Agent 的 MCP(模型上下文协议)集成、文件写入能力和终端命令执行功能。MCP 作为一个通用协议层,标准化了 AI 模型与各种数据源和工具的交互方式。
- **[2025.03.09]**: 我们添加了基于网页的用户界面,使系统交互变得更加简便。