Update model settings to use more capable models from Groq

This commit is contained in:
thekage91
2025-03-14 13:47:49 +01:00
parent c8af28781c
commit 32a6e1c17c

View File

@@ -11,6 +11,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ========= Copyright 2023-2024 @ CAMEL-AI.org. All Rights Reserved. =========
"""
This module provides integration with the Groq API platform for the OWL system.
It configures different agent roles with appropriate Groq models based on their requirements:
- Tool-intensive roles (assistant, web, planning, video, image) use GROQ_LLAMA_3_3_70B
- Document processing uses GROQ_MIXTRAL_8_7B
- Simple roles (user) use GROQ_LLAMA_3_1_8B
To use this module:
1. Set GROQ_API_KEY in your .env file
2. Set OPENAI_API_BASE_URL to "https://api.groq.com/openai/v1"
3. Run with: python -m owl.run_groq
"""
from dotenv import load_dotenv
from camel.models import ModelFactory
from camel.toolkits import (
@@ -47,37 +62,37 @@ def construct_society(question: str) -> OwlRolePlaying:
# Per-role Groq model selection.
#   - Tool-intensive roles (assistant, web, planning, video, image) use the
#     more capable GROQ_LLAMA_3_3_70B, which handles tool/function calling
#     and complex reasoning more reliably.
#   - Document processing uses GROQ_MIXTRAL_8_7B.
#   - The user role issues no tool calls, so the small GROQ_LLAMA_3_1_8B
#     is sufficient.
role_model_types = {
    "user": ModelType.GROQ_LLAMA_3_1_8B,        # Simple role, can use 8B model
    "assistant": ModelType.GROQ_LLAMA_3_3_70B,  # Main assistant needs tool capability
    "web": ModelType.GROQ_LLAMA_3_3_70B,        # Web browsing requires tool usage
    "planning": ModelType.GROQ_LLAMA_3_3_70B,   # Planning requires complex reasoning
    "video": ModelType.GROQ_LLAMA_3_3_70B,      # Video analysis is multimodal
    "image": ModelType.GROQ_LLAMA_3_3_70B,      # Image analysis is multimodal
    "document": ModelType.GROQ_MIXTRAL_8_7B,    # Document processing can use Mixtral
}
# All roles share the same platform and deterministic sampling config
# (temperature=0), so build the model instances from the mapping above.
models = {
    role: ModelFactory.create(
        model_platform=ModelPlatformType.GROQ,
        model_type=model_type,
        model_config_dict={"temperature": 0},
    )
    for role, model_type in role_model_types.items()
}
@@ -129,6 +144,9 @@ def main():
question = "Navigate to Amazon.com and identify one product that is attractive to coders. Please provide me with the product name and price. No need to verify your answer."
# Construct and run the society
# Note: This configuration uses GROQ_LLAMA_3_3_70B for tool-intensive roles (assistant, web, planning, video, image)
# and GROQ_MIXTRAL_8_7B for document processing. GROQ_LLAMA_3_1_8B is used only for the user role
# which doesn't require tool usage capabilities.
society = construct_society(question)
answer, chat_history, token_count = run_society(society)