From 106a169ee93a97e54cee672d6ee4ade5df409cdf Mon Sep 17 00:00:00 2001 From: Wendong Date: Thu, 20 Mar 2025 19:54:34 +0800 Subject: [PATCH] update max_tokens setting in example --- examples/run_openai_compatiable_model.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/run_openai_compatiable_model.py b/examples/run_openai_compatiable_model.py index 213ee65..6a51449 100644 --- a/examples/run_openai_compatiable_model.py +++ b/examples/run_openai_compatiable_model.py @@ -56,35 +56,35 @@ def construct_society(question: str) -> RolePlaying: model_type="qwen-max", api_key=os.getenv("QWEN_API_KEY"), url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + model_config_dict={"temperature": 0.4, "max_tokens": 128000}, ), "assistant": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, model_type="qwen-max", api_key=os.getenv("QWEN_API_KEY"), url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + model_config_dict={"temperature": 0.4, "max_tokens": 128000}, ), "browsing": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, model_type="qwen-vl-max", api_key=os.getenv("QWEN_API_KEY"), url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + model_config_dict={"temperature": 0.4, "max_tokens": 128000}, ), "planning": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, model_type="qwen-max", api_key=os.getenv("QWEN_API_KEY"), url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + model_config_dict={"temperature": 0.4, "max_tokens": 128000}, ), "image": ModelFactory.create( model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL, model_type="qwen-vl-max", api_key=os.getenv("QWEN_API_KEY"), 
url="https://dashscope.aliyuncs.com/compatible-mode/v1", - model_config_dict={"temperature": 0.4, "max_tokens": 4096}, + model_config_dict={"temperature": 0.4, "max_tokens": 128000}, ), }