Fix issue #8145: Correct name for max_tokens for condenser in config.template.toml (#8165)

This commit is contained in:
Dani
2025-04-29 22:28:01 +02:00
committed by GitHub
parent a6d3db3ce7
commit c82b3378a6

View File

@@ -391,7 +391,7 @@ type = "noop"
#[llm.condenser]
#model = "gpt-4o"
#temperature = 0.1
-#max_tokens = 1024
+#max_input_tokens = 1024
#################################### Eval ####################################
# Configuration for the evaluation, please refer to the specific evaluation