Azure LLM fix (#1227)

* azure embedding fix

* corrected embedding config

* fixed doc
மனோஜ்குமார் பழனிச்சாமி 2024-04-20 04:35:14 +05:30 committed by GitHub
parent 3988443705
commit 0356f6ec89
8 changed files with 21 additions and 14 deletions

View File

@@ -167,8 +167,8 @@ setup-config-prompts:
elif [ "$$llm_embedding_model" = "azureopenai" ]; then \
read -p "Enter the Azure endpoint URL (will overwrite LLM_BASE_URL): " llm_base_url; \
echo "LLM_BASE_URL=\"$$llm_base_url\"" >> $(CONFIG_FILE).tmp; \
-read -p "Enter the Azure LLM Deployment Name: " llm_deployment_name; \
-echo "LLM_DEPLOYMENT_NAME=\"$$llm_deployment_name\"" >> $(CONFIG_FILE).tmp; \
+read -p "Enter the Azure LLM Embedding Deployment Name: " llm_embedding_deployment_name; \
+echo "LLM_EMBEDDING_DEPLOYMENT_NAME=\"$$llm_embedding_deployment_name\"" >> $(CONFIG_FILE).tmp; \
read -p "Enter the Azure API Version: " llm_api_version; \
echo "LLM_API_VERSION=\"$$llm_api_version\"" >> $(CONFIG_FILE).tmp; \
fi
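
For readers less familiar with Makefile recipes, the `azureopenai` branch above simply prompts for each Azure value and appends a `KEY="value"` line to the temporary config file. A minimal Python sketch of the same flow (the `config.toml.tmp` path and prompt wording here are illustrative, not taken from the repository):

```python
# Sketch of the azureopenai prompt flow above; the file name and prompts are assumptions.
config_path = 'config.toml.tmp'

prompts = {
    'LLM_BASE_URL': 'Enter the Azure endpoint URL (will overwrite LLM_BASE_URL): ',
    'LLM_EMBEDDING_DEPLOYMENT_NAME': 'Enter the Azure LLM Embedding Deployment Name: ',
    'LLM_API_VERSION': 'Enter the Azure API Version: ',
}

with open(config_path, 'a') as config_file:
    for key, prompt in prompts.items():
        value = input(prompt)
        # Mirrors the echo lines in the recipe: one KEY="value" line per answer.
        config_file.write(f'{key}="{value}"\n')
```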

View File

@@ -158,7 +158,7 @@ The following environment variables might be necessary for some LLMs:
* `LLM_API_KEY`
* `LLM_BASE_URL`
* `LLM_EMBEDDING_MODEL`
-* `LLM_DEPLOYMENT_NAME`
+* `LLM_EMBEDDING_DEPLOYMENT_NAME`
* `LLM_API_VERSION`
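
For illustration, a minimal sketch of reading the variables listed above from the environment; the `'local'` fallback for `LLM_EMBEDDING_MODEL` mirrors the `DEFAULT_CONFIG` entry further down in this commit, while treating the other values as optional is an assumption:

```python
import os

# Collect the LLM-related settings listed above; unset values stay None.
llm_settings = {
    'LLM_API_KEY': os.getenv('LLM_API_KEY'),
    'LLM_BASE_URL': os.getenv('LLM_BASE_URL'),
    'LLM_EMBEDDING_MODEL': os.getenv('LLM_EMBEDDING_MODEL', 'local'),
    'LLM_EMBEDDING_DEPLOYMENT_NAME': os.getenv('LLM_EMBEDDING_DEPLOYMENT_NAME'),
    'LLM_API_VERSION': os.getenv('LLM_API_VERSION'),
}
print(llm_settings)
```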
We have a few guides for running OpenDevin with specific model providers:

View File

@@ -32,7 +32,7 @@ elif embedding_strategy == 'azureopenai':
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding
embed_model = AzureOpenAIEmbedding(
model='text-embedding-ada-002',
-deployment_name=config.get('LLM_DEPLOYMENT_NAME', required=True),
+deployment_name=config.get('LLM_EMBEDDING_DEPLOYMENT_NAME', required=True),
api_key=config.get('LLM_API_KEY', required=True),
azure_endpoint=config.get('LLM_BASE_URL', required=True),
api_version=config.get('LLM_API_VERSION', required=True),
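
Once constructed as above, the Azure embedding model behaves like any other llama_index embedding backend. A small, self-contained sketch with placeholder credentials (the placeholder values and sample sentence are illustrative, not repository code):

```python
from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

# Placeholders stand in for the config.get(...) lookups used in the diff above.
embed_model = AzureOpenAIEmbedding(
    model='text-embedding-ada-002',
    deployment_name='<your-embedding-deployment-name>',
    api_key='<azure-api-key>',
    azure_endpoint='https://<your-resource>.openai.azure.com/',
    api_version='2024-02-15-preview',
)

# text-embedding-ada-002 returns a 1536-dimensional vector per input string.
vector = embed_model.get_text_embedding('OpenDevin memory test sentence')
print(len(vector))
```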

View File

@@ -160,7 +160,7 @@ OpenDevin can work with any LLM backend.
* `LLM_API_KEY`
* `LLM_BASE_URL`
* `LLM_EMBEDDING_MODEL`
-* `LLM_DEPLOYMENT_NAME`
+* `LLM_EMBEDDING_DEPLOYMENT_NAME`
* `LLM_API_VERSION`
**A note on alternative models:**

View File

@@ -11,7 +11,7 @@ When running the OpenDevin Docker image, you'll need to set the following environment variables:
LLM_BASE_URL="<azure-api-base-url>" # e.g. "https://openai-gpt-4-test-v-1.openai.azure.com/"
LLM_API_KEY="<azure-api-key>"
LLM_MODEL="azure/<your-gpt-deployment-name>"
-AZURE_API_VERSION = "<api-version>" # e.g. "2024-02-15-preview"
+LLM_API_VERSION = "<api-version>" # e.g. "2024-02-15-preview"
```
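
These four variables are ultimately handed to LiteLLM. A minimal sketch of the equivalent direct call, with placeholder deployment name, key, and endpoint; it assumes `litellm.completion` accepts the `api_key`, `base_url`, and `api_version` keyword arguments, which is how the `llm.py` change later in this commit invokes it:

```python
import litellm

# Placeholder values; in OpenDevin these come from the environment variables above.
response = litellm.completion(
    model='azure/<your-gpt-deployment-name>',
    messages=[{'role': 'user', 'content': 'Hello from OpenDevin'}],
    api_key='<azure-api-key>',
    base_url='https://openai-gpt-4-test-v-1.openai.azure.com/',
    api_version='2024-02-15-preview',
)
print(response.choices[0].message.content)
```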
# 2. Embeddings
@@ -26,6 +26,6 @@ You need the correct deployment name for this model in your Azure account.
When running OpenDevin in Docker, set the following environment variables using `-e`:
```
LLM_EMBEDDING_MODEL="azureopenai"
-DEPLOYMENT_NAME = "<your-embedding-deployment-name>" # e.g. "TextEmbedding...<etc>"
+LLM_EMBEDDING_DEPLOYMENT_NAME = "<your-embedding-deployment-name>" # e.g. "TextEmbedding...<etc>"
LLM_API_VERSION = "<api-version>" # e.g. "2024-02-15-preview"
```

View File

@@ -18,7 +18,7 @@ DEFAULT_CONFIG: dict = {
ConfigType.SANDBOX_CONTAINER_IMAGE: 'ghcr.io/opendevin/sandbox',
ConfigType.RUN_AS_DEVIN: 'true',
ConfigType.LLM_EMBEDDING_MODEL: 'local',
-ConfigType.LLM_DEPLOYMENT_NAME: None,
+ConfigType.LLM_EMBEDDING_DEPLOYMENT_NAME: None,
ConfigType.LLM_API_VERSION: None,
ConfigType.LLM_NUM_RETRIES: 1,
ConfigType.LLM_COOLDOWN_TIME: 1,
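
The `None` defaults matter because callers such as the embedding setup fetch these keys with `required=True`. A rough sketch of how such a lookup could behave; this is an assumption for illustration, not the repository's actual `config.get` implementation:

```python
import os

# Hypothetical lookup order: environment first, then the defaults above.
DEFAULT_CONFIG = {
    'LLM_EMBEDDING_MODEL': 'local',
    'LLM_EMBEDDING_DEPLOYMENT_NAME': None,
    'LLM_API_VERSION': None,
}

def get(key: str, required: bool = False):
    value = os.environ.get(key, DEFAULT_CONFIG.get(key))
    if required and value is None:
        # A required Azure setting that was never provided fails fast.
        raise ValueError(f'{key} is required but not set')
    return value
```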

View File

@@ -12,6 +12,7 @@ DEFAULT_BASE_URL = config.get('LLM_BASE_URL')
DEFAULT_MODEL_NAME = config.get('LLM_MODEL')
DEFAULT_LLM_NUM_RETRIES = config.get('LLM_NUM_RETRIES')
DEFAULT_LLM_COOLDOWN_TIME = config.get('LLM_COOLDOWN_TIME')
+DEFAULT_API_VERSION = config.get('LLM_API_VERSION')
class LLM:
@@ -21,14 +22,16 @@ class LLM:
base_url=DEFAULT_BASE_URL,
num_retries=DEFAULT_LLM_NUM_RETRIES,
cooldown_time=DEFAULT_LLM_COOLDOWN_TIME,
+api_version=DEFAULT_API_VERSION,
):
opendevin_logger.info(f'Initializing LLM with model: {model}')
-self.model_name = model if model else DEFAULT_MODEL_NAME
-self.api_key = api_key if api_key else DEFAULT_API_KEY
-self.base_url = base_url if base_url else DEFAULT_BASE_URL
+self.model_name = model
+self.api_key = api_key
+self.base_url = base_url
+self.api_version = api_version
self._completion = partial(
-litellm_completion, model=self.model_name, api_key=self.api_key, base_url=self.base_url)
+litellm_completion, model=self.model_name, api_key=self.api_key, base_url=self.base_url, api_version=self.api_version)
completion_unwrapped = self._completion
@@ -64,4 +67,8 @@ class LLM:
return self._completion
def __str__(self):
-return f'LLM(model={self.model_name}, base_url={self.base_url})'
+if self.api_version:
+return f'LLM(model={self.model_name}, api_version={self.api_version}, base_url={self.base_url})'
+elif self.base_url:
+return f'LLM(model={self.model_name}, base_url={self.base_url})'
+return f'LLM(model={self.model_name})'
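
Taken together, the `llm.py` changes thread `api_version` from the config default into the wrapped `litellm_completion` call. A hedged usage sketch with placeholder values; it assumes the class exposes a `completion` property that returns the wrapped call, as the `return self._completion` line above suggests, and the import path is taken from the repository layout:

```python
from opendevin.llm.llm import LLM

# Placeholder Azure values; normally these fall back to the config defaults.
llm = LLM(
    model='azure/<your-gpt-deployment-name>',
    api_key='<azure-api-key>',
    base_url='https://<your-resource>.openai.azure.com/',
    api_version='2024-02-15-preview',
)

print(llm)  # __str__ now reports api_version when it is set
response = llm.completion(messages=[{'role': 'user', 'content': 'ping'}])
print(response.choices[0].message.content)
```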

View File

@@ -11,7 +11,7 @@ class ConfigType(str, Enum):
SANDBOX_CONTAINER_IMAGE = 'SANDBOX_CONTAINER_IMAGE'
RUN_AS_DEVIN = 'RUN_AS_DEVIN'
LLM_EMBEDDING_MODEL = 'LLM_EMBEDDING_MODEL'
-LLM_DEPLOYMENT_NAME = 'LLM_DEPLOYMENT_NAME'
+LLM_EMBEDDING_DEPLOYMENT_NAME = 'LLM_EMBEDDING_DEPLOYMENT_NAME'
LLM_API_VERSION = 'LLM_API_VERSION'
LLM_NUM_RETRIES = 'LLM_NUM_RETRIES'
LLM_COOLDOWN_TIME = 'LLM_COOLDOWN_TIME'
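
Because `ConfigType` mixes in `str`, the renamed member still compares equal to its plain string value, so string-keyed lookups keep working. A tiny standalone illustration (the enum is re-declared here only for the sketch):

```python
from enum import Enum

class ConfigType(str, Enum):
    LLM_EMBEDDING_DEPLOYMENT_NAME = 'LLM_EMBEDDING_DEPLOYMENT_NAME'
    LLM_API_VERSION = 'LLM_API_VERSION'

# str mix-in: members compare equal to their raw string values.
assert ConfigType.LLM_EMBEDDING_DEPLOYMENT_NAME == 'LLM_EMBEDDING_DEPLOYMENT_NAME'
assert ConfigType.LLM_API_VERSION.value == 'LLM_API_VERSION'
```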