localhost base_url fixup when running in a docker container (#11474)

Co-authored-by: Rohit Malhotra <rohitvinodmalhotra@gmail.com>
This commit is contained in:
eddierichter-amd
2025-11-04 15:57:25 -07:00
committed by GitHub
parent 308d0e62ab
commit c544ea1187
6 changed files with 128 additions and 5 deletions

View File

@@ -23,6 +23,7 @@ from openhands.resolver.patching import apply_diff, parse_patch
from openhands.resolver.resolver_output import ResolverOutput
from openhands.resolver.utils import identify_token
from openhands.utils.async_utils import GENERAL_TIMEOUT, call_async_from_sync
from openhands.utils.environment import get_effective_llm_base_url
def apply_patch(repo_dir: str, patch: str) -> None:
@@ -707,10 +708,16 @@ def main() -> None:
)
api_key = my_args.llm_api_key or os.environ['LLM_API_KEY']
model_name = my_args.llm_model or os.environ['LLM_MODEL']
base_url = my_args.llm_base_url or os.environ.get('LLM_BASE_URL')
resolved_base_url = get_effective_llm_base_url(
model_name,
base_url,
)
llm_config = LLMConfig(
model=my_args.llm_model or os.environ['LLM_MODEL'],
model=model_name,
api_key=SecretStr(api_key) if api_key else None,
base_url=my_args.llm_base_url or os.environ.get('LLM_BASE_URL', None),
base_url=resolved_base_url,
)
if not os.path.exists(my_args.output_dir):

View File

@@ -91,6 +91,7 @@ from openhands.storage.locations import get_experiment_config_filename
from openhands.storage.settings.settings_store import SettingsStore
from openhands.utils.async_utils import wait_all
from openhands.utils.conversation_summary import get_default_conversation_title
from openhands.utils.environment import get_effective_llm_base_url
app = APIRouter(prefix='/api', dependencies=get_dependencies())
app_conversation_service_dependency = depends_app_conversation_service()
@@ -545,10 +546,15 @@ async def get_prompt(
# placeholder for error handling
raise ValueError('Settings not found')
settings_base_url = settings.llm_base_url
effective_base_url = get_effective_llm_base_url(
settings.llm_model,
settings_base_url,
)
llm_config = LLMConfig(
model=settings.llm_model or '',
api_key=settings.llm_api_key,
base_url=settings.llm_base_url,
base_url=effective_base_url,
)
prompt_template = generate_prompt_template(stringified_events)

View File

@@ -10,6 +10,7 @@ from openhands.events.event_store import EventStore
from openhands.llm.llm_registry import LLMRegistry
from openhands.storage.data_models.settings import Settings
from openhands.storage.files import FileStore
from openhands.utils.environment import get_effective_llm_base_url
async def generate_conversation_title(
@@ -114,10 +115,15 @@ async def auto_generate_title(
try:
if settings and settings.llm_model:
# Create LLM config from settings
settings_base_url = settings.llm_base_url
effective_base_url = get_effective_llm_base_url(
settings.llm_model,
settings_base_url,
)
llm_config = LLMConfig(
model=settings.llm_model,
api_key=settings.llm_api_key,
base_url=settings.llm_base_url,
base_url=effective_base_url,
)
# Try to generate title using LLM

View File

@@ -0,0 +1,58 @@
from __future__ import annotations
import os
from functools import lru_cache
from pathlib import Path
# Fallback Lemonade endpoint used from inside a Docker container;
# 'host.docker.internal' resolves to the host machine so the containerized
# app can reach a Lemonade server listening on the host's port 8000.
LEMONADE_DOCKER_BASE_URL = 'http://host.docker.internal:8000/api/v1/'
# Custom-provider name that identifies Lemonade (compared case-insensitively).
_LEMONADE_PROVIDER_NAME = 'lemonade'
# litellm-style 'provider/model' prefix that identifies Lemonade models.
_LEMONADE_MODEL_PREFIX = 'lemonade/'
@lru_cache(maxsize=1)
def is_running_in_docker() -> bool:
    """Best-effort detection for Docker containers.

    Checks, in order:
      1. well-known container marker files (``/.dockerenv`` for Docker,
         ``/run/.containerenv`` for podman-compatible runtimes),
      2. the ``DOCKER_CONTAINER`` environment variable,
      3. container-runtime tokens in ``/proc/self/cgroup``.

    The result is cached for the lifetime of the process (``maxsize=1``).

    Returns:
        True if any marker indicates a container runtime, False otherwise.
    """
    docker_env_markers = (
        Path('/.dockerenv'),
        Path('/run/.containerenv'),
    )
    if any(marker.exists() for marker in docker_env_markers):
        return True
    if os.environ.get('DOCKER_CONTAINER') == 'true':
        return True
    try:
        with Path('/proc/self/cgroup').open('r', encoding='utf-8') as cgroup_file:
            for line in cgroup_file:
                if any(
                    token in line for token in ('docker', 'containerd', 'kubepods')
                ):
                    return True
    except (OSError, UnicodeDecodeError):
        # /proc may be missing (non-Linux) or unreadable (restricted
        # environments). The original code caught only FileNotFoundError,
        # letting PermissionError and other OSErrors crash what is meant to
        # be a best-effort probe — treat any read failure as "not in docker".
        pass
    return False
def is_lemonade_provider(
    model: str | None,
    custom_provider: str | None = None,
) -> bool:
    """Return True when the model/provider pair points at Lemonade.

    A match happens either through an explicit custom provider name
    (whitespace- and case-insensitive) or through a 'lemonade/' model
    prefix; either input may be None.
    """
    normalized_provider = custom_provider.strip().lower() if custom_provider else ''
    model_name = model if model is not None else ''
    return (
        normalized_provider == _LEMONADE_PROVIDER_NAME
        or model_name.startswith(_LEMONADE_MODEL_PREFIX)
    )
def get_effective_llm_base_url(
    model: str | None,
    base_url: str | None,
    custom_provider: str | None = None,
) -> str | None:
    """Return the runtime LLM base URL with provider-specific overrides.

    Only when no base URL was configured, the target is a Lemonade
    provider, and the process runs inside a Docker container is the
    docker-reachable Lemonade endpoint substituted; every other case
    passes the configured value through unchanged.
    """
    # Guard clauses: any explicitly configured URL always wins.
    if base_url is not None and base_url != '':
        return base_url
    if not is_lemonade_provider(model, custom_provider):
        return base_url
    if not is_running_in_docker():
        return base_url
    return LEMONADE_DOCKER_BASE_URL

View File

@@ -1,3 +1,4 @@
import os
from copy import deepcopy
from openhands.core.config.openhands_config import OpenHandsConfig
@@ -5,6 +6,7 @@ from openhands.llm.llm_registry import LLMRegistry
from openhands.server.services.conversation_stats import ConversationStats
from openhands.storage import get_file_store
from openhands.storage.data_models.settings import Settings
from openhands.utils.environment import get_effective_llm_base_url
def setup_llm_config(config: OpenHandsConfig, settings: Settings) -> OpenHandsConfig:
@@ -14,7 +16,19 @@ def setup_llm_config(config: OpenHandsConfig, settings: Settings) -> OpenHandsCo
llm_config = config.get_llm_config()
llm_config.model = settings.llm_model or ''
llm_config.api_key = settings.llm_api_key
llm_config.base_url = settings.llm_base_url
env_base_url = os.environ.get('LLM_BASE_URL')
settings_base_url = settings.llm_base_url
# Use env_base_url if available, otherwise fall back to settings_base_url
base_url_to_use = (
env_base_url if env_base_url not in (None, '') else settings_base_url
)
llm_config.base_url = get_effective_llm_base_url(
llm_config.model,
base_url_to_use,
llm_config.custom_llm_provider,
)
config.set_llm_config(llm_config)
return config

View File

@@ -0,0 +1,32 @@
import pytest
from openhands.utils import environment
@pytest.fixture(autouse=True)
def clear_docker_cache():
    """Reset the lru_cache on is_running_in_docker before and after each test."""
    cache_clear = getattr(environment.is_running_in_docker, 'cache_clear', None)
    if cache_clear is not None:
        cache_clear()
    yield
    # Re-fetch: a test may have monkeypatched the function away and back.
    cache_clear = getattr(environment.is_running_in_docker, 'cache_clear', None)
    if cache_clear is not None:
        cache_clear()
def test_get_effective_base_url_lemonade_in_docker(monkeypatch):
    """A Lemonade model with no configured URL gets the docker override."""
    monkeypatch.setattr(environment, 'is_running_in_docker', lambda: True)
    assert (
        environment.get_effective_llm_base_url('lemonade/example', None)
        == environment.LEMONADE_DOCKER_BASE_URL
    )
def test_get_effective_base_url_lemonade_outside_docker(monkeypatch):
    """Outside Docker, Lemonade never triggers the docker override.

    The original test only checked the explicit-base-url case, which
    returns the configured URL regardless of docker state — it passed
    even if the docker gate were broken. Also assert the no-base-url
    path stays None outside Docker.
    """
    monkeypatch.setattr(environment, 'is_running_in_docker', lambda: False)
    base_url = 'http://localhost:8000/api/v1/'
    result = environment.get_effective_llm_base_url('lemonade/example', base_url)
    assert result == base_url
    # No configured URL + not in docker: must stay None, not be overridden.
    assert environment.get_effective_llm_base_url('lemonade/example', None) is None
def test_get_effective_base_url_non_lemonade(monkeypatch):
    """Non-Lemonade models pass the configured URL through, even in Docker."""
    monkeypatch.setattr(environment, 'is_running_in_docker', lambda: True)
    configured_url = 'https://api.example.com'
    assert (
        environment.get_effective_llm_base_url('openai/gpt-4', configured_url)
        == configured_url
    )