Added an environment variable that allows skipping dependency checks (#9010)

This commit is contained in:
Tim O'Farrell 2025-06-09 11:14:39 -06:00 committed by GitHub
parent c6a4324bda
commit e5d21e003d
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
2 changed files with 31 additions and 17 deletions

View File

@ -307,8 +307,10 @@ class LocalRuntime(ActionExecutionClient):
env['PATH'] = f'{python_bin_path}{os.pathsep}{env.get("PATH", "")}'
logger.debug(f'Updated PATH for subprocesses: {env["PATH"]}')
# Check dependencies using the derived env_root_path
check_dependencies(code_repo_path, env_root_path)
# Check dependencies using the derived env_root_path if not skipped
if os.getenv('SKIP_DEPENDENCY_CHECK', '') != '1':
check_dependencies(code_repo_path, env_root_path)
self.server_process = subprocess.Popen( # noqa: S603
cmd,
stdout=subprocess.PIPE,
@ -407,7 +409,7 @@ class LocalRuntime(ActionExecutionClient):
@tenacity.retry(
wait=tenacity.wait_fixed(2),
stop=tenacity.stop_after_attempt(10) | stop_if_should_exit(),
stop=tenacity.stop_after_delay(120) | stop_if_should_exit(),
before_sleep=lambda retry_state: logger.debug(
f'Waiting for server to be ready... (attempt {retry_state.attempt_number})'
),

View File

@ -22,7 +22,6 @@ from openhands.events.nested_event_store import NestedEventStore
from openhands.events.stream import EventStream
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE, ProviderHandler
from openhands.llm.llm import LLM
from openhands.runtime.impl.docker.containers import stop_all_containers
from openhands.runtime.impl.docker.docker_runtime import DockerRuntime
from openhands.server.config.server_config import ServerConfig
from openhands.server.conversation_manager.conversation_manager import (
@ -90,7 +89,7 @@ class DockerNestedConversationManager(ConversationManager):
"""
Get the running agent loops directly from docker.
"""
containers : list[Container] = self.docker_client.containers.list()
containers: list[Container] = self.docker_client.containers.list()
names = (container.name or '' for container in containers)
conversation_ids = {
name[len('openhands-runtime-') :]
@ -284,7 +283,7 @@ class DockerNestedConversationManager(ConversationManager):
# First try to graceful stop server.
try:
container = self.docker_client.containers.get(f'openhands-runtime-{sid}')
except docker.errors.NotFound as e:
except docker.errors.NotFound:
return
try:
nested_url = self.get_nested_url_for_container(container)
@ -293,25 +292,33 @@ class DockerNestedConversationManager(ConversationManager):
'X-Session-API-Key': self._get_session_api_key_for_conversation(sid)
}
) as client:
response = await client.post(f'{nested_url}/api/conversations/{sid}/stop')
# Stop conversation
response = await client.post(
f'{nested_url}/api/conversations/{sid}/stop'
)
response.raise_for_status()
# Check up to 3 times that client has closed
for _ in range(3):
response = await client.get(f'{nested_url}/api/conversations/{sid}')
if response.status_code == status.HTTP_200_OK and response.json().get('status') == "STOPPED":
response.raise_for_status()
if response.json().get('status') == 'STOPPED':
break
await asyncio.sleep(1)
except Exception:
logger.exception("error_stopping_container")
except Exception as e:
logger.warning('error_stopping_container', extra={"sid": sid, "error": str(e)})
container.stop()
async def get_agent_loop_info(self, user_id: str | None = None, filter_to_sids: set[str] | None = None) -> list[AgentLoopInfo]:
async def get_agent_loop_info(
self, user_id: str | None = None, filter_to_sids: set[str] | None = None
) -> list[AgentLoopInfo]:
results = []
containers : list[Container] = self.docker_client.containers.list()
containers: list[Container] = self.docker_client.containers.list()
for container in containers:
if not container.name or not container.name.startswith('openhands-runtime-'):
if not container.name or not container.name.startswith(
'openhands-runtime-'
):
continue
conversation_id = container.name[len('openhands-runtime-') :]
if filter_to_sids is not None and conversation_id not in filter_to_sids:
@ -389,7 +396,9 @@ class DockerNestedConversationManager(ConversationManager):
)
return session_api_key
async def ensure_num_conversations_below_limit(self, sid: str, user_id: str | None) -> None:
async def ensure_num_conversations_below_limit(
self, sid: str, user_id: str | None
) -> None:
response_ids = await self.get_running_agent_loops(user_id)
if len(response_ids) >= self.config.max_concurrent_conversations:
logger.info(
@ -431,7 +440,9 @@ class DockerNestedConversationManager(ConversationManager):
)
return provider_handler
async def _create_runtime(self, sid: str, user_id: str | None, settings: Settings) -> DockerRuntime:
async def _create_runtime(
self, sid: str, user_id: str | None, settings: Settings
) -> DockerRuntime:
# This session is created here only because it is the easiest way to get a runtime, which
# is the easiest way to create the needed docker container
session = Session(
@ -463,8 +474,9 @@ class DockerNestedConversationManager(ConversationManager):
env_vars['SESSION_API_KEY'] = self._get_session_api_key_for_conversation(sid)
# We need to be able to specify the nested conversation id within the nested runtime
env_vars['ALLOW_SET_CONVERSATION_ID'] = '1'
env_vars['WORKSPACE_BASE'] = f'/workspace'
env_vars['WORKSPACE_BASE'] = '/workspace'
env_vars['SANDBOX_CLOSE_DELAY'] = '0'
env_vars['SKIP_DEPENDENCY_CHECK'] = '1'
# Set up mounted volume for conversation directory within workspace
# TODO: Check if we are using the standard event store and file store
@ -509,7 +521,7 @@ class DockerNestedConversationManager(ConversationManager):
await call_sync_from_async(container.start)
return True
return False
except docker.errors.NotFound as e:
except docker.errors.NotFound:
return False