Merge branch 'main' into openhands/fix-github-resolver-auth-error

commit eeb81ecc49
Graham Neubig, 2025-12-02 23:20:25 -05:00, committed by GitHub
706 changed files with 30753 additions and 25814 deletions

.devcontainer/README.md (new file)

@@ -0,0 +1 @@
This way of running OpenHands is not officially supported. It is maintained by the community.

@@ -7,5 +7,8 @@ git config --global --add safe.directory "$(realpath .)"
# Install `nc`
sudo apt update && sudo apt install netcat -y
# Install `uv` and `uvx`
wget -qO- https://astral.sh/uv/install.sh | sh
# Do common setup tasks
source .openhands/setup.sh

@@ -13,6 +13,7 @@
- [ ] Other (dependency update, docs, typo fixes, etc.)
## Checklist
<!-- AI/LLM AGENTS: This checklist is for a human author to complete. Do NOT check either of the two boxes below. Leave them unchecked until a human has personally reviewed and tested the changes. -->
- [ ] I have read and reviewed the code and I understand what the code is doing.
- [ ] I have tested the code to the best of my ability and ensured it works as expected.

@@ -1,73 +0,0 @@
#!/usr/bin/env python3
import os
import re
import sys


def find_version_references(directory: str) -> tuple[set[str], set[str]]:
    openhands_versions = set()
    runtime_versions = set()

    version_pattern_openhands = re.compile(r'openhands:(\d{1})\.(\d{2})')
    version_pattern_runtime = re.compile(r'runtime:(\d{1})\.(\d{2})')

    for root, _, files in os.walk(directory):
        # Skip .git directory and docs/build directory
        if '.git' in root or 'docs/build' in root:
            continue

        for file in files:
            if file.endswith(
                ('.md', '.yml', '.yaml', '.txt', '.html', '.py', '.js', '.ts')
            ):
                file_path = os.path.join(root, file)
                try:
                    with open(file_path, 'r', encoding='utf-8') as f:
                        content = f.read()

                    # Find all openhands version references
                    matches = version_pattern_openhands.findall(content)
                    if matches:
                        print(f'Found openhands version {matches} in {file_path}')
                        openhands_versions.update(matches)

                    # Find all runtime version references
                    matches = version_pattern_runtime.findall(content)
                    if matches:
                        print(f'Found runtime version {matches} in {file_path}')
                        runtime_versions.update(matches)
                except Exception as e:
                    print(f'Error reading {file_path}: {e}', file=sys.stderr)

    return openhands_versions, runtime_versions


def main():
    repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
    print(f'Checking version consistency in {repo_root}')

    openhands_versions, runtime_versions = find_version_references(repo_root)
    print(f'Found openhands versions: {sorted(openhands_versions)}')
    print(f'Found runtime versions: {sorted(runtime_versions)}')

    exit_code = 0
    if len(openhands_versions) > 1:
        print('Error: Multiple openhands versions found:', file=sys.stderr)
        print('Found versions:', sorted(openhands_versions), file=sys.stderr)
        exit_code = 1
    elif len(openhands_versions) == 0:
        print('Warning: No openhands version references found', file=sys.stderr)

    if len(runtime_versions) > 1:
        print('Error: Multiple runtime versions found:', file=sys.stderr)
        print('Found versions:', sorted(runtime_versions), file=sys.stderr)
        exit_code = 1
    elif len(runtime_versions) == 0:
        print('Warning: No runtime version references found', file=sys.stderr)

    sys.exit(exit_code)


if __name__ == '__main__':
    main()

@@ -13,12 +13,9 @@ DOCKER_RUN_COMMAND="docker run -it --rm \
-p 3000:3000 \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
-e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:${SHORT_SHA}-nikolaik \
-e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:${SHORT_SHA}-nikolaik \
--name openhands-app-${SHORT_SHA} \
docker.all-hands.dev/all-hands-ai/openhands:${SHORT_SHA}"
# Define the uvx command
UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/All-Hands-AI/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
docker.openhands.dev/openhands/openhands:${SHORT_SHA}"
# Get the current PR body
PR_BODY=$(gh pr view "$PR_NUMBER" --json body --jq .body)
@@ -37,11 +34,6 @@ GUI with Docker:
\`\`\`
${DOCKER_RUN_COMMAND}
\`\`\`
CLI with uvx:
\`\`\`
${UVX_RUN_COMMAND}
\`\`\`
EOF
)
else
@@ -57,11 +49,6 @@ GUI with Docker:
\`\`\`
${DOCKER_RUN_COMMAND}
\`\`\`
CLI with uvx:
\`\`\`
${UVX_RUN_COMMAND}
\`\`\`
EOF
)
fi

@@ -0,0 +1,65 @@
name: Check Package Versions

on:
  push:
    branches: [main]
  pull_request:
  workflow_dispatch:

jobs:
  check-package-versions:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Check for any 'rev' fields in pyproject.toml
        run: |
          python - <<'PY'
          import sys, tomllib, pathlib

          path = pathlib.Path("pyproject.toml")
          if not path.exists():
              print("❌ ERROR: pyproject.toml not found")
              sys.exit(1)

          try:
              data = tomllib.loads(path.read_text(encoding="utf-8"))
          except Exception as e:
              print(f"❌ ERROR: Failed to parse pyproject.toml: {e}")
              sys.exit(1)

          poetry = data.get("tool", {}).get("poetry", {})
          sections = {
              "dependencies": poetry.get("dependencies", {}),
          }

          errors = []
          print("🔍 Checking for any dependencies with 'rev' fields...\n")
          for section_name, deps in sections.items():
              if not isinstance(deps, dict):
                  continue
              for pkg_name, cfg in deps.items():
                  if isinstance(cfg, dict) and "rev" in cfg:
                      msg = f" ✖ {pkg_name} in [{section_name}] uses rev='{cfg['rev']}' (NOT ALLOWED)"
                      print(msg)
                      errors.append(msg)
                  else:
                      print(f" • {pkg_name}: OK")

          if errors:
              print("\n❌ FAILED: Found dependencies using 'rev' fields:\n" + "\n".join(errors))
              print("\nPlease use versioned releases instead, e.g.:")
              print(' my-package = "1.0.0"')
              sys.exit(1)

          print("\n✅ SUCCESS: No 'rev' fields found. All dependencies are using proper versioned releases.")
          PY

@@ -1,69 +0,0 @@
# Workflow that cleans up outdated and old workflows to prevent out of disk issues
name: Delete old workflow runs

# This workflow is currently only triggered manually
on:
  workflow_dispatch:
    inputs:
      days:
        description: 'Days-worth of runs to keep for each workflow'
        required: true
        default: '30'
      minimum_runs:
        description: 'Minimum runs to keep for each workflow'
        required: true
        default: '10'
      delete_workflow_pattern:
        description: 'Name or filename of the workflow (if not set, all workflows are targeted)'
        required: false
      delete_workflow_by_state_pattern:
        description: 'Filter workflows by state: active, deleted, disabled_fork, disabled_inactivity, disabled_manually'
        required: true
        default: "ALL"
        type: choice
        options:
          - "ALL"
          - active
          - deleted
          - disabled_inactivity
          - disabled_manually
      delete_run_by_conclusion_pattern:
        description: 'Remove runs based on conclusion: action_required, cancelled, failure, skipped, success'
        required: true
        default: 'ALL'
        type: choice
        options:
          - 'ALL'
          - 'Unsuccessful: action_required,cancelled,failure,skipped'
          - action_required
          - cancelled
          - failure
          - skipped
          - success
      dry_run:
        description: 'Logs simulated changes, no deletions are performed'
        required: false

jobs:
  del_runs:
    runs-on: blacksmith-4vcpu-ubuntu-2204
    permissions:
      actions: write
      contents: read
    steps:
      - name: Delete workflow runs
        uses: Mattraks/delete-workflow-runs@v2
        with:
          token: ${{ github.token }}
          repository: ${{ github.repository }}
          retain_days: ${{ github.event.inputs.days }}
          keep_minimum_runs: ${{ github.event.inputs.minimum_runs }}
          delete_workflow_pattern: ${{ github.event.inputs.delete_workflow_pattern }}
          delete_workflow_by_state_pattern: ${{ github.event.inputs.delete_workflow_by_state_pattern }}
          delete_run_by_conclusion_pattern: >-
            ${{
              startsWith(github.event.inputs.delete_run_by_conclusion_pattern, 'Unsuccessful:')
              && 'action_required,cancelled,failure,skipped'
              || github.event.inputs.delete_run_by_conclusion_pattern
            }}
          dry_run: ${{ github.event.inputs.dry_run }}

@@ -1,114 +0,0 @@
# Workflow that builds and tests the CLI binary executable
name: CLI - Build binary and optionally release

# Run on pushes to main branch and CLI tags, and on pull requests when CLI files change
on:
  push:
    branches:
      - main
    tags:
      - "*-cli"
  pull_request:
    paths:
      - "openhands-cli/**"

permissions:
  contents: write # needed to create releases or upload assets

# Cancel previous runs if a new commit is pushed
concurrency:
  group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
  cancel-in-progress: true

jobs:
  build-binary:
    name: Build binary executable
    strategy:
      matrix:
        include:
          # Build on Ubuntu 22.04 for maximum GLIBC compatibility (GLIBC 2.31)
          - os: ubuntu-22.04
            platform: linux
            artifact_name: openhands-cli-linux
          # Build on macOS for macOS users
          - os: macos-15
            platform: macos
            artifact_name: openhands-cli-macos
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: 3.12
      - name: Install uv
        uses: astral-sh/setup-uv@v3
        with:
          version: "latest"
      - name: Install dependencies
        working-directory: openhands-cli
        run: |
          uv sync
      - name: Build binary executable
        working-directory: openhands-cli
        run: |
          ./build.sh --install-pyinstaller | tee output.log
          echo "Full output:"
          cat output.log
          if grep -q "❌" output.log; then
            echo "❌ Found failure marker in output"
            exit 1
          fi
          echo "✅ Build & test finished without ❌ markers"
      - name: Upload binary artifact
        uses: actions/upload-artifact@v4
        with:
          name: ${{ matrix.artifact_name }}
          path: openhands-cli/dist/openhands*
          retention-days: 30

  create-github-release:
    name: Create GitHub Release
    runs-on: ubuntu-latest
    needs: build-binary
    if: startsWith(github.ref, 'refs/tags/')
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Download all artifacts
        uses: actions/download-artifact@v4
        with:
          path: artifacts
      - name: Prepare release assets
        run: |
          mkdir -p release-assets
          # Copy binaries with appropriate names for release
          if [ -f artifacts/openhands-cli-linux/openhands ]; then
            cp artifacts/openhands-cli-linux/openhands release-assets/openhands-linux
          fi
          if [ -f artifacts/openhands-cli-macos/openhands ]; then
            cp artifacts/openhands-cli-macos/openhands release-assets/openhands-macos
          fi
          ls -la release-assets/
      - name: Create GitHub Release
        uses: softprops/action-gh-release@v2
        with:
          files: release-assets/*
          draft: true
          prerelease: false
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -1,23 +0,0 @@
name: Dispatch to docs repo

on:
  push:
    branches: [main]
    paths:
      - 'docs/**'
  workflow_dispatch:

jobs:
  dispatch:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        repo: ["All-Hands-AI/docs"]
    steps:
      - name: Push to docs repo
        uses: peter-evans/repository-dispatch@v3
        with:
          token: ${{ secrets.ALLHANDS_BOT_GITHUB_PAT }}
          repository: ${{ matrix.repo }}
          event-type: update
          client-payload: '{"ref": "${{ github.ref }}", "sha": "${{ github.sha }}", "module": "openhands", "branch": "main"}'

@@ -26,4 +26,4 @@ jobs:
-H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
-H "Accept: application/vnd.github+json" \
-d "{\"ref\": \"main\", \"inputs\": {\"openhandsPrNumber\": \"${{ github.event.pull_request.number }}\", \"deployEnvironment\": \"feature\", \"enterpriseImageTag\": \"pr-${{ github.event.pull_request.number }}\" }}" \
https://api.github.com/repos/All-Hands-AI/deploy/actions/workflows/deploy.yaml/dispatches
https://api.github.com/repos/OpenHands/deploy/actions/workflows/deploy.yaml/dispatches

@@ -37,7 +37,6 @@ jobs:
shell: bash
id: define-base-images
run: |
# Only build nikolaik on PRs, otherwise build both nikolaik and ubuntu.
if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
json=$(jq -n -c '[
{ image: "nikolaik/python-nodejs:python3.12-nodejs22", tag: "nikolaik" },
@@ -46,7 +45,6 @@
else
json=$(jq -n -c '[
{ image: "nikolaik/python-nodejs:python3.12-nodejs22", tag: "nikolaik" },
{ image: "ghcr.io/all-hands-ai/python-nodejs:python3.13-nodejs22-trixie", tag: "trixie" },
{ image: "ubuntu:24.04", tag: "ubuntu" }
]')
fi
@@ -88,7 +86,7 @@ jobs:
# Builds the runtime Docker images
ghcr_build_runtime:
name: Build Image
name: Build Runtime Image
runs-on: blacksmith-8vcpu-ubuntu-2204
if: "!(github.event_name == 'push' && startsWith(github.ref, 'refs/tags/ext-v'))"
permissions:
@@ -200,7 +198,7 @@ jobs:
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/all-hands-ai/enterprise-server
images: ghcr.io/openhands/enterprise-server
tags: |
type=ref,event=branch
type=ref,event=pr
@@ -252,13 +250,13 @@ jobs:
-H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
-H "Accept: application/vnd.github+json" \
-d "{\"ref\": \"main\", \"inputs\": {\"openhandsPrNumber\": \"${{ github.event.pull_request.number }}\", \"deployEnvironment\": \"feature\", \"enterpriseImageTag\": \"pr-${{ github.event.pull_request.number }}\" }}" \
https://api.github.com/repos/All-Hands-AI/deploy/actions/workflows/deploy.yaml/dispatches
https://api.github.com/repos/OpenHands/deploy/actions/workflows/deploy.yaml/dispatches
# Run unit tests with the Docker runtime Docker images as root
test_runtime_root:
name: RT Unit Tests (Root)
needs: [ghcr_build_runtime, define-matrix]
runs-on: blacksmith-8vcpu-ubuntu-2204
runs-on: blacksmith-4vcpu-ubuntu-2404
strategy:
fail-fast: false
matrix:
@@ -300,7 +298,7 @@ jobs:
# We install pytest-xdist in order to run tests across CPUs
poetry run pip install pytest-xdist
# Install to be able to retry on failures for flaky tests
# Install to be able to retry on failures for flakey tests
poetry run pip install pytest-rerunfailures
image_name=ghcr.io/${{ env.REPO_OWNER }}/runtime:${{ env.RELEVANT_SHA }}-${{ matrix.base_image.tag }}
@@ -313,14 +311,14 @@
SANDBOX_RUNTIME_CONTAINER_IMAGE=$image_name \
TEST_IN_CI=true \
RUN_AS_OPENHANDS=false \
poetry run pytest -n 0 -raRs --reruns 2 --reruns-delay 5 -s ./tests/runtime --ignore=tests/runtime/test_browsergym_envs.py --durations=10
poetry run pytest -n 5 -raRs --reruns 2 --reruns-delay 3 -s ./tests/runtime --ignore=tests/runtime/test_browsergym_envs.py --durations=10
env:
DEBUG: "1"
# Run unit tests with the Docker runtime Docker images as openhands user
test_runtime_oh:
name: RT Unit Tests (openhands)
runs-on: blacksmith-8vcpu-ubuntu-2204
runs-on: blacksmith-4vcpu-ubuntu-2404
needs: [ghcr_build_runtime, define-matrix]
strategy:
matrix:
@@ -372,7 +370,7 @@ jobs:
SANDBOX_RUNTIME_CONTAINER_IMAGE=$image_name \
TEST_IN_CI=true \
RUN_AS_OPENHANDS=true \
poetry run pytest -n 0 -raRs --reruns 2 --reruns-delay 5 -s ./tests/runtime --ignore=tests/runtime/test_browsergym_envs.py --durations=10
poetry run pytest -n 5 -raRs --reruns 2 --reruns-delay 3 -s ./tests/runtime --ignore=tests/runtime/test_browsergym_envs.py --durations=10
env:
DEBUG: "1"

@@ -1,199 +0,0 @@
name: Run Integration Tests

on:
  pull_request:
    types: [labeled]
  workflow_dispatch:
    inputs:
      reason:
        description: 'Reason for manual trigger'
        required: true
        default: ''
  schedule:
    - cron: '30 22 * * *' # Runs at 10:30pm UTC every day

env:
  N_PROCESSES: 10 # Global configuration for number of parallel processes for evaluation

jobs:
  run-integration-tests:
    if: github.event.label.name == 'integration-test' || github.event_name == 'workflow_dispatch' || github.event_name == 'schedule'
    runs-on: blacksmith-4vcpu-ubuntu-2204
    permissions:
      contents: "read"
      id-token: "write"
      pull-requests: "write"
      issues: "write"
    strategy:
      matrix:
        python-version: ["3.12"]
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
      - name: Install poetry via pipx
        run: pipx install poetry
      - name: Set up Python
        uses: useblacksmith/setup-python@v6
        with:
          python-version: ${{ matrix.python-version }}
          cache: "poetry"
      - name: Setup Node.js
        uses: useblacksmith/setup-node@v5
        with:
          node-version: '22.x'
      - name: Comment on PR if 'integration-test' label is present
        if: github.event_name == 'pull_request' && github.event.label.name == 'integration-test'
        uses: KeisukeYamashita/create-comment@v1
        with:
          unique: false
          comment: |
            Hi! I started running the integration tests on your PR. You will receive a comment with the results shortly.
      - name: Install Python dependencies using Poetry
        run: poetry install --with dev,test,runtime,evaluation
      - name: Configure config.toml for testing with Haiku
        env:
          LLM_MODEL: "litellm_proxy/claude-3-5-haiku-20241022"
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
          MAX_ITERATIONS: 10
        run: |
          echo "[llm.eval]" > config.toml
          echo "model = \"$LLM_MODEL\"" >> config.toml
          echo "api_key = \"$LLM_API_KEY\"" >> config.toml
          echo "base_url = \"$LLM_BASE_URL\"" >> config.toml
          echo "temperature = 0.0" >> config.toml
      - name: Build environment
        run: make build
      - name: Run integration test evaluation for Haiku
        env:
          SANDBOX_FORCE_REBUILD_RUNTIME: True
        run: |
          poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' 10 $N_PROCESSES '' 'haiku_run'
          # get integration tests report
          REPORT_FILE_HAIKU=$(find evaluation/evaluation_outputs/outputs/integration_tests/CodeActAgent/*haiku*_maxiter_10_N* -name "report.md" -type f | head -n 1)
          echo "REPORT_FILE: $REPORT_FILE_HAIKU"
          echo "INTEGRATION_TEST_REPORT_HAIKU<<EOF" >> $GITHUB_ENV
          cat $REPORT_FILE_HAIKU >> $GITHUB_ENV
          echo >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV
      - name: Wait a little bit
        run: sleep 10
      - name: Configure config.toml for testing with DeepSeek
        env:
          LLM_MODEL: "litellm_proxy/deepseek-chat"
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
          MAX_ITERATIONS: 10
        run: |
          echo "[llm.eval]" > config.toml
          echo "model = \"$LLM_MODEL\"" >> config.toml
          echo "api_key = \"$LLM_API_KEY\"" >> config.toml
          echo "base_url = \"$LLM_BASE_URL\"" >> config.toml
          echo "temperature = 0.0" >> config.toml
      - name: Run integration test evaluation for DeepSeek
        env:
          SANDBOX_FORCE_REBUILD_RUNTIME: True
        run: |
          poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD CodeActAgent '' 10 $N_PROCESSES '' 'deepseek_run'
          # get integration tests report
          REPORT_FILE_DEEPSEEK=$(find evaluation/evaluation_outputs/outputs/integration_tests/CodeActAgent/deepseek*_maxiter_10_N* -name "report.md" -type f | head -n 1)
          echo "REPORT_FILE: $REPORT_FILE_DEEPSEEK"
          echo "INTEGRATION_TEST_REPORT_DEEPSEEK<<EOF" >> $GITHUB_ENV
          cat $REPORT_FILE_DEEPSEEK >> $GITHUB_ENV
          echo >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV
      # -------------------------------------------------------------
      # Run VisualBrowsingAgent tests for DeepSeek, limited to t05 and t06
      - name: Wait a little bit (again)
        run: sleep 5
      - name: Configure config.toml for testing VisualBrowsingAgent (DeepSeek)
        env:
          LLM_MODEL: "litellm_proxy/deepseek-chat"
          LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
          LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
          MAX_ITERATIONS: 15
        run: |
          echo "[llm.eval]" > config.toml
          echo "model = \"$LLM_MODEL\"" >> config.toml
          echo "api_key = \"$LLM_API_KEY\"" >> config.toml
          echo "base_url = \"$LLM_BASE_URL\"" >> config.toml
          echo "temperature = 0.0" >> config.toml
      - name: Run integration test evaluation for VisualBrowsingAgent (DeepSeek)
        env:
          SANDBOX_FORCE_REBUILD_RUNTIME: True
        run: |
          poetry run ./evaluation/integration_tests/scripts/run_infer.sh llm.eval HEAD VisualBrowsingAgent '' 15 $N_PROCESSES "t05_simple_browsing,t06_github_pr_browsing.py" 'visualbrowsing_deepseek_run'
          # Find and export the visual browsing agent test results
          REPORT_FILE_VISUALBROWSING_DEEPSEEK=$(find evaluation/evaluation_outputs/outputs/integration_tests/VisualBrowsingAgent/deepseek*_maxiter_15_N* -name "report.md" -type f | head -n 1)
          echo "REPORT_FILE_VISUALBROWSING_DEEPSEEK: $REPORT_FILE_VISUALBROWSING_DEEPSEEK"
          echo "INTEGRATION_TEST_REPORT_VISUALBROWSING_DEEPSEEK<<EOF" >> $GITHUB_ENV
          cat $REPORT_FILE_VISUALBROWSING_DEEPSEEK >> $GITHUB_ENV
          echo >> $GITHUB_ENV
          echo "EOF" >> $GITHUB_ENV
      - name: Create archive of evaluation outputs
        run: |
          TIMESTAMP=$(date +'%y-%m-%d-%H-%M')
          cd evaluation/evaluation_outputs/outputs # Change to the outputs directory
          tar -czvf ../../../integration_tests_${TIMESTAMP}.tar.gz integration_tests/CodeActAgent/* integration_tests/VisualBrowsingAgent/* # Only include the actual result directories
      - name: Upload evaluation results as artifact
        uses: actions/upload-artifact@v4
        id: upload_results_artifact
        with:
          name: integration-test-outputs-${{ github.run_id }}-${{ github.run_attempt }}
          path: integration_tests_*.tar.gz
      - name: Get artifact URLs
        run: |
          echo "ARTIFACT_URL=${{ steps.upload_results_artifact.outputs.artifact-url }}" >> $GITHUB_ENV
      - name: Set timestamp and trigger reason
        run: |
          echo "TIMESTAMP=$(date +'%Y-%m-%d-%H-%M')" >> $GITHUB_ENV
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            echo "TRIGGER_REASON=pr-${{ github.event.pull_request.number }}" >> $GITHUB_ENV
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            echo "TRIGGER_REASON=manual-${{ github.event.inputs.reason }}" >> $GITHUB_ENV
          else
            echo "TRIGGER_REASON=nightly-scheduled" >> $GITHUB_ENV
          fi
      - name: Comment with results and artifact link
        id: create_comment
        uses: KeisukeYamashita/create-comment@v1
        with:
          # if triggered by PR, use PR number, otherwise use 9745 as fallback issue number for manual triggers
          number: ${{ github.event_name == 'pull_request' && github.event.pull_request.number || 9745 }}
          unique: false
          comment: |
            Trigger by: ${{ github.event_name == 'pull_request' && format('Pull Request (integration-test label on PR #{0})', github.event.pull_request.number) || (github.event_name == 'workflow_dispatch' && format('Manual Trigger: {0}', github.event.inputs.reason)) || 'Nightly Scheduled Run' }}
            Commit: ${{ github.sha }}
            **Integration Tests Report (Haiku)**
            Haiku LLM Test Results:
            ${{ env.INTEGRATION_TEST_REPORT_HAIKU }}
            ---
            **Integration Tests Report (DeepSeek)**
            DeepSeek LLM Test Results:
            ${{ env.INTEGRATION_TEST_REPORT_DEEPSEEK }}
            ---
            **Integration Tests Report VisualBrowsing (DeepSeek)**
            ${{ env.INTEGRATION_TEST_REPORT_VISUALBROWSING_DEEPSEEK }}
            ---
            Download testing outputs (includes both Haiku and DeepSeek results): [Download](${{ steps.upload_results_artifact.outputs.artifact-url }})

@@ -72,34 +72,3 @@ jobs:
- name: Run pre-commit hooks
working-directory: ./enterprise
run: pre-commit run --all-files --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
lint-cli-python:
name: Lint CLI python
runs-on: blacksmith-4vcpu-ubuntu-2204
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up python
uses: useblacksmith/setup-python@v6
with:
python-version: 3.12
cache: "pip"
- name: Install pre-commit
run: pip install pre-commit==4.2.0
- name: Run pre-commit hooks
working-directory: ./openhands-cli
run: pre-commit run --all-files --config ./dev_config/python/.pre-commit-config.yaml
# Check version consistency across documentation
check-version-consistency:
name: Check version consistency
runs-on: blacksmith-4vcpu-ubuntu-2204
steps:
- uses: actions/checkout@v4
- name: Set up python
uses: useblacksmith/setup-python@v6
with:
python-version: 3.12
- name: Run version consistency check
run: .github/scripts/check_version_consistency.py

@@ -1,70 +0,0 @@
# Workflow that checks MDX format in docs/ folder
name: MDX Lint

# Run on pushes to main and on pull requests that modify docs/ files
on:
  push:
    branches:
      - main
    paths:
      - 'docs/**/*.mdx'
  pull_request:
    paths:
      - 'docs/**/*.mdx'

# If triggered by a PR, it will be in the same group. However, each commit on main will be in its own unique group
concurrency:
  group: ${{ github.workflow }}-${{ (github.head_ref && github.ref) || github.run_id }}
  cancel-in-progress: true

jobs:
  mdx-lint:
    name: Lint MDX files
    runs-on: blacksmith-4vcpu-ubuntu-2204
    steps:
      - uses: actions/checkout@v4
      - name: Install Node.js 22
        uses: useblacksmith/setup-node@v5
        with:
          node-version: 22
      - name: Install MDX dependencies
        run: |
          npm install @mdx-js/mdx@3 glob@10
      - name: Validate MDX files
        run: |
          node -e "
          const {compile} = require('@mdx-js/mdx');
          const fs = require('fs');
          const path = require('path');
          const glob = require('glob');

          async function validateMDXFiles() {
            const files = glob.sync('docs/**/*.mdx');
            console.log('Found', files.length, 'MDX files to validate');
            let hasErrors = false;
            for (const file of files) {
              try {
                const content = fs.readFileSync(file, 'utf8');
                await compile(content);
                console.log('✅ MDX parsing successful for', file);
              } catch (err) {
                console.error('❌ MDX parsing failed for', file, ':', err.message);
                hasErrors = true;
              }
            }
            if (hasErrors) {
              console.error('\\n❌ Some MDX files have parsing errors. Please fix them before merging.');
              process.exit(1);
            } else {
              console.log('\\n✅ All MDX files are valid!');
            }
          }

          validateMDXFiles();
          "

@@ -201,7 +201,7 @@ jobs:
issue_number: ${{ env.ISSUE_NUMBER }},
owner: context.repo.owner,
repo: context.repo.repo,
body: `[OpenHands](https://github.com/All-Hands-AI/OpenHands) started fixing the ${issueType}! You can monitor the progress [here](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}).`
body: `[OpenHands](https://github.com/OpenHands/OpenHands) started fixing the ${issueType}! You can monitor the progress [here](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}).`
});
- name: Install OpenHands
@@ -233,7 +233,7 @@
if (isExperimentalLabel || isIssueCommentExperimental || isReviewCommentExperimental) {
console.log("Installing experimental OpenHands...");
await exec.exec("pip install git+https://github.com/all-hands-ai/openhands.git");
await exec.exec("pip install git+https://github.com/openhands/openhands.git");
} else {
console.log("Installing from requirements.txt...");

@@ -48,7 +48,10 @@ jobs:
python-version: ${{ matrix.python-version }}
cache: "poetry"
- name: Install Python dependencies using Poetry
run: poetry install --with dev,test,runtime
run: |
poetry install --with dev,test,runtime
poetry run pip install pytest-xdist
poetry run pip install pytest-rerunfailures
- name: Build Environment
run: make build
- name: Run Unit Tests
@@ -56,7 +59,7 @@
env:
COVERAGE_FILE: ".coverage.${{ matrix.python_version }}"
- name: Run Runtime Tests with CLIRuntime
run: PYTHONPATH=".:$PYTHONPATH" TEST_RUNTIME=cli poetry run pytest -s tests/runtime/test_bash.py --cov=openhands --cov-branch
run: PYTHONPATH=".:$PYTHONPATH" TEST_RUNTIME=cli poetry run pytest -n 5 --reruns 2 --reruns-delay 3 -s tests/runtime/test_bash.py --cov=openhands --cov-branch
env:
COVERAGE_FILE: ".coverage.runtime.${{ matrix.python_version }}"
- name: Store coverage file
@@ -67,37 +70,7 @@ jobs:
.coverage.${{ matrix.python_version }}
.coverage.runtime.${{ matrix.python_version }}
include-hidden-files: true
# Run specific Windows python tests
test-on-windows:
name: Python Tests on Windows
runs-on: windows-latest
strategy:
matrix:
python-version: ["3.12"]
steps:
- uses: actions/checkout@v4
- name: Install pipx
run: pip install pipx
- name: Install poetry via pipx
run: pipx install poetry
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
cache: "poetry"
- name: Install Python dependencies using Poetry
run: poetry install --with dev,test,runtime
- name: Run Windows unit tests
run: poetry run pytest -svv tests/unit/runtime/utils/test_windows_bash.py
env:
PYTHONPATH: ".;$env:PYTHONPATH"
DEBUG: "1"
- name: Run Windows runtime tests with LocalRuntime
run: $env:TEST_RUNTIME="local"; poetry run pytest -svv tests/runtime/test_bash.py
env:
PYTHONPATH: ".;$env:PYTHONPATH"
TEST_RUNTIME: local
DEBUG: "1"
test-enterprise:
name: Enterprise Python Unit Tests
runs-on: blacksmith-4vcpu-ubuntu-2404
@@ -128,57 +101,11 @@ jobs:
path: ".coverage.enterprise.${{ matrix.python_version }}"
include-hidden-files: true
# Run CLI unit tests
test-cli-python:
name: CLI Unit Tests
runs-on: blacksmith-4vcpu-ubuntu-2404
strategy:
matrix:
python-version: ["3.12"]
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Set up Python
uses: useblacksmith/setup-python@v6
with:
python-version: ${{ matrix.python-version }}
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
version: "latest"
- name: Install dependencies
working-directory: ./openhands-cli
run: |
uv sync --group dev
- name: Run CLI unit tests
working-directory: ./openhands-cli
env:
# write coverage to repo root so the merge step finds it
COVERAGE_FILE: "${{ github.workspace }}/.coverage.openhands-cli.${{ matrix.python-version }}"
run: |
uv run pytest --forked -n auto -s \
-p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark \
tests --cov=openhands_cli --cov-branch
- name: Store coverage file
uses: actions/upload-artifact@v4
with:
name: coverage-openhands-cli
path: ".coverage.openhands-cli.${{ matrix.python-version }}"
include-hidden-files: true
coverage-comment:
name: Coverage Comment
if: github.event_name == 'pull_request'
runs-on: ubuntu-latest
needs: [test-on-linux, test-enterprise, test-cli-python]
needs: [test-on-linux, test-enterprise]
permissions:
pull-requests: write
@@ -192,9 +119,6 @@ jobs:
pattern: coverage-*
merge-multiple: true
- name: Create symlink for CLI source files
run: ln -sf openhands-cli/openhands_cli openhands_cli
- name: Coverage comment
id: coverage_comment
uses: py-cov-action/python-coverage-comment-action@v3

@@ -10,7 +10,6 @@ on:
type: choice
options:
- app server
- cli
default: app server
push:
tags:
@@ -39,36 +38,3 @@ jobs:
run: ./build.sh
- name: publish
run: poetry publish -u __token__ -p ${{ secrets.PYPI_TOKEN }}
release-cli:
name: Publish CLI to PyPI
runs-on: ubuntu-latest
# Run when manually dispatched for "cli" OR for tag pushes that contain '-cli'
if: |
(github.event_name == 'workflow_dispatch' && github.event.inputs.reason == 'cli')
|| (github.event_name == 'push' && startsWith(github.ref, 'refs/tags/') && contains(github.ref, '-cli'))
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.12
- name: Install uv
uses: astral-sh/setup-uv@v3
with:
version: "latest"
- name: Build CLI package
working-directory: openhands-cli
run: |
# Clean dist directory to avoid conflicts with binary builds
rm -rf dist/
uv build
- name: Publish CLI to PyPI
working-directory: openhands-cli
run: |
uv publish --token ${{ secrets.PYPI_TOKEN_OPENHANDS }}

@@ -1,135 +0,0 @@
# Run evaluation on a PR, after releases, or manually
name: Run Eval

# Runs when a PR is labeled with one of the "run-eval-" labels, after releases, or manually triggered
on:
  pull_request:
    types: [labeled]
  release:
    types: [published]
  workflow_dispatch:
    inputs:
      branch:
        description: 'Branch to evaluate'
        required: true
        default: 'main'
      eval_instances:
        description: 'Number of evaluation instances'
        required: true
        default: '50'
        type: choice
        options:
          - '1'
          - '2'
          - '50'
          - '100'
      reason:
        description: 'Reason for manual trigger'
        required: false
        default: ''

env:
  # Environment variable for the master GitHub issue number where all evaluation results will be commented
  # This should be set to the issue number where you want all evaluation results to be posted
  MASTER_EVAL_ISSUE_NUMBER: ${{ vars.MASTER_EVAL_ISSUE_NUMBER || '0' }}

jobs:
  trigger-job:
    name: Trigger remote eval job
    if: ${{ (github.event_name == 'pull_request' && (github.event.label.name == 'run-eval-1' || github.event.label.name == 'run-eval-2' || github.event.label.name == 'run-eval-50' || github.event.label.name == 'run-eval-100')) || github.event_name == 'release' || github.event_name == 'workflow_dispatch' }}
    runs-on: blacksmith-4vcpu-ubuntu-2204
    steps:
      - name: Checkout branch
        uses: actions/checkout@v4
        with:
          ref: ${{ github.event_name == 'pull_request' && github.head_ref || (github.event_name == 'workflow_dispatch' && github.event.inputs.branch) || github.ref }}
      - name: Set evaluation parameters
        id: eval_params
        run: |
          REPO_URL="https://github.com/${{ github.repository }}"
          echo "Repository URL: $REPO_URL"
          # Determine branch based on trigger type
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            EVAL_BRANCH="${{ github.head_ref }}"
            echo "PR Branch: $EVAL_BRANCH"
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            EVAL_BRANCH="${{ github.event.inputs.branch }}"
            echo "Manual Branch: $EVAL_BRANCH"
          else
            # For release events, use the tag name or main branch
            EVAL_BRANCH="${{ github.ref_name }}"
            echo "Release Branch/Tag: $EVAL_BRANCH"
          fi
          # Determine evaluation instances based on trigger type
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            if [[ "${{ github.event.label.name }}" == "run-eval-1" ]]; then
              EVAL_INSTANCES="1"
            elif [[ "${{ github.event.label.name }}" == "run-eval-2" ]]; then
              EVAL_INSTANCES="2"
            elif [[ "${{ github.event.label.name }}" == "run-eval-50" ]]; then
              EVAL_INSTANCES="50"
            elif [[ "${{ github.event.label.name }}" == "run-eval-100" ]]; then
              EVAL_INSTANCES="100"
            fi
          elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            EVAL_INSTANCES="${{ github.event.inputs.eval_instances }}"
          else
            # For release events, default to 50 instances
            EVAL_INSTANCES="50"
          fi
          echo "Evaluation instances: $EVAL_INSTANCES"
          echo "repo_url=$REPO_URL" >> $GITHUB_OUTPUT
          echo "eval_branch=$EVAL_BRANCH" >> $GITHUB_OUTPUT
          echo "eval_instances=$EVAL_INSTANCES" >> $GITHUB_OUTPUT
      - name: Trigger remote job
        run: |
          # Determine PR number for the remote evaluation system
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            PR_NUMBER="${{ github.event.pull_request.number }}"
          else
            # For non-PR triggers, use the master issue number as PR number
            PR_NUMBER="${{ env.MASTER_EVAL_ISSUE_NUMBER }}"
          fi
          curl -X POST \
            -H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
            -H "Accept: application/vnd.github+json" \
            -d "{\"ref\": \"main\", \"inputs\": {\"github-repo\": \"${{ steps.eval_params.outputs.repo_url }}\", \"github-branch\": \"${{ steps.eval_params.outputs.eval_branch }}\", \"pr-number\": \"${PR_NUMBER}\", \"eval-instances\": \"${{ steps.eval_params.outputs.eval_instances }}\"}}" \
            https://api.github.com/repos/All-Hands-AI/evaluation/actions/workflows/create-branch.yml/dispatches
          # Send Slack message
          if [[ "${{ github.event_name }}" == "pull_request" ]]; then
            TRIGGER_URL="https://github.com/${{ github.repository }}/pull/${{ github.event.pull_request.number }}"
            slack_text="PR $TRIGGER_URL has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances..."
          elif [[ "${{ github.event_name }}" == "release" ]]; then
            TRIGGER_URL="https://github.com/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
            slack_text="Release $TRIGGER_URL has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances..."
          else
            TRIGGER_URL="https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"
            slack_text="Manual trigger (${{ github.event.inputs.reason || 'No reason provided' }}) has triggered evaluation on ${{ steps.eval_params.outputs.eval_instances }} instances for branch ${{ steps.eval_params.outputs.eval_branch }}..."
          fi
          curl -X POST -H 'Content-type: application/json' --data '{"text":"'"$slack_text"'"}' \
            https://hooks.slack.com/services/${{ secrets.SLACK_TOKEN }}
      - name: Comment on issue/PR
        uses: KeisukeYamashita/create-comment@v1
        with:
          # For PR triggers, comment on the PR. For other triggers, comment on the master issue
          number: ${{ github.event_name == 'pull_request' && github.event.pull_request.number || env.MASTER_EVAL_ISSUE_NUMBER }}
          unique: false
          comment: |
            **Evaluation Triggered**
            **Trigger:** ${{ github.event_name == 'pull_request' && format('Pull Request #{0}', github.event.pull_request.number) || (github.event_name == 'release' && 'Release') || format('Manual Trigger: {0}', github.event.inputs.reason || 'No reason provided') }}
            **Branch:** ${{ steps.eval_params.outputs.eval_branch }}
            **Instances:** ${{ steps.eval_params.outputs.eval_instances }}
            **Commit:** ${{ github.sha }}
            Running evaluation on the specified branch. Once eval is done, the results will be posted here.

.gitignore

@@ -185,6 +185,9 @@ cython_debug/
.repomix
repomix-output.txt
# Emacs backup
*~
# evaluation
evaluation/evaluation_outputs
evaluation/outputs

@@ -83,6 +83,116 @@ VSCode Extension:
- Use `vscode.window.createOutputChannel()` for debug logging instead of `showErrorMessage()` popups
- Pre-commit process runs both frontend and backend checks when committing extension changes
## Enterprise Directory
The `enterprise/` directory contains additional functionality that extends the open-source OpenHands codebase. This includes:
- Authentication and user management (Keycloak integration)
- Database migrations (Alembic)
- Integration services (GitHub, GitLab, Jira, Linear, Slack)
- Billing and subscription management (Stripe)
- Telemetry and analytics (PostHog, custom metrics framework)
### Enterprise Development Setup
**Prerequisites:**
- Python 3.12
- Poetry (for dependency management)
- Node.js 22.x (for frontend)
- Docker (optional)
**Setup Steps:**
1. First, build the main OpenHands project: `make build`
2. Then install enterprise dependencies: `cd enterprise && poetry install --with dev,test` (This can take a very long time. Be patient.)
3. Set up enterprise pre-commit hooks: `poetry run pre-commit install --config ./dev_config/python/.pre-commit-config.yaml`
**Running Enterprise Tests:**
```bash
# Enterprise unit tests (full suite)
PYTHONPATH=".:$PYTHONPATH" poetry run --project=enterprise pytest --forked -n auto -s -p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark ./enterprise/tests/unit --cov=enterprise --cov-branch
# Test specific modules (faster for development)
cd enterprise
PYTHONPATH=".:$PYTHONPATH" poetry run pytest tests/unit/telemetry/ --confcutdir=tests/unit/telemetry
# Enterprise linting (IMPORTANT: use --show-diff-on-failure to match GitHub CI)
poetry run pre-commit run --all-files --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
```
**Running Enterprise Server:**
```bash
cd enterprise
make start-backend # Development mode with hot reload
# or
make run # Full application (backend + frontend)
```
**Key Configuration Files:**
- `enterprise/pyproject.toml` - Enterprise-specific dependencies
- `enterprise/Makefile` - Enterprise build and run commands
- `enterprise/dev_config/python/` - Linting and type checking configuration
- `enterprise/migrations/` - Database migration files
**Database Migrations:**
Enterprise uses Alembic for database migrations. When making schema changes:
1. Create migration files in `enterprise/migrations/versions/`
2. Test migrations thoroughly
3. The CI will check for migration conflicts on PRs
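As a rough sketch of what such a migration file can look like (the revision IDs, table name, and columns here are hypothetical placeholders, not taken from the enterprise schema):

```python
"""Add example_settings table (illustrative only)."""
import sqlalchemy as sa
from alembic import op

# Revision identifiers used by Alembic; these values are placeholders.
revision = '20250101_abc123'
down_revision = '20241201_def456'


def upgrade() -> None:
    # Hypothetical table; real migrations live in enterprise/migrations/versions/
    op.create_table(
        'example_settings',
        sa.Column('id', sa.Integer, primary_key=True),
        sa.Column('user_id', sa.String(length=64), nullable=False),
        sa.Column('value', sa.JSON, nullable=True),
    )


def downgrade() -> None:
    # Always provide a reverse path so a bad deploy can be rolled back.
    op.drop_table('example_settings')
```

A migration without a working `downgrade()` is much harder to revert, which is why thorough testing (step 2 above) matters.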
**Integration Development:**
The enterprise codebase includes integrations for:
- **GitHub** - PR management, webhooks, app installations
- **GitLab** - Similar to GitHub but for GitLab instances
- **Jira** - Issue tracking and project management
- **Linear** - Modern issue tracking
- **Slack** - Team communication and notifications
Each integration follows a consistent pattern with service classes, storage models, and API endpoints.
**Important Notes:**
- Enterprise code is licensed under Polyform Free Trial License (30-day limit)
- The enterprise server extends the OSS server through dynamic imports
- Database changes require careful migration planning in `enterprise/migrations/`
- Always test changes in both OSS and enterprise contexts
- Use the enterprise-specific Makefile commands for development
**Enterprise Testing Best Practices:**
**Database Testing:**
- Use SQLite in-memory databases (`sqlite:///:memory:`) for unit tests instead of real PostgreSQL
- Create module-specific `conftest.py` files with database fixtures
- Mock external database connections in unit tests to avoid dependency on running services
- Use real database connections only for integration tests
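A minimal sketch of such a fixture, assuming SQLAlchemy models that share a declarative `Base` (the import path is illustrative, not the actual enterprise module):

```python
# conftest.py (sketch)
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from storage.models import Base  # hypothetical models module


@pytest.fixture
def db_session():
    # Each test gets a fresh in-memory schema; no PostgreSQL service needed.
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    Session = sessionmaker(bind=engine)
    session = Session()
    try:
        yield session
    finally:
        session.close()
        engine.dispose()
```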
**Import Patterns:**
- Use relative imports without `enterprise.` prefix in enterprise code
- Example: `from storage.database import session_maker` not `from enterprise.storage.database import session_maker`
- This ensures code works in both OSS and enterprise contexts
**Test Structure:**
- Place tests in `enterprise/tests/unit/` following the same structure as the source code
- Use `--confcutdir=tests/unit/[module]` when testing specific modules
- Create comprehensive fixtures for complex objects (databases, external services)
- Write platform-agnostic tests (avoid hardcoded OS-specific assertions)
**Mocking Strategy:**
- Use `AsyncMock` for async operations and `MagicMock` for complex objects
- Mock all external dependencies (databases, APIs, file systems) in unit tests
- Use `patch` with correct import paths (e.g., `telemetry.registry.logger` not `enterprise.telemetry.registry.logger`)
- Test both success and failure scenarios with proper error handling
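For example, a self-contained test along these lines (the `fetch_user` helper is invented for illustration) shows `AsyncMock` driving an awaited call while `MagicMock` stands in for the response object, with both a success and a failure path:

```python
import asyncio
from unittest.mock import AsyncMock, MagicMock

import pytest


async def fetch_user(client, user_id):
    # Stand-in for an enterprise service call; purely illustrative.
    response = await client.get(f'/users/{user_id}')
    return response.json()


def test_fetch_user_success():
    response = MagicMock()
    response.json.return_value = {'id': '42', 'name': 'test'}
    client = AsyncMock()
    client.get.return_value = response  # the awaited call resolves to `response`

    assert asyncio.run(fetch_user(client, '42')) == {'id': '42', 'name': 'test'}
    client.get.assert_awaited_once_with('/users/42')


def test_fetch_user_failure():
    # Failure path: the mocked call raises instead of returning.
    client = AsyncMock()
    client.get.side_effect = RuntimeError('connection refused')

    with pytest.raises(RuntimeError):
        asyncio.run(fetch_user(client, '42'))
```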
**Coverage Goals:**
- Aim for 90%+ test coverage on new enterprise modules
- Focus on critical business logic and error handling paths
- Use `--cov-report=term-missing` to identify uncovered lines
**Troubleshooting:**
- If tests fail, ensure all dependencies are installed: `poetry install --with dev,test`
- For database issues, check migration status and run migrations if needed
- For frontend issues, ensure the main OpenHands frontend is built: `make build`
- Check logs in the `logs/` directory for runtime issues
- If tests fail with import errors, verify `PYTHONPATH=".:$PYTHONPATH"` is set
- **If GitHub CI fails but local linting passes**: Always use `--show-diff-on-failure` flag to match CI behavior exactly
## Template for Github Pull Request
If you are starting a pull request (PR), please follow the template in `.github/pull_request_template.md`.

CNAME (new file)

@@ -0,0 +1 @@
docs.all-hands.dev

@@ -124,7 +124,7 @@ These Slack etiquette guidelines are designed to foster an inclusive, respectful
- Post questions or discussions in the most relevant channel (e.g., [slack - #general](https://openhands-ai.slack.com/archives/C06P5NCGSFP) for general topics, [slack - #questions](https://openhands-ai.slack.com/archives/C06U8UTKSAD) for queries/questions).
- When asking for help or raising issues, include necessary details like links, screenshots, or clear explanations to provide context.
- Keep discussions in public channels whenever possible to allow others to benefit from the conversation, unless the matter is sensitive or private.
- Always adhere to [our standards](https://github.com/All-Hands-AI/OpenHands/blob/main/CODE_OF_CONDUCT.md#our-standards) to ensure a welcoming and collaborative environment.
- Always adhere to [our standards](https://github.com/OpenHands/OpenHands/blob/main/CODE_OF_CONDUCT.md#our-standards) to ensure a welcoming and collaborative environment.
- If you choose to mute a channel, consider setting up alerts for topics that still interest you to stay engaged. For Slack, go to Settings → Notifications → My Keywords to add specific keywords that will notify you when mentioned. For example, if you're here for discussions about LLMs, mute the channel if it's too busy, but set notifications to alert you only when “LLMs” appears in messages.
## Attribution

@@ -1,43 +1,45 @@
# 🙌 The OpenHands Community
# The OpenHands Community
The OpenHands community is built around the belief that (1) AI and AI agents are going to fundamentally change the way
we build software, and (2) if this is true, we should do everything we can to make sure that the benefits provided by
such powerful technology are accessible to everyone.
OpenHands is a community of engineers, academics, and enthusiasts reimagining software development for an AI-powered world.
If this resonates with you, we'd love to have you join us in our quest!
## Mission
## 🤝 How to Join
It's very clear that AI is changing software development. We want the developer community to drive that change organically, through open source.
Check out our [How to Join the Community section.](https://github.com/All-Hands-AI/OpenHands?tab=readme-ov-file#-how-to-join-the-community)
So we're not just building friendly interfaces for AI-driven development. We're publishing _building blocks_ that empower developers to create new experiences, tailored to your own habits, needs, and imagination.
## 💪 Becoming a Contributor
## Ethos
We welcome contributions from everyone! Whether you're a developer, a researcher, or simply enthusiastic about advancing
the field of software engineering with AI, there are many ways to get involved:
We have two core values: **high openness** and **high agency**. While we don't expect everyone in the community to embody these values, we want to establish them as norms.
- **Code Contributions:** Help us develop new core functionality, improve our agents, improve the frontend and other
interfaces, or anything else that would help make OpenHands better.
- **Research and Evaluation:** Contribute to our understanding of LLMs in software engineering, participate in
evaluating the models, or suggest improvements.
- **Feedback and Testing:** Use the OpenHands toolset, report bugs, suggest features, or provide feedback on usability.
### High Openness
For details, please check [CONTRIBUTING.md](./CONTRIBUTING.md).
We welcome anyone and everyone into our community by default. You don't have to be a software developer to help us build. You don't have to be pro-AI to help us learn.
## Code of Conduct
Our plans, our work, our successes, and our failures are all public record. We want the world to see not just the fruits of our work, but the whole process of growing it.
We have a [Code of Conduct](./CODE_OF_CONDUCT.md) that we expect all contributors to adhere to.
Long story short, we are aiming for an open, welcoming, diverse, inclusive, and healthy community.
All contributors are expected to contribute to building this sort of community.
We welcome thoughtful criticism, whether it's a comment on a PR or feedback on the community as a whole.
## 🛠️ Becoming a Maintainer
### High Agency
For contributors who have made significant and sustained contributions to the project, there is a possibility of joining
the maintainer team. The process for this is as follows:
Everyone should feel empowered to contribute to OpenHands. Whether it's by making a PR, hosting an event, sharing feedback, or just asking a question, don't hold back!
1. Any contributor who has made sustained and high-quality contributions to the codebase can be nominated by any
maintainer. If you feel that you may qualify you can reach out to any of the maintainers that have reviewed your PRs and ask if you can be nominated.
2. Once a maintainer nominates a new maintainer, there will be a discussion period among the maintainers for at least 3 days.
3. If no concerns are raised the nomination will be accepted by acclamation, and if concerns are raised there will be a discussion and possible vote.
OpenHands gives everyone the building blocks to create state-of-the-art developer experiences. We experiment constantly and love building new things.
Note that just making many PRs does not immediately imply that you will become a maintainer. We will be looking
at sustained high-quality contributions over a period of time, as well as good teamwork and adherence to our [Code of Conduct](./CODE_OF_CONDUCT.md).
Coding, development practices, and communities are changing rapidly. We won't hesitate to change direction and make big bets.
## Relationship to All Hands
OpenHands is supported by the for-profit organization [All Hands AI, Inc](https://www.all-hands.dev/).
All Hands was founded by three of the first major contributors to OpenHands:
- Xingyao Wang, a UIUC PhD candidate who got OpenHands to the top of the SWE-bench leaderboards
- Graham Neubig, a CMU Professor who rallied the academic community around OpenHands
- Robert Brennan, a software engineer who architected the user-facing features of OpenHands
All Hands is an important part of the OpenHands ecosystem. We've raised over $20M--mainly to hire developers and researchers who can work on OpenHands full-time, and to provide them with expensive infrastructure. ([Join us!](https://allhandsai.applytojob.com/apply/))
But we see OpenHands as much larger, and ultimately more important, than All Hands. When our financial responsibility to investors is at odds with our social responsibility to the community—as it inevitably will be, from time to time—we promise to navigate that conflict thoughtfully and transparently.
At some point, we may transfer custody of OpenHands to an open source foundation. But for now, the [Benevolent Dictator approach](http://www.catb.org/~esr/writings/cathedral-bazaar/homesteading/ar01s16.html) helps us move forward with speed and intention. If we ever forget the “benevolent” part, please: fork us.

@@ -13,15 +13,15 @@ To understand the codebase, please refer to the README in each module:
## Setting up Your Development Environment
We have a separate doc [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) that tells you how to set up a development workflow.
We have a separate doc [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md) that tells you how to set up a development workflow.
## How Can I Contribute?
There are many ways that you can contribute:
1. **Download and use** OpenHands, and send [issues](https://github.com/All-Hands-AI/OpenHands/issues) when you encounter something that isn't working or a feature that you'd like to see.
1. **Download and use** OpenHands, and send [issues](https://github.com/OpenHands/OpenHands/issues) when you encounter something that isn't working or a feature that you'd like to see.
2. **Send feedback** after each session by [clicking the thumbs-up thumbs-down buttons](https://docs.all-hands.dev/usage/feedback), so we can see where things are working and failing, and also build an open dataset for training code agents.
3. **Improve the Codebase** by sending [PRs](#sending-pull-requests-to-openhands) (see details below). In particular, we have some [good first issues](https://github.com/All-Hands-AI/OpenHands/labels/good%20first%20issue) that may be ones to start on.
3. **Improve the Codebase** by sending [PRs](#sending-pull-requests-to-openhands) (see details below). In particular, we have some [good first issues](https://github.com/OpenHands/OpenHands/labels/good%20first%20issue) that may be ones to start on.
## What Can I Build?
Here are a few ways you can help improve the codebase.
@@ -35,7 +35,7 @@ of the application, please open an issue first, or better, join the #eng-ui-ux c
to gather consensus from our design team first.
#### Improving the agent
Our main agent is the CodeAct agent. You can [see its prompts here](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/agenthub/codeact_agent).
Our main agent is the CodeAct agent. You can [see its prompts here](https://github.com/OpenHands/OpenHands/tree/main/openhands/agenthub/codeact_agent).
Changes to these prompts, and to the underlying behavior in Python, can have a huge impact on user experience.
You can try modifying the prompts to see how they change the behavior of the agent as you use the app
@@ -54,11 +54,11 @@ The agent needs a place to run code and commands. When you run OpenHands on your
to do this by default. But there are other ways of creating a sandbox for the agent.
If you work for a company that provides a cloud-based runtime, you could help us add support for that runtime
by implementing the [interface specified here](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/base.py).
by implementing the [interface specified here](https://github.com/OpenHands/OpenHands/blob/main/openhands/runtime/base.py).
#### Testing
When you write code, it is also good to write tests. Please navigate to the [`./tests`](./tests) folder to see existing test suites.
At the moment, we have two kinds of tests: [`unit`](./tests/unit) and [`integration`](./evaluation/integration_tests). Please refer to the README for each test suite. These tests also run on GitHub's continuous integration to ensure quality of the project.
At the moment, we have these kinds of tests: [`unit`](./tests/unit), [`runtime`](./tests/runtime), and [`end-to-end (e2e)`](./tests/e2e). Please refer to the README for each test suite. These tests also run on GitHub's continuous integration to ensure quality of the project.
## Sending Pull Requests to OpenHands
@@ -84,7 +84,7 @@ For example, a PR title could be:
- `refactor: modify package path`
- `feat(frontend): xxxx`, where `(frontend)` means that this PR mainly focuses on the frontend component.
You may also check out previous PRs in the [PR list](https://github.com/All-Hands-AI/OpenHands/pulls).
You may also check out previous PRs in the [PR list](https://github.com/OpenHands/OpenHands/pulls).
### Pull Request description
- If your PR is small (such as a typo fix), you can go brief.
@@ -97,7 +97,7 @@ please include a short message that we can add to our changelog.
### Opening Issues
If you notice any bugs or have any feature requests please open them via the [issues page](https://github.com/All-Hands-AI/OpenHands/issues). We will triage based on how critical the bug is or how potentially useful the improvement is, discuss, and implement the ones that the community has interest/effort for.
If you notice any bugs or have any feature requests please open them via the [issues page](https://github.com/OpenHands/OpenHands/issues). We will triage based on how critical the bug is or how potentially useful the improvement is, discuss, and implement the ones that the community has interest/effort for.
Further, if you see an issue you like, please leave a "thumbs-up" or a comment, which will help us prioritize.

@@ -2,7 +2,7 @@
## Contributors
We would like to thank all the [contributors](https://github.com/All-Hands-AI/OpenHands/graphs/contributors) who have helped make OpenHands possible. We greatly appreciate your dedication and hard work.
We would like to thank all the [contributors](https://github.com/OpenHands/OpenHands/graphs/contributors) who have helped make OpenHands possible. We greatly appreciate your dedication and hard work.
## Open Source Projects
@@ -14,7 +14,7 @@ OpenHands includes and adapts the following open source projects. We are gratefu
#### [Aider](https://github.com/paul-gauthier/aider)
- License: Apache License 2.0
- Description: AI pair programming tool. OpenHands has adapted and integrated its linter module for code-related tasks in [`agentskills utilities`](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/runtime/plugins/agent_skills/utils/aider)
- Description: AI pair programming tool. OpenHands has adapted and integrated its linter module for code-related tasks in [`agentskills utilities`](https://github.com/OpenHands/OpenHands/tree/main/openhands/runtime/plugins/agent_skills/utils/aider)
#### [BrowserGym](https://github.com/ServiceNow/BrowserGym)
- License: Apache License 2.0

@@ -2,7 +2,7 @@
This guide is for people working on OpenHands and editing the source code.
If you wish to contribute your changes, check out the
[CONTRIBUTING.md](https://github.com/All-Hands-AI/OpenHands/blob/main/CONTRIBUTING.md)
[CONTRIBUTING.md](https://github.com/OpenHands/OpenHands/blob/main/CONTRIBUTING.md)
on how to clone and setup the project initially before moving on. Otherwise,
you can clone the OpenHands project directly.
@@ -91,14 +91,14 @@ make run
#### Option B: Individual Server Startup
- **Start the Backend Server:** If you prefer, you can start the backend server independently to focus on
backend-related tasks or configurations.
backend-related tasks or configurations.
```bash
make start-backend
```
- **Start the Frontend Server:** Similarly, you can start the frontend server on its own to work on frontend-related
components or interface enhancements.
components or interface enhancements.
```bash
make start-frontend
```
@@ -110,6 +110,7 @@ You can use OpenHands to develop and improve OpenHands itself! This is a powerfu
#### Quick Start
1. **Build and run OpenHands:**
```bash
export INSTALL_DOCKER=0
export RUNTIME=local
@@ -117,6 +118,7 @@ You can use OpenHands to develop and improve OpenHands itself! This is a powerfu
```
2. **Access the interface:**
- Local development: http://localhost:3001
- Remote/cloud environments: Use the appropriate external URL
@@ -159,7 +161,7 @@ poetry run pytest ./tests/unit/test_*.py
To reduce build time (e.g., if no changes were made to the client-runtime component), you can use an existing Docker
container image by setting the SANDBOX_RUNTIME_CONTAINER_IMAGE environment variable to the desired Docker image.
Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:0.59-nikolaik`
Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/openhands/runtime:0.62-nikolaik`
## Develop inside Docker container
@@ -193,12 +195,12 @@ Here's a guide to the important documentation files in the repository:
- [/README.md](./README.md): Main project overview, features, and basic setup instructions
- [/Development.md](./Development.md) (this file): Comprehensive guide for developers working on OpenHands
- [/CONTRIBUTING.md](./CONTRIBUTING.md): Guidelines for contributing to the project, including code style and PR process
- [/docs/DOC_STYLE_GUIDE.md](./docs/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
- [DOC_STYLE_GUIDE.md](https://github.com/All-Hands-AI/docs/blob/main/openhands/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
- [/openhands/README.md](./openhands/README.md): Details about the backend Python implementation
- [/frontend/README.md](./frontend/README.md): Frontend React application setup and development guide
- [/containers/README.md](./containers/README.md): Information about Docker containers and deployment
- [/tests/unit/README.md](./tests/unit/README.md): Guide to writing and running unit tests
- [/evaluation/README.md](./evaluation/README.md): Documentation for the evaluation framework and benchmarks
- [/microagents/README.md](./microagents/README.md): Information about the microagents architecture and implementation
- [/skills/README.md](./skills/README.md): Information about the skills architecture and implementation
- [/openhands/server/README.md](./openhands/server/README.md): Server implementation details and API documentation
- [/openhands/runtime/README.md](./openhands/runtime/README.md): Documentation for the runtime environment and execution model

202
README.md
View File

@ -1,184 +1,86 @@
<a name="readme-top"></a>
<div align="center">
<img src="https://raw.githubusercontent.com/All-Hands-AI/docs/main/openhands/static/img/logo.png" alt="Logo" width="200">
<h1 align="center">OpenHands: Code Less, Make More</h1>
<img src="https://raw.githubusercontent.com/OpenHands/docs/main/openhands/static/img/logo.png" alt="Logo" width="200">
<h1 align="center" style="border-bottom: none">OpenHands: AI-Driven Development</h1>
</div>
<div align="center">
<a href="https://github.com/All-Hands-AI/OpenHands/graphs/contributors"><img src="https://img.shields.io/github/contributors/All-Hands-AI/OpenHands?style=for-the-badge&color=blue" alt="Contributors"></a>
<a href="https://github.com/All-Hands-AI/OpenHands/stargazers"><img src="https://img.shields.io/github/stars/All-Hands-AI/OpenHands?style=for-the-badge&color=blue" alt="Stargazers"></a>
<a href="https://github.com/All-Hands-AI/OpenHands/blob/main/LICENSE"><img src="https://img.shields.io/github/license/All-Hands-AI/OpenHands?style=for-the-badge&color=blue" alt="MIT License"></a>
<a href="https://github.com/OpenHands/OpenHands/blob/main/LICENSE"><img src="https://img.shields.io/badge/LICENSE-MIT-20B2AA?style=for-the-badge" alt="MIT License"></a>
<a href="https://docs.google.com/spreadsheets/d/1wOUdFCMyY6Nt0AIqF705KN4JKOWgeI4wUGUP60krXXs/edit?gid=811504672#gid=811504672"><img src="https://img.shields.io/badge/SWEBench-72.8-00cc00?logoColor=FFE165&style=for-the-badge" alt="Benchmark Score"></a>
<br/>
<a href="https://all-hands.dev/joinslack"><img src="https://img.shields.io/badge/Slack-Join%20Us-red?logo=slack&logoColor=white&style=for-the-badge" alt="Join our Slack community"></a>
<a href="https://github.com/All-Hands-AI/OpenHands/blob/main/CREDITS.md"><img src="https://img.shields.io/badge/Project-Credits-blue?style=for-the-badge&color=FFE165&logo=github&logoColor=white" alt="Credits"></a>
<br/>
<a href="https://docs.all-hands.dev/usage/getting-started"><img src="https://img.shields.io/badge/Documentation-000?logo=googledocs&logoColor=FFE165&style=for-the-badge" alt="Check out the documentation"></a>
<a href="https://arxiv.org/abs/2407.16741"><img src="https://img.shields.io/badge/Paper%20on%20Arxiv-000?logoColor=FFE165&logo=arxiv&style=for-the-badge" alt="Paper on Arxiv"></a>
<a href="https://docs.google.com/spreadsheets/d/1wOUdFCMyY6Nt0AIqF705KN4JKOWgeI4wUGUP60krXXs/edit?gid=0#gid=0"><img src="https://img.shields.io/badge/Benchmark%20score-000?logoColor=FFE165&logo=huggingface&style=for-the-badge" alt="Evaluation Benchmark Score"></a>
<a href="https://docs.openhands.dev/sdk"><img src="https://img.shields.io/badge/Documentation-000?logo=googledocs&logoColor=FFE165&style=for-the-badge" alt="Check out the documentation"></a>
<a href="https://arxiv.org/abs/2511.03690"><img src="https://img.shields.io/badge/Paper-000?logoColor=FFE165&logo=arxiv&style=for-the-badge" alt="Tech Report"></a>
<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=de">Deutsch</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=es">Español</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=fr">français</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=ja">日本語</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=ko">한국어</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=pt">Português</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=ru">Русский</a> |
<a href="https://www.readme-i18n.com/All-Hands-AI/OpenHands?lang=zh">中文</a>
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=de">Deutsch</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=es">Español</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=fr">français</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=ja">日本語</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=ko">한국어</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=pt">Português</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=ru">Русский</a> |
<a href="https://www.readme-i18n.com/OpenHands/OpenHands?lang=zh">中文</a>
<hr>
</div>
Welcome to OpenHands (formerly OpenDevin), a platform for software development agents powered by AI.
<hr>
OpenHands agents can do anything a human developer can: modify code, run commands, browse the web,
call APIs, and yes—even copy code snippets from StackOverflow.
🙌 Welcome to OpenHands, a [community](COMMUNITY.md) focused on AI-driven development. We'd love for you to [join us on Slack](https://dub.sh/openhands).
Learn more at [docs.all-hands.dev](https://docs.all-hands.dev), or [sign up for OpenHands Cloud](https://app.all-hands.dev) to get started.
There are a few ways to work with OpenHands:
### OpenHands Software Agent SDK
The SDK is a composable Python library that contains all of our agentic tech. It's the engine that powers everything else below.
> [!IMPORTANT]
> **Upcoming change**: We are renaming our GitHub Org from `All-Hands-AI` to `OpenHands` on October 20th, 2025.
> Check the [tracking issue](https://github.com/All-Hands-AI/OpenHands/issues/11376) for more information.
Define agents in code, then run them locally, or scale to 1000s of agents in the cloud.
[Check out the docs](https://docs.openhands.dev/sdk) or [view the source](https://github.com/OpenHands/software-agent-sdk/)
> [!IMPORTANT]
> Using OpenHands for work? We'd love to chat! Fill out
> [this short form](https://docs.google.com/forms/d/e/1FAIpQLSet3VbGaz8z32gW9Wm-Grl4jpt5WgMXPgJ4EDPVmCETCBpJtQ/viewform)
> to join our Design Partner program, where you'll get early access to commercial features and the opportunity to provide input on our product roadmap.
### OpenHands CLI
The CLI is the easiest way to start using OpenHands. The experience will be familiar to anyone who has worked
with e.g. Claude Code or Codex. You can power it with Claude, GPT, or any other LLM.
## ☁️ OpenHands Cloud
The easiest way to get started with OpenHands is on [OpenHands Cloud](https://app.all-hands.dev),
which comes with $20 in free credits for new users.
[Check out the docs](https://docs.openhands.dev/openhands/usage/run-openhands/cli-mode) or [view the source](https://github.com/OpenHands/OpenHands-CLI)
## 💻 Running OpenHands Locally
### OpenHands Local GUI
Use the Local GUI for running agents on your laptop. It comes with a REST API and a single-page React application.
The experience will be familiar to anyone who has used Devin or Jules.
### Option 1: CLI Launcher (Recommended)
[Check out the docs](https://docs.openhands.dev/openhands/usage/run-openhands/local-setup) or view the source in this repo.
The easiest way to run OpenHands locally is using the CLI launcher with [uv](https://docs.astral.sh/uv/). This provides better isolation from your current project's virtual environment and is required for OpenHands' default MCP servers.
### OpenHands Cloud
This is a deployment of OpenHands GUI, running on hosted infrastructure.
**Install uv** (if you haven't already):
You can try it with a free $10 credit by [signing in with your GitHub account](https://app.all-hands.dev).
See the [uv installation guide](https://docs.astral.sh/uv/getting-started/installation/) for the latest installation instructions for your platform.
OpenHands Cloud comes with source-available features and integrations:
- Integrations with Slack, Jira, and Linear
- Multi-user support
- RBAC and permissions
- Collaboration features (e.g., conversation sharing)
**Launch OpenHands**:
```bash
# Launch the GUI server
uvx --python 3.12 --from openhands-ai openhands serve
### OpenHands Enterprise
Large enterprises can work with us to self-host OpenHands Cloud in their own VPC, via Kubernetes.
OpenHands Enterprise can also work with the CLI and SDK above.
# Or launch the CLI
uvx --python 3.12 --from openhands-ai openhands
```
OpenHands Enterprise is source-available--you can see all the source code here in the enterprise/ directory,
but you'll need to purchase a license if you want to run it for more than one month.
You'll find OpenHands running at [http://localhost:3000](http://localhost:3000) (for GUI mode)!
Enterprise contracts also come with extended support and access to our research team.
### Option 2: Docker
Learn more at [openhands.dev/enterprise](https://openhands.dev/enterprise)
<details>
<summary>Click to expand Docker command</summary>
### Everything Else
You can also run OpenHands directly with Docker:
Check out our [Product Roadmap](https://github.com/orgs/openhands/projects/1), and feel free to
[open up an issue](https://github.com/OpenHands/OpenHands/issues) if there's something you'd like to see!
```bash
docker pull docker.all-hands.dev/all-hands-ai/runtime:0.59-nikolaik
You might also be interested in our [evaluation infrastructure](https://github.com/OpenHands/benchmarks), our [chrome extension](https://github.com/OpenHands/openhands-chrome-extension/), or our [Theory-of-Mind module](https://github.com/OpenHands/ToM-SWE).
docker run -it --rm --pull=always \
-e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:0.59-nikolaik \
-e LOG_ALL_EVENTS=true \
-v /var/run/docker.sock:/var/run/docker.sock \
-v ~/.openhands:/.openhands \
-p 3000:3000 \
--add-host host.docker.internal:host-gateway \
--name openhands-app \
docker.all-hands.dev/all-hands-ai/openhands:0.59
```
All our work is available under the MIT license, except for the `enterprise/` directory in this repository (see the [enterprise license](enterprise/LICENSE) for details).
The core `openhands` and `agent-server` Docker images are fully MIT-licensed as well.
</details>
> **Note**: If you used OpenHands before version 0.44, you may want to run `mv ~/.openhands-state ~/.openhands` to migrate your conversation history to the new location.
> [!WARNING]
> On a public network? See our [Hardened Docker Installation Guide](https://docs.all-hands.dev/usage/runtimes/docker#hardened-docker-installation)
> to secure your deployment by restricting network binding and implementing additional security measures.
### Getting Started
When you open the application, you'll be asked to choose an LLM provider and add an API key.
[Anthropic's Claude Sonnet 4.5](https://www.anthropic.com/api) (`anthropic/claude-sonnet-4-5-20250929`)
works best, but you have [many options](https://docs.all-hands.dev/usage/llms).
See the [Running OpenHands](https://docs.all-hands.dev/usage/installation) guide for
system requirements and more information.
## 💡 Other ways to run OpenHands
> [!WARNING]
> OpenHands is meant to be run by a single user on their local workstation.
> It is not appropriate for multi-tenant deployments where multiple users share the same instance. There is no built-in authentication, isolation, or scalability.
>
> If you're interested in running OpenHands in a multi-tenant environment, check out the source-available, commercially-licensed
> [OpenHands Cloud Helm Chart](https://github.com/all-Hands-AI/OpenHands-cloud)
You can [connect OpenHands to your local filesystem](https://docs.all-hands.dev/usage/runtimes/docker#connecting-to-your-filesystem),
interact with it via a [friendly CLI](https://docs.all-hands.dev/usage/how-to/cli-mode),
run OpenHands in a scriptable [headless mode](https://docs.all-hands.dev/usage/how-to/headless-mode),
or run it on tagged issues with [a github action](https://docs.all-hands.dev/usage/how-to/github-action).
Visit [Running OpenHands](https://docs.all-hands.dev/usage/installation) for more information and setup instructions.
If you want to modify the OpenHands source code, check out [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
Having issues? The [Troubleshooting Guide](https://docs.all-hands.dev/usage/troubleshooting) can help.
## 📖 Documentation
To learn more about the project, and for tips on using OpenHands,
check out our [documentation](https://docs.all-hands.dev/usage/getting-started).
There you'll find resources on how to use different LLM providers,
troubleshooting resources, and advanced configuration options.
## 🤝 How to Join the Community
OpenHands is a community-driven project, and we welcome contributions from everyone. We do most of our communication
through Slack, so this is the best place to start, but we are also happy to have you contact us on Github:
- [Join our Slack workspace](https://all-hands.dev/joinslack) - Here we talk about research, architecture, and future development.
- [Read or post Github Issues](https://github.com/All-Hands-AI/OpenHands/issues) - Check out the issues we're working on, or add your own ideas.
See more about the community in [COMMUNITY.md](./COMMUNITY.md) or find details on contributing in [CONTRIBUTING.md](./CONTRIBUTING.md).
## 📈 Progress
See the monthly OpenHands roadmap [here](https://github.com/orgs/All-Hands-AI/projects/1) (updated at the maintainer's meeting at the end of each month).
<p align="center">
<a href="https://star-history.com/#All-Hands-AI/OpenHands&Date">
<img src="https://api.star-history.com/svg?repos=All-Hands-AI/OpenHands&type=Date" width="500" alt="Star History Chart">
</a>
</p>
## 📜 License
Distributed under the MIT License, with the exception of the `enterprise/` folder. See [`LICENSE`](./LICENSE) for more information.
## 🙏 Acknowledgements
OpenHands is built by a large number of contributors, and every contribution is greatly appreciated! We also build upon other open source projects, and we are deeply thankful for their work.
For a list of open source projects and licenses used in OpenHands, please see our [CREDITS.md](./CREDITS.md) file.
## 📚 Cite
```
@inproceedings{
wang2025openhands,
title={OpenHands: An Open Platform for {AI} Software Developers as Generalist Agents},
author={Xingyao Wang and Boxuan Li and Yufan Song and Frank F. Xu and Xiangru Tang and Mingchen Zhuge and Jiayi Pan and Yueqi Song and Bowen Li and Jaskirat Singh and Hoang H. Tran and Fuqiang Li and Ren Ma and Mingzhang Zheng and Bill Qian and Yanjun Shao and Niklas Muennighoff and Yizhe Zhang and Binyuan Hui and Junyang Lin and Robert Brennan and Hao Peng and Heng Ji and Graham Neubig},
booktitle={The Thirteenth International Conference on Learning Representations},
year={2025},
url={https://openreview.net/forum?id=OJd3ayDDoF}
}
```
If you need help with anything, or just want to chat, [come find us on Slack](https://dub.sh/openhands).

View File

@ -189,7 +189,7 @@ model = "gpt-4o"
# Whether to use native tool calling if supported by the model. Can be true, false, or None by default, which chooses the model's default behavior based on the evaluation.
# ATTENTION: Based on evaluation, enabling native function calling may lead to worse results
# in some scenarios. Use with caution and consider testing with your specific use case.
# https://github.com/All-Hands-AI/OpenHands/pull/4711
# https://github.com/OpenHands/OpenHands/pull/4711
#native_tool_calling = None

View File

@ -73,7 +73,7 @@ ENV VIRTUAL_ENV=/app/.venv \
COPY --chown=openhands:openhands --chmod=770 --from=backend-builder ${VIRTUAL_ENV} ${VIRTUAL_ENV}
COPY --chown=openhands:openhands --chmod=770 ./microagents ./microagents
COPY --chown=openhands:openhands --chmod=770 ./skills ./skills
COPY --chown=openhands:openhands --chmod=770 ./openhands ./openhands
COPY --chown=openhands:openhands --chmod=777 ./openhands/runtime/plugins ./openhands/runtime/plugins
COPY --chown=openhands:openhands pyproject.toml poetry.lock README.md MANIFEST.in LICENSE ./

View File

@ -1,4 +1,4 @@
DOCKER_REGISTRY=ghcr.io
DOCKER_ORG=all-hands-ai
DOCKER_ORG=openhands
DOCKER_IMAGE=openhands
DOCKER_BASE_DIR="."

View File

@ -104,6 +104,9 @@ RUN apt-get update && apt-get install -y \
&& apt-get clean \
&& apt-get autoremove -y
# mark /app as safe git directory to avoid pre-commit errors
RUN git config --system --add safe.directory /app
WORKDIR /app
# cache build dependencies

View File

@ -1,7 +1,7 @@
# Develop in Docker
> [!WARNING]
> This is not officially supported and may not work.
> This way of running OpenHands is not officially supported. It is maintained by the community and may not work.
Install [Docker](https://docs.docker.com/engine/install/) on your host machine and run:

View File

@ -12,7 +12,7 @@ services:
- SANDBOX_API_HOSTNAME=host.docker.internal
- DOCKER_HOST_ADDR=host.docker.internal
#
- SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/all-hands-ai/runtime:0.59-nikolaik}
- SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/openhands/runtime:0.62-nikolaik}
- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234}
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
ports:

View File

@ -1,10 +1,7 @@
DOCKER_REGISTRY=ghcr.io
DOCKER_ORG=all-hands-ai
DOCKER_ORG=openhands
DOCKER_BASE_DIR="./containers/runtime"
DOCKER_IMAGE=runtime
# These variables will be appended by the runtime_build.py script
# DOCKER_IMAGE_TAG=
# DOCKER_IMAGE_SOURCE_TAG=
DOCKER_IMAGE_TAG=oh_v0.59.0_image_nikolaik_s_python-nodejs_tag_python3.12-nodejs22
DOCKER_IMAGE_SOURCE_TAG=oh_v0.59.0_cwpsf0pego28lacp_p73ruf86qxiulkou

View File

@ -3,9 +3,9 @@ repos:
rev: v5.0.0
hooks:
- id: trailing-whitespace
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
- id: end-of-file-fixer
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/|openhands-cli/)
exclude: ^(docs/|modules/|python/|openhands-ui/|third_party/|enterprise/)
- id: check-yaml
args: ["--allow-multiple-documents"]
- id: debug-statements
@ -28,12 +28,12 @@ repos:
entry: ruff check --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
args: [--fix, --unsafe-fixes]
exclude: ^(third_party/|enterprise/|openhands-cli/)
exclude: ^(third_party/|enterprise/)
# Run the formatter.
- id: ruff-format
entry: ruff format --config dev_config/python/ruff.toml
types_or: [python, pyi, jupyter]
exclude: ^(third_party/|enterprise/|openhands-cli/)
exclude: ^(third_party/|enterprise/)
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.15.0

View File

@ -7,7 +7,7 @@ services:
image: openhands:latest
container_name: openhands-app-${DATE:-}
environment:
- SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.59-nikolaik}
- SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.openhands.dev/openhands/runtime:0.62-nikolaik}
#- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of ~/.openhands for this user
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
ports:

View File

@ -1,5 +1,5 @@
ARG OPENHANDS_VERSION=latest
ARG BASE="ghcr.io/all-hands-ai/openhands"
ARG BASE="ghcr.io/openhands/openhands"
FROM ${BASE}:${OPENHANDS_VERSION}
# Datadog labels

View File

@ -8,7 +8,7 @@
This directory contains the enterprise server used by [OpenHands Cloud](https://github.com/All-Hands-AI/OpenHands-Cloud/). The official, public version of OpenHands Cloud is available at
[app.all-hands.dev](https://app.all-hands.dev).
You may also want to check out the MIT-licensed [OpenHands](https://github.com/All-Hands-AI/OpenHands)
You may also want to check out the MIT-licensed [OpenHands](https://github.com/OpenHands/OpenHands)
## Extension of OpenHands (OSS)
@ -16,7 +16,7 @@ The code in `/enterprise` directory builds on top of open source (OSS) code, ext
- Enterprise stacks on top of OSS. For example, the middleware in enterprise is stacked right on top of the middlewares in OSS. In `SAAS`, the middleware from BOTH repos will be present and running (which can sometimes cause conflicts)
- Enterprise overrides the implementation in OSS (only one is present at a time). For example, the server config SaasServerConfig which overrides [`ServerConfig`](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/server/config/server_config.py#L8) on OSS. This is done through dynamic imports ([see here](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/server/config/server_config.py#L37-#L45))
- Enterprise overrides the implementation in OSS (only one is present at a time). For example, the server config SaasServerConfig which overrides [`ServerConfig`](https://github.com/OpenHands/OpenHands/blob/main/openhands/server/config/server_config.py#L8) on OSS. This is done through dynamic imports ([see here](https://github.com/OpenHands/OpenHands/blob/main/openhands/server/config/server_config.py#L37-#L45))
Key areas that change on `SAAS` are

View File

@ -0,0 +1,856 @@
# OpenHands Enterprise Usage Telemetry Service
## Table of Contents
1. [Introduction](#1-introduction)
- 1.1 [Problem Statement](#11-problem-statement)
- 1.2 [Proposed Solution](#12-proposed-solution)
2. [User Interface](#2-user-interface)
- 2.1 [License Warning Banner](#21-license-warning-banner)
- 2.2 [Administrator Experience](#22-administrator-experience)
3. [Other Context](#3-other-context)
- 3.1 [Replicated Platform Integration](#31-replicated-platform-integration)
- 3.2 [Administrator Email Detection Strategy](#32-administrator-email-detection-strategy)
- 3.3 [Metrics Collection Framework](#33-metrics-collection-framework)
4. [Technical Design](#4-technical-design)
- 4.1 [Database Schema](#41-database-schema)
- 4.1.1 [Telemetry Metrics Table](#411-telemetry-metrics-table)
- 4.1.2 [Telemetry Identity Table](#412-telemetry-identity-table)
- 4.2 [Metrics Collection Framework](#42-metrics-collection-framework)
- 4.2.1 [Base Collector Interface](#421-base-collector-interface)
- 4.2.2 [Collector Registry](#422-collector-registry)
- 4.2.3 [Example Collector Implementation](#423-example-collector-implementation)
- 4.3 [Collection and Upload System](#43-collection-and-upload-system)
- 4.3.1 [Metrics Collection Processor](#431-metrics-collection-processor)
- 4.3.2 [Replicated Upload Processor](#432-replicated-upload-processor)
- 4.4 [License Warning System](#44-license-warning-system)
- 4.4.1 [License Status Endpoint](#441-license-status-endpoint)
- 4.4.2 [UI Integration](#442-ui-integration)
- 4.5 [Cronjob Configuration](#45-cronjob-configuration)
- 4.5.1 [Collection Cronjob](#451-collection-cronjob)
- 4.5.2 [Upload Cronjob](#452-upload-cronjob)
5. [Implementation Plan](#5-implementation-plan)
- 5.1 [Database Schema and Models (M1)](#51-database-schema-and-models-m1)
- 5.1.1 [OpenHands - Database Migration](#511-openhands---database-migration)
- 5.1.2 [OpenHands - Model Tests](#512-openhands---model-tests)
- 5.2 [Metrics Collection Framework (M2)](#52-metrics-collection-framework-m2)
- 5.2.1 [OpenHands - Core Collection Framework](#521-openhands---core-collection-framework)
- 5.2.2 [OpenHands - Example Collectors](#522-openhands---example-collectors)
- 5.2.3 [OpenHands - Framework Tests](#523-openhands---framework-tests)
- 5.3 [Collection and Upload Processors (M3)](#53-collection-and-upload-processors-m3)
- 5.3.1 [OpenHands - Collection Processor](#531-openhands---collection-processor)
- 5.3.2 [OpenHands - Upload Processor](#532-openhands---upload-processor)
- 5.3.3 [OpenHands - Integration Tests](#533-openhands---integration-tests)
- 5.4 [License Warning API (M4)](#54-license-warning-api-m4)
- 5.4.1 [OpenHands - License Status API](#541-openhands---license-status-api)
- 5.4.2 [OpenHands - API Integration](#542-openhands---api-integration)
- 5.5 [UI Warning Banner (M5)](#55-ui-warning-banner-m5)
- 5.5.1 [OpenHands - UI Warning Banner](#551-openhands---ui-warning-banner)
- 5.5.2 [OpenHands - UI Integration](#552-openhands---ui-integration)
- 5.6 [Helm Chart Deployment Configuration (M6)](#56-helm-chart-deployment-configuration-m6)
- 5.6.1 [OpenHands-Cloud - Cronjob Manifests](#561-openhands-cloud---cronjob-manifests)
- 5.6.2 [OpenHands-Cloud - Configuration Management](#562-openhands-cloud---configuration-management)
- 5.7 [Documentation and Enhanced Collectors (M7)](#57-documentation-and-enhanced-collectors-m7)
- 5.7.1 [OpenHands - Advanced Collectors](#571-openhands---advanced-collectors)
- 5.7.2 [OpenHands - Monitoring and Testing](#572-openhands---monitoring-and-testing)
- 5.7.3 [OpenHands - Technical Documentation](#573-openhands---technical-documentation)
## 1. Introduction
### 1.1 Problem Statement
OpenHands Enterprise (OHE) helm charts are publicly available but not open source, creating a visibility gap for the sales team. Unknown users can install and use OHE without the vendor's knowledge, preventing proper customer engagement and sales pipeline management. Without usage telemetry, the vendor cannot identify potential customers, track installation health, or proactively support users who may need assistance.
### 1.2 Proposed Solution
We propose implementing a comprehensive telemetry service that leverages the Replicated metrics platform and Python SDK to track OHE installations and usage. The solution provides automatic customer discovery, instance monitoring, and usage metrics collection while maintaining a clear license compliance pathway.
The system consists of three main components: (1) a pluggable metrics collection framework that allows developers to easily define and register custom metrics collectors, (2) automated cronjobs that periodically collect metrics and upload them to Replicated's vendor portal, and (3) a license compliance warning system that displays UI notifications when telemetry uploads fail, indicating potential license expiration.
The design ensures that telemetry cannot be easily disabled without breaking core OHE functionality by tying the warning system to environment variables that are essential for OHE operation. This approach balances user transparency with business requirements for customer visibility.
## 2. User Interface
### 2.1 License Warning Banner
When telemetry uploads fail for more than 4 days, users will see a prominent warning banner in the OpenHands Enterprise UI:
```
⚠️ Your OpenHands Enterprise license will expire in 30 days. Please contact support if this issue persists.
```
The banner appears at the top of all pages and cannot be permanently dismissed while the condition persists. Users can temporarily dismiss it, but it will reappear on page refresh until telemetry uploads resume successfully.
### 2.2 Administrator Experience
System administrators will not need to configure the telemetry system manually. The service automatically:
1. **Detects OHE installations** using existing required environment variables (`GITHUB_APP_CLIENT_ID`, `KEYCLOAK_SERVER_URL`, etc.)
2. **Generates unique customer identifiers** using administrator contact information:
- Customer email: Determined by the following priority order:
1. `OPENHANDS_ADMIN_EMAIL` environment variable (if set in helm values)
2. Email of the first user who accepted Terms of Service (earliest `accepted_tos` timestamp)
- Instance ID: Automatically generated by Replicated SDK using machine fingerprinting (IOPlatformUUID on macOS, D-Bus machine ID on Linux, Machine GUID on Windows)
- **No Fallback**: If neither email source is available, telemetry collection is skipped until at least one user exists
3. **Collects and uploads metrics transparently** in the background via weekly collection and daily upload cronjobs
4. **Displays warnings only when necessary** for license compliance - no notifications appear during normal operation
## 3. Other Context
### 3.1 Replicated Platform Integration
The Replicated platform provides vendor-hosted infrastructure for collecting customer and instance telemetry. The Python SDK handles authentication, state management, and reliable metric delivery. Key concepts:
- **Customer**: Represents a unique OHE installation, identified by email or installation fingerprint
- **Instance**: Represents a specific deployment of OHE for a customer
- **Metrics**: Custom key-value data points collected from the installation
- **Status**: Instance health indicators (running, degraded, updating, etc.)
The SDK automatically handles machine fingerprinting, local state caching, and retry logic for failed uploads.
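
As a concrete point of reference, the end-to-end flow this design relies on looks roughly like the sketch below (a minimal sketch built on the async client API assumed later in Section 4.3.2; the class and method names follow this design document rather than a verified public SDK, and the key, slug, and email are placeholders):

```python
import asyncio

from replicated import AsyncReplicatedClient, InstanceStatus


async def report_once(publishable_key: str, app_slug: str) -> None:
    # Resolve (or create) the customer and instance on the vendor portal,
    # then attach key/value metrics and a health status to the instance.
    async with AsyncReplicatedClient(
        publishable_key=publishable_key, app_slug=app_slug
    ) as client:
        customer = await client.customer.get_or_create(
            email_address="admin@example.com"  # placeholder
        )
        instance = await customer.get_or_create_instance()
        await instance.send_metric("total_users", 42)
        await instance.set_status(InstanceStatus.RUNNING)


asyncio.run(report_once("pk_live_example", "openhands-enterprise"))
```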
### 3.2 Administrator Email Detection Strategy
To identify the appropriate administrator contact for sales outreach, the system uses a two-tier approach that avoids performance penalties on user authentication:
**Tier 1: Explicit Configuration** - The `OPENHANDS_ADMIN_EMAIL` environment variable allows administrators to explicitly specify the contact email during deployment.
**Tier 2: First Active User Detection** - If no explicit email is configured, the system identifies the first user who accepted Terms of Service (earliest `accepted_tos` timestamp with a valid email). This represents the first person to actively engage with the system and is very likely the administrator or installer.
**No Fallback Needed** - If neither email source is available, telemetry collection is skipped entirely. This ensures we only report meaningful usage data when there are actual active users.
**Performance Optimization**: The admin email determination is performed only during telemetry upload attempts, ensuring zero performance impact on user login flows.
### 3.3 Metrics Collection Framework
The proposed collector framework allows developers to define metrics in a single file change:
```python
@register_collector("user_activity")
class UserActivityCollector(MetricsCollector):
def collect(self) -> Dict[str, Any]:
# Query database and return metrics
return {"active_users_7d": count, "conversations_created": total}
```
Collectors are automatically discovered and executed by the collection cronjob, making the system extensible without modifying core collection logic.
## 4. Technical Design
### 4.1 Database Schema
#### 4.1.1 Telemetry Metrics Table
Stores collected metrics with transmission status tracking:
```sql
CREATE TABLE telemetry_metrics (
id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
metrics_data JSONB NOT NULL,
uploaded_at TIMESTAMP WITH TIME ZONE NULL,
upload_attempts INTEGER DEFAULT 0,
last_upload_error TEXT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);
CREATE INDEX idx_telemetry_metrics_collected_at ON telemetry_metrics(collected_at);
CREATE INDEX idx_telemetry_metrics_uploaded_at ON telemetry_metrics(uploaded_at);
```
#### 4.1.2 Telemetry Identity Table
Stores persistent identity information that must survive container restarts:
```sql
CREATE TABLE telemetry_identity (
id INTEGER PRIMARY KEY DEFAULT 1,
customer_id VARCHAR(255) NULL,
instance_id VARCHAR(255) NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT single_identity_row CHECK (id = 1)
);
```
**Design Rationale:**
- **Separation of Concerns**: Identity data (customer_id, instance_id) is separated from operational data
- **Persistent vs Computed**: Only data that cannot be reliably recomputed is persisted
- **Upload Tracking**: Upload timestamps are tied directly to the metrics they represent
- **Simplified Queries**: System state can be derived from metrics table (e.g., `MAX(uploaded_at)` for last successful upload)
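
For reference, a minimal SQLAlchemy sketch of the metrics model above (the declarative `Base` and session wiring are assumptions here; the real model in `enterprise/storage/telemetry_metrics.py` would reuse the existing enterprise storage base class):

```python
import uuid
from datetime import datetime, timezone

from sqlalchemy import Column, DateTime, Integer, Text
from sqlalchemy.dialects.postgresql import JSONB, UUID
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class TelemetryMetrics(Base):
    """Mirrors the telemetry_metrics table defined above."""

    __tablename__ = "telemetry_metrics"

    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    collected_at = Column(
        DateTime(timezone=True),
        nullable=False,
        default=lambda: datetime.now(timezone.utc),
    )
    metrics_data = Column(JSONB, nullable=False)
    uploaded_at = Column(DateTime(timezone=True), nullable=True)
    upload_attempts = Column(Integer, default=0)
    last_upload_error = Column(Text, nullable=True)
    # created_at / updated_at bookkeeping columns omitted for brevity
```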
### 4.2 Metrics Collection Framework
#### 4.2.1 Base Collector Interface
```python
from abc import ABC, abstractmethod
from typing import Dict, Any, List
from dataclasses import dataclass
@dataclass
class MetricResult:
key: str
value: Any
class MetricsCollector(ABC):
"""Base class for metrics collectors."""
@abstractmethod
def collect(self) -> List[MetricResult]:
"""Collect metrics and return results."""
pass
@property
@abstractmethod
def collector_name(self) -> str:
"""Unique name for this collector."""
pass
def should_collect(self) -> bool:
"""Override to add collection conditions."""
return True
```
#### 4.2.2 Collector Registry
```python
from typing import Dict, Type, List
import importlib
import pkgutil
class CollectorRegistry:
"""Registry for metrics collectors."""
def __init__(self):
self._collectors: Dict[str, Type[MetricsCollector]] = {}
def register(self, collector_class: Type[MetricsCollector]) -> None:
"""Register a collector class."""
collector = collector_class()
self._collectors[collector.collector_name] = collector_class
def get_all_collectors(self) -> List[MetricsCollector]:
"""Get instances of all registered collectors."""
return [cls() for cls in self._collectors.values()]
    def discover_collectors(self, package_path: str) -> None:
        """Auto-discover collectors in a package."""
        # Importing each module runs its @register_collector decorator,
        # which adds the collector class to this registry.
        package = importlib.import_module(package_path)
        for _, module_name, _ in pkgutil.iter_modules(package.__path__):
            importlib.import_module(f"{package_path}.{module_name}")
# Global registry instance
collector_registry = CollectorRegistry()
def register_collector(name: str):
    """Decorator to register a collector.

    The name argument is informational; registration is keyed on the
    collector's collector_name property.
    """
    def decorator(cls: Type[MetricsCollector]) -> Type[MetricsCollector]:
        collector_registry.register(cls)
        return cls
    return decorator
```
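
Taken together, discovery, registration, and collection would look roughly like this at the call site (a sketch; the collectors package path is the one proposed in Section 5.2):

```python
# Importing the collector modules runs their @register_collector decorators;
# afterwards every registered collector can be executed in one pass.
collector_registry.discover_collectors("enterprise.server.telemetry.collectors")

for collector in collector_registry.get_all_collectors():
    if collector.should_collect():
        for result in collector.collect():
            print(collector.collector_name, result.key, result.value)
```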
#### 4.2.3 Example Collector Implementation
```python
@register_collector("system_metrics")
class SystemMetricsCollector(MetricsCollector):
"""Collects basic system and usage metrics."""
@property
def collector_name(self) -> str:
return "system_metrics"
def collect(self) -> List[MetricResult]:
results = []
# Collect user count
with session_maker() as session:
user_count = session.query(UserSettings).count()
results.append(MetricResult(
key="total_users",
value=user_count
))
# Collect conversation count (last 30 days)
thirty_days_ago = datetime.now(timezone.utc) - timedelta(days=30)
conversation_count = session.query(StoredConversationMetadata)\
.filter(StoredConversationMetadata.created_at >= thirty_days_ago)\
.count()
results.append(MetricResult(
key="conversations_30d",
value=conversation_count
))
return results
```
### 4.3 Collection and Upload System
#### 4.3.1 Metrics Collection Processor
```python
class TelemetryCollectionProcessor(MaintenanceTaskProcessor):
"""Maintenance task processor for collecting metrics."""
collection_interval_days: int = 7
async def __call__(self, task: MaintenanceTask) -> dict:
"""Collect metrics from all registered collectors."""
# Check if collection is needed
if not self._should_collect():
return {"status": "skipped", "reason": "too_recent"}
# Collect metrics from all registered collectors
all_metrics = {}
collector_results = {}
for collector in collector_registry.get_all_collectors():
try:
if collector.should_collect():
results = collector.collect()
for result in results:
all_metrics[result.key] = result.value
collector_results[collector.collector_name] = len(results)
except Exception as e:
logger.error(f"Collector {collector.collector_name} failed: {e}")
collector_results[collector.collector_name] = f"error: {e}"
# Store metrics in database
with session_maker() as session:
telemetry_record = TelemetryMetrics(
metrics_data=all_metrics,
collected_at=datetime.now(timezone.utc)
)
session.add(telemetry_record)
session.commit()
# Note: No need to track last_collection_at separately
# Can be derived from MAX(collected_at) in telemetry_metrics
return {
"status": "completed",
"metrics_collected": len(all_metrics),
"collectors_run": collector_results
}
def _should_collect(self) -> bool:
"""Check if collection is needed based on interval."""
with session_maker() as session:
# Get last collection time from metrics table
last_collected = session.query(func.max(TelemetryMetrics.collected_at)).scalar()
if not last_collected:
return True
time_since_last = datetime.now(timezone.utc) - last_collected
return time_since_last.days >= self.collection_interval_days
```
#### 4.3.2 Replicated Upload Processor
```python
from replicated import AsyncReplicatedClient, InstanceStatus
class TelemetryUploadProcessor(MaintenanceTaskProcessor):
"""Maintenance task processor for uploading metrics to Replicated."""
replicated_publishable_key: str
replicated_app_slug: str
async def __call__(self, task: MaintenanceTask) -> dict:
"""Upload pending metrics to Replicated."""
# Get pending metrics
with session_maker() as session:
pending_metrics = session.query(TelemetryMetrics)\
.filter(TelemetryMetrics.uploaded_at.is_(None))\
.order_by(TelemetryMetrics.collected_at)\
.all()
if not pending_metrics:
return {"status": "no_pending_metrics"}
# Get admin email - skip if not available
admin_email = self._get_admin_email()
if not admin_email:
logger.info("Skipping telemetry upload - no admin email available")
return {
"status": "skipped",
"reason": "no_admin_email",
"total_processed": 0
}
uploaded_count = 0
failed_count = 0
async with AsyncReplicatedClient(
publishable_key=self.replicated_publishable_key,
app_slug=self.replicated_app_slug
) as client:
# Get or create customer and instance
customer = await client.customer.get_or_create(
email_address=admin_email
)
instance = await customer.get_or_create_instance()
# Store customer/instance IDs for future use
await self._update_telemetry_identity(customer.customer_id, instance.instance_id)
# Upload each metric batch
for metric_record in pending_metrics:
try:
# Send individual metrics
for key, value in metric_record.metrics_data.items():
await instance.send_metric(key, value)
# Update instance status
await instance.set_status(InstanceStatus.RUNNING)
# Mark as uploaded
with session_maker() as session:
record = session.query(TelemetryMetrics)\
.filter(TelemetryMetrics.id == metric_record.id)\
.first()
if record:
record.uploaded_at = datetime.now(timezone.utc)
session.commit()
uploaded_count += 1
except Exception as e:
logger.error(f"Failed to upload metrics {metric_record.id}: {e}")
# Update error info
with session_maker() as session:
record = session.query(TelemetryMetrics)\
.filter(TelemetryMetrics.id == metric_record.id)\
.first()
if record:
record.upload_attempts += 1
record.last_upload_error = str(e)
session.commit()
failed_count += 1
# Note: No need to track last_successful_upload_at separately
# Can be derived from MAX(uploaded_at) in telemetry_metrics
return {
"status": "completed",
"uploaded": uploaded_count,
"failed": failed_count,
"total_processed": len(pending_metrics)
}
def _get_admin_email(self) -> str | None:
"""Get administrator email for customer identification."""
# 1. Check environment variable first
env_admin_email = os.getenv('OPENHANDS_ADMIN_EMAIL')
if env_admin_email:
logger.info("Using admin email from environment variable")
return env_admin_email
# 2. Use first active user's email (earliest accepted_tos)
with session_maker() as session:
first_user = session.query(UserSettings)\
.filter(UserSettings.email.isnot(None))\
.filter(UserSettings.accepted_tos.isnot(None))\
.order_by(UserSettings.accepted_tos.asc())\
.first()
if first_user and first_user.email:
logger.info(f"Using first active user email: {first_user.email}")
return first_user.email
# No admin email available - skip telemetry
logger.info("No admin email available - skipping telemetry collection")
return None
async def _update_telemetry_identity(self, customer_id: str, instance_id: str) -> None:
"""Update or create telemetry identity record."""
with session_maker() as session:
identity = session.query(TelemetryIdentity).first()
if not identity:
identity = TelemetryIdentity()
session.add(identity)
identity.customer_id = customer_id
identity.instance_id = instance_id
session.commit()
```
### 4.4 License Warning System
#### 4.4.1 License Status Endpoint
```python
from fastapi import APIRouter
from datetime import datetime, timezone, timedelta
license_router = APIRouter()
@license_router.get("/license-status")
async def get_license_status():
"""Get license warning status for UI display."""
# Only show warnings for OHE installations
if not _is_openhands_enterprise():
return {"warn": False, "message": ""}
with session_maker() as session:
# Get last successful upload time from metrics table
last_upload = session.query(func.max(TelemetryMetrics.uploaded_at))\
.filter(TelemetryMetrics.uploaded_at.isnot(None))\
.scalar()
if not last_upload:
            # No successful uploads yet - surface the verification-pending warning immediately
return {
"warn": True,
"message": "OpenHands Enterprise license verification pending. Please ensure network connectivity."
}
# Check if last successful upload was more than 4 days ago
days_since_upload = (datetime.now(timezone.utc) - last_upload).days
if days_since_upload > 4:
# Find oldest unsent batch
oldest_unsent = session.query(TelemetryMetrics)\
.filter(TelemetryMetrics.uploaded_at.is_(None))\
.order_by(TelemetryMetrics.collected_at)\
.first()
if oldest_unsent:
# Calculate expiration date (oldest unsent + 34 days)
expiration_date = oldest_unsent.collected_at + timedelta(days=34)
days_until_expiration = (expiration_date - datetime.now(timezone.utc)).days
if days_until_expiration <= 0:
message = "Your OpenHands Enterprise license has expired. Please contact support immediately."
else:
message = f"Your OpenHands Enterprise license will expire in {days_until_expiration} days. Please contact support if this issue persists."
return {"warn": True, "message": message}
return {"warn": False, "message": ""}
def _is_openhands_enterprise() -> bool:
"""Detect if this is an OHE installation."""
# Check for required OHE environment variables
required_vars = [
'GITHUB_APP_CLIENT_ID',
'KEYCLOAK_SERVER_URL',
'KEYCLOAK_REALM_NAME'
]
return all(os.getenv(var) for var in required_vars)
```
#### 4.4.2 UI Integration
The frontend will poll the license status endpoint and display warnings using the existing banner component pattern:
```typescript
// New component: LicenseWarningBanner.tsx
interface LicenseStatus {
warn: boolean;
message: string;
}
export function LicenseWarningBanner() {
const [licenseStatus, setLicenseStatus] = useState<LicenseStatus>({ warn: false, message: "" });
useEffect(() => {
const checkLicenseStatus = async () => {
try {
const response = await fetch('/api/license-status');
const status = await response.json();
setLicenseStatus(status);
} catch (error) {
console.error('Failed to check license status:', error);
}
};
// Check immediately and then every hour
checkLicenseStatus();
const interval = setInterval(checkLicenseStatus, 60 * 60 * 1000);
return () => clearInterval(interval);
}, []);
if (!licenseStatus.warn) {
return null;
}
return (
<div className="bg-red-600 text-white p-4 rounded flex items-center justify-between">
<div className="flex items-center">
<FaExclamationTriangle className="mr-3" />
<span>{licenseStatus.message}</span>
</div>
</div>
);
}
```
### 4.5 Cronjob Configuration
The cronjob configurations will be deployed via the OpenHands-Cloud helm charts.
#### 4.5.1 Collection Cronjob
The collection cronjob runs weekly to gather metrics:
```yaml
# charts/openhands/templates/telemetry-collection-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ include "openhands.fullname" . }}-telemetry-collection
labels:
{{- include "openhands.labels" . | nindent 4 }}
spec:
schedule: "0 2 * * 0" # Weekly on Sunday at 2 AM
jobTemplate:
spec:
template:
spec:
containers:
- name: telemetry-collector
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
env:
{{- include "openhands.env" . | nindent 12 }}
command:
- python
- -c
- |
from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
from enterprise.storage.database import session_maker
from enterprise.server.telemetry.collection_processor import TelemetryCollectionProcessor
# Create collection task
processor = TelemetryCollectionProcessor()
task = MaintenanceTask()
task.set_processor(processor)
task.status = MaintenanceTaskStatus.PENDING
with session_maker() as session:
session.add(task)
session.commit()
restartPolicy: OnFailure
```
#### 4.5.2 Upload Cronjob
The upload cronjob runs daily to send metrics to Replicated:
```yaml
# charts/openhands/templates/telemetry-upload-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
name: {{ include "openhands.fullname" . }}-telemetry-upload
labels:
{{- include "openhands.labels" . | nindent 4 }}
spec:
schedule: "0 3 * * *" # Daily at 3 AM
jobTemplate:
spec:
template:
spec:
containers:
- name: telemetry-uploader
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
env:
{{- include "openhands.env" . | nindent 12 }}
- name: REPLICATED_PUBLISHABLE_KEY
valueFrom:
secretKeyRef:
name: {{ include "openhands.fullname" . }}-replicated-config
key: publishable-key
- name: REPLICATED_APP_SLUG
value: {{ .Values.telemetry.replicatedAppSlug | default "openhands-enterprise" | quote }}
command:
- python
- -c
- |
from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
from enterprise.storage.database import session_maker
from enterprise.server.telemetry.upload_processor import TelemetryUploadProcessor
import os
# Create upload task
processor = TelemetryUploadProcessor(
replicated_publishable_key=os.getenv('REPLICATED_PUBLISHABLE_KEY'),
replicated_app_slug=os.getenv('REPLICATED_APP_SLUG', 'openhands-enterprise')
)
task = MaintenanceTask()
task.set_processor(processor)
task.status = MaintenanceTaskStatus.PENDING
with session_maker() as session:
session.add(task)
session.commit()
restartPolicy: OnFailure
```
## 5. Implementation Plan
All implementation must pass existing lints and tests. New functionality requires comprehensive unit tests with >90% coverage. Integration tests should verify end-to-end telemetry flow including collection, storage, upload, and warning display.
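
As an illustration of the expected shape of these tests, a unit test for the registry might look like the sketch below (module paths are taken from the plan; the fixture-free style is an assumption):

```python
# Sketch of enterprise/tests/unit/telemetry/test_collector_registry.py
from typing import List

from enterprise.server.telemetry.collector_base import MetricResult, MetricsCollector
from enterprise.server.telemetry.collector_registry import CollectorRegistry


class DummyCollector(MetricsCollector):
    @property
    def collector_name(self) -> str:
        return "dummy"

    def collect(self) -> List[MetricResult]:
        return [MetricResult(key="answer", value=42)]


def test_registry_returns_registered_collectors():
    registry = CollectorRegistry()
    registry.register(DummyCollector)
    collectors = registry.get_all_collectors()
    assert [c.collector_name for c in collectors] == ["dummy"]
    assert collectors[0].collect()[0].value == 42
```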
### 5.1 Database Schema and Models (M1)
**Repository**: OpenHands
Establish the foundational database schema and SQLAlchemy models for telemetry data storage.
#### 5.1.1 OpenHands - Database Migration
- [ ] `enterprise/migrations/versions/077_create_telemetry_tables.py`
- [ ] `enterprise/storage/telemetry_metrics.py`
- [ ] `enterprise/storage/telemetry_config.py`
#### 5.1.2 OpenHands - Model Tests
- [ ] `enterprise/tests/unit/storage/test_telemetry_metrics.py`
- [ ] `enterprise/tests/unit/storage/test_telemetry_config.py`
**Demo**: Database tables created and models can store/retrieve telemetry data.
### 5.2 Metrics Collection Framework (M2)
**Repository**: OpenHands
Implement the pluggable metrics collection system with registry and base classes.
#### 5.2.1 OpenHands - Core Collection Framework
- [ ] `enterprise/server/telemetry/__init__.py`
- [ ] `enterprise/server/telemetry/collector_base.py`
- [ ] `enterprise/server/telemetry/collector_registry.py`
- [ ] `enterprise/server/telemetry/decorators.py`
#### 5.2.2 OpenHands - Example Collectors
- [ ] `enterprise/server/telemetry/collectors/__init__.py`
- [ ] `enterprise/server/telemetry/collectors/system_metrics.py`
- [ ] `enterprise/server/telemetry/collectors/user_activity.py`
#### 5.2.3 OpenHands - Framework Tests
- [ ] `enterprise/tests/unit/telemetry/test_collector_base.py`
- [ ] `enterprise/tests/unit/telemetry/test_collector_registry.py`
- [ ] `enterprise/tests/unit/telemetry/test_system_metrics.py`
**Demo**: Developers can create new collectors with a single file change using the @register_collector decorator.
### 5.3 Collection and Upload Processors (M3)
**Repository**: OpenHands
Implement maintenance task processors for collecting metrics and uploading to Replicated.
#### 5.3.1 OpenHands - Collection Processor
- [ ] `enterprise/server/telemetry/collection_processor.py`
- [ ] `enterprise/tests/unit/telemetry/test_collection_processor.py`
#### 5.3.2 OpenHands - Upload Processor
- [ ] `enterprise/server/telemetry/upload_processor.py`
- [ ] `enterprise/tests/unit/telemetry/test_upload_processor.py`
#### 5.3.3 OpenHands - Integration Tests
- [ ] `enterprise/tests/integration/test_telemetry_flow.py`
**Demo**: Metrics are automatically collected weekly and uploaded daily to Replicated vendor portal.
### 5.4 License Warning API (M4)
**Repository**: OpenHands
Implement the license status endpoint for the warning system.
#### 5.4.1 OpenHands - License Status API
- [ ] `enterprise/server/routes/license.py`
- [ ] `enterprise/tests/unit/routes/test_license.py`
#### 5.4.2 OpenHands - API Integration
- [ ] Update `enterprise/saas_server.py` to include license router
**Demo**: License status API returns warning status based on telemetry upload success.
### 5.5 UI Warning Banner (M5)
**Repository**: OpenHands
Implement the frontend warning banner component and integration.
#### 5.5.1 OpenHands - UI Warning Banner
- [ ] `frontend/src/components/features/license/license-warning-banner.tsx`
- [ ] `frontend/src/components/features/license/license-warning-banner.test.tsx`
#### 5.5.2 OpenHands - UI Integration
- [ ] Update main UI layout to include license warning banner
- [ ] Add license status polling service
**Demo**: License warnings appear in UI when telemetry uploads fail for >4 days, with accurate expiration countdown.
### 5.6 Helm Chart Deployment Configuration (M6)
**Repository**: OpenHands-Cloud
Create Kubernetes cronjob configurations and deployment scripts.
#### 5.6.1 OpenHands-Cloud - Cronjob Manifests
- [ ] `charts/openhands/templates/telemetry-collection-cronjob.yaml`
- [ ] `charts/openhands/templates/telemetry-upload-cronjob.yaml`
#### 5.6.2 OpenHands-Cloud - Configuration Management
- [ ] `charts/openhands/templates/replicated-secret.yaml`
- [ ] Update `charts/openhands/values.yaml` with telemetry configuration options:
```yaml
# Add to values.yaml
telemetry:
enabled: true
replicatedAppSlug: "openhands-enterprise"
adminEmail: "" # Optional: admin email for customer identification
# Add to deployment environment variables
env:
OPENHANDS_ADMIN_EMAIL: "{{ .Values.telemetry.adminEmail }}"
```
**Demo**: Complete telemetry system deployed via helm chart with configurable collection intervals and Replicated integration.
### 5.7 Documentation and Enhanced Collectors (M7)
**Repository**: OpenHands
Add comprehensive metrics collectors, monitoring capabilities, and documentation.
#### 5.7.1 OpenHands - Advanced Collectors
- [ ] `enterprise/server/telemetry/collectors/conversation_metrics.py`
- [ ] `enterprise/server/telemetry/collectors/integration_usage.py`
- [ ] `enterprise/server/telemetry/collectors/performance_metrics.py`
#### 5.7.2 OpenHands - Monitoring and Testing
- [ ] `enterprise/server/telemetry/monitoring.py`
- [ ] `enterprise/tests/e2e/test_telemetry_system.py`
- [ ] Performance tests for large-scale metric collection
#### 5.7.3 OpenHands - Technical Documentation
- [ ] `enterprise/server/telemetry/README.md`
- [ ] Update deployment documentation with telemetry configuration instructions
- [ ] Add troubleshooting guide for telemetry issues
**Demo**: Rich telemetry data flowing to vendor portal with comprehensive monitoring, alerting for system health, and complete documentation.

View File

@ -64,7 +64,7 @@ python enterprise_local/convert_to_env.py
You'll also need to set up the runtime image, so that the dev server doesn't try to rebuild it.
```
export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/all-hands-ai/runtime:main-nikolaik
export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/openhands/runtime:main-nikolaik
docker pull $SANDBOX_RUNTIME_CONTAINER_IMAGE
```
@ -203,7 +203,7 @@ And then invoking `printenv`. NOTE: _DO NOT DO THIS WITH PROD!!!_ (Hopefully by
"REDIS_HOST": "localhost:6379",
"OPENHANDS": "<YOUR LOCAL OSS OPENHANDS DIR>",
"FRONTEND_DIRECTORY": "<YOUR LOCAL OSS OPENHANDS DIR>/frontend/build",
"SANDBOX_RUNTIME_CONTAINER_IMAGE": "ghcr.io/all-hands-ai/runtime:main-nikolaik",
"SANDBOX_RUNTIME_CONTAINER_IMAGE": "ghcr.io/openhands/runtime:main-nikolaik",
"FILE_STORE_PATH": "<YOUR HOME DIRECTORY>>/.openhands-state",
"OPENHANDS_CONFIG_CLS": "server.config.SaaSServerConfig",
"GITHUB_APP_ID": "1062351",
@ -237,7 +237,7 @@ And then invoking `printenv`. NOTE: _DO NOT DO THIS WITH PROD!!!_ (Hopefully by
"REDIS_HOST": "localhost:6379",
"OPENHANDS": "<YOUR LOCAL OSS OPENHANDS DIR>",
"FRONTEND_DIRECTORY": "<YOUR LOCAL OSS OPENHANDS DIR>/frontend/build",
"SANDBOX_RUNTIME_CONTAINER_IMAGE": "ghcr.io/all-hands-ai/runtime:main-nikolaik",
"SANDBOX_RUNTIME_CONTAINER_IMAGE": "ghcr.io/openhands/runtime:main-nikolaik",
"FILE_STORE_PATH": "<YOUR HOME DIRECTORY>>/.openhands-state",
"OPENHANDS_CONFIG_CLS": "server.config.SaaSServerConfig",
"GITHUB_APP_ID": "1062351",

View File

@ -5,12 +5,8 @@ from experiments.constants import (
EXPERIMENT_SYSTEM_PROMPT_EXPERIMENT,
)
from experiments.experiment_versions import (
handle_condenser_max_step_experiment,
handle_system_prompt_experiment,
)
from experiments.experiment_versions._004_condenser_max_step_experiment import (
handle_condenser_max_step_experiment__v1,
)
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.core.logger import openhands_logger as logger
@ -31,10 +27,6 @@ class SaaSExperimentManager(ExperimentManager):
)
return agent
agent = handle_condenser_max_step_experiment__v1(
user_id, conversation_id, agent
)
if EXPERIMENT_SYSTEM_PROMPT_EXPERIMENT:
agent = agent.model_copy(
update={'system_prompt_filename': 'system_prompt_long_horizon.j2'}
@ -60,20 +52,7 @@ class SaaSExperimentManager(ExperimentManager):
"""
logger.debug(
'experiment_manager:run_conversation_variant_test:started',
extra={'user_id': user_id},
)
# Skip all experiment processing if the experiment manager is disabled
if not ENABLE_EXPERIMENT_MANAGER:
logger.info(
'experiment_manager:run_conversation_variant_test:skipped',
extra={'reason': 'experiment_manager_disabled'},
)
return conversation_settings
# Apply conversation-scoped experiments
conversation_settings = handle_condenser_max_step_experiment(
user_id, conversation_id, conversation_settings
extra={'user_id': user_id, 'conversation_id': conversation_id},
)
return conversation_settings

View File

@ -32,7 +32,7 @@ from openhands.core.logger import openhands_logger as logger
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.integrations.service_types import AuthenticationError
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
from openhands.utils.async_utils import call_sync_from_async
@ -251,7 +251,7 @@ class GithubManager(Manager):
f'[GitHub] Creating new conversation for user {user_info.username}'
)
secret_store = UserSecrets(
secret_store = Secrets(
provider_tokens=MappingProxyType(
{
ProviderType.GITHUB: ProviderToken(
@ -293,18 +293,26 @@ class GithubManager(Manager):
f'[GitHub] Created conversation {conversation_id} for user {user_info.username}'
)
# Create a GithubCallbackProcessor
processor = GithubCallbackProcessor(
github_view=github_view,
send_summary_instruction=True,
)
from openhands.server.shared import ConversationStoreImpl, config
# Register the callback processor
register_callback_processor(conversation_id, processor)
logger.info(
f'[Github] Registered callback processor for conversation {conversation_id}'
conversation_store = await ConversationStoreImpl.get_instance(
config, github_view.user_info.keycloak_user_id
)
metadata = await conversation_store.get_metadata(conversation_id)
if metadata.conversation_version != 'v1':
# Create a GithubCallbackProcessor
processor = GithubCallbackProcessor(
github_view=github_view,
send_summary_instruction=True,
)
# Register the callback processor
register_callback_processor(conversation_id, processor)
logger.info(
f'[Github] Registered callback processor for conversation {conversation_id}'
)
# Send message with conversation link
conversation_link = CONVERSATION_URL.format(conversation_id)

View File

@ -1,4 +1,4 @@
from uuid import uuid4
from uuid import UUID, uuid4
from github import Github, GithubIntegration
from github.Issue import Issue
@ -24,12 +24,24 @@ from server.config import get_config
from storage.database import session_maker
from storage.proactive_conversation_store import ProactiveConversationStore
from storage.saas_secrets_store import SaasSecretsStore
from storage.user_settings import UserSettings
from storage.saas_settings_store import SaasSettingsStore
from openhands.agent_server.models import SendMessageRequest
from openhands.app_server.app_conversation.app_conversation_models import (
AppConversationStartRequest,
AppConversationStartTaskStatus,
)
from openhands.app_server.config import get_app_conversation_service
from openhands.app_server.services.injector import InjectorState
from openhands.app_server.user.specifiy_user_context import USER_CONTEXT_ATTR
from openhands.app_server.user.user_context import UserContext
from openhands.app_server.user.user_models import UserInfo
from openhands.core.logger import openhands_logger as logger
from openhands.integrations.github.github_service import GithubServiceImpl
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE, ProviderType
from openhands.integrations.service_types import Comment
from openhands.sdk import TextContent
from openhands.sdk.conversation.secret_source import SecretSource
from openhands.server.services.conversation_service import (
initialize_conversation,
start_conversation,
@ -43,6 +55,52 @@ from openhands.utils.async_utils import call_sync_from_async
OH_LABEL, INLINE_OH_LABEL = get_oh_labels(HOST)
class GithubUserContext(UserContext):
"""User context for GitHub integration that provides user info without web request."""
def __init__(self, keycloak_user_id: str, git_provider_tokens: PROVIDER_TOKEN_TYPE):
self.keycloak_user_id = keycloak_user_id
self.git_provider_tokens = git_provider_tokens
self.settings_store = SaasSettingsStore(
user_id=self.keycloak_user_id,
session_maker=session_maker,
config=get_config(),
)
self.secrets_store = SaasSecretsStore(
self.keycloak_user_id, session_maker, get_config()
)
async def get_user_id(self) -> str | None:
return self.keycloak_user_id
async def get_user_info(self) -> UserInfo:
user_settings = await self.settings_store.load()
return UserInfo(
id=self.keycloak_user_id,
**user_settings.model_dump(context={'expose_secrets': True}),
)
async def get_authenticated_git_url(self, repository: str) -> str:
# This would need to be implemented based on the git provider tokens
# For now, return a basic HTTPS URL
return f'https://github.com/{repository}.git'
async def get_latest_token(self, provider_type: ProviderType) -> str | None:
        # Extract the secret value so the declared `str | None` return type holds
        if provider_type == ProviderType.GITHUB and self.git_provider_tokens:
            provider_token = self.git_provider_tokens.get(ProviderType.GITHUB)
            if provider_token and provider_token.token:
                return provider_token.token.get_secret_value()
        return None
async def get_secrets(self) -> dict[str, SecretSource]:
        # Load the user's custom secrets from the secrets store
user_secrets = await self.secrets_store.load()
return dict(user_secrets.custom_secrets) if user_secrets else {}
async def get_mcp_api_key(self) -> str | None:
raise NotImplementedError()
async def get_user_proactive_conversation_setting(user_id: str | None) -> bool:
"""Get the user's proactive conversation setting.
@ -61,20 +119,48 @@ async def get_user_proactive_conversation_setting(user_id: str | None) -> bool:
if not user_id:
return False
def _get_setting():
with session_maker() as session:
settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == user_id)
.first()
)
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
if not settings or settings.enable_proactive_conversation_starters is None:
return False
settings = await call_sync_from_async(
settings_store.get_user_settings_by_keycloak_id, user_id
)
return settings.enable_proactive_conversation_starters
if not settings or settings.enable_proactive_conversation_starters is None:
return False
return await call_sync_from_async(_get_setting)
return settings.enable_proactive_conversation_starters
async def get_user_v1_enabled_setting(user_id: str | None) -> bool:
"""Get the user's V1 conversation API setting.
Args:
user_id: The keycloak user ID
Returns:
True if V1 conversations are enabled for this user, False otherwise
"""
# If no user ID is provided, we can't check user settings
if not user_id:
return False
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
settings = await call_sync_from_async(
settings_store.get_user_settings_by_keycloak_id, user_id
)
if not settings or settings.v1_enabled is None:
return False
return settings.v1_enabled
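The two look-ups above share a load-and-default pattern; a hypothetical generic helper could factor it out (a sketch only — get_user_bool_setting is not part of this change):

async def get_user_bool_setting(user_id: str | None, attr: str) -> bool:
    """Read a boolean attribute from the user's settings, defaulting to False."""
    if not user_id:
        return False
    settings_store = SaasSettingsStore(
        user_id=user_id, session_maker=session_maker, config=get_config()
    )
    settings = await call_sync_from_async(
        settings_store.get_user_settings_by_keycloak_id, user_id
    )
    # Missing settings or an unset flag both count as disabled
    value = getattr(settings, attr, None) if settings else None
    return bool(value)

With this, the two functions reduce to get_user_bool_setting(user_id, 'enable_proactive_conversation_starters') and get_user_bool_setting(user_id, 'v1_enabled').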
# =================================================
@ -160,6 +246,31 @@ class GithubIssue(ResolverViewInterface):
git_provider_tokens: PROVIDER_TOKEN_TYPE,
conversation_metadata: ConversationMetadata,
):
v1_enabled = await get_user_v1_enabled_setting(self.user_info.keycloak_user_id)
if v1_enabled:
try:
# Use V1 app conversation service
await self._create_v1_conversation(
jinja_env, git_provider_tokens, conversation_metadata
)
return
except Exception as e:
                logger.warning(f'Error creating V1 conversation, falling back to V0: {e}')
# Use existing V0 conversation service
await self._create_v0_conversation(
jinja_env, git_provider_tokens, conversation_metadata
)
async def _create_v0_conversation(
self,
jinja_env: Environment,
git_provider_tokens: PROVIDER_TOKEN_TYPE,
conversation_metadata: ConversationMetadata,
):
"""Create conversation using the legacy V0 system."""
custom_secrets = await self._get_user_secrets()
user_instructions, conversation_instructions = await self._get_instructions(
@ -178,6 +289,77 @@ class GithubIssue(ResolverViewInterface):
conversation_instructions=conversation_instructions,
)
async def _create_v1_conversation(
self,
jinja_env: Environment,
git_provider_tokens: PROVIDER_TOKEN_TYPE,
conversation_metadata: ConversationMetadata,
):
"""Create conversation using the new V1 app conversation system."""
user_instructions, conversation_instructions = await self._get_instructions(
jinja_env
)
# Create the initial message request
initial_message = SendMessageRequest(
role='user', content=[TextContent(text=user_instructions)]
)
# Create the GitHub V1 callback processor
github_callback_processor = self._create_github_v1_callback_processor()
# Get the app conversation service and start the conversation
injector_state = InjectorState()
# Create the V1 conversation start request with the callback processor
start_request = AppConversationStartRequest(
conversation_id=UUID(conversation_metadata.conversation_id),
system_message_suffix=conversation_instructions,
initial_message=initial_message,
selected_repository=self.full_repo_name,
git_provider=ProviderType.GITHUB,
title=f'GitHub Issue #{self.issue_number}: {self.title}',
trigger=ConversationTrigger.RESOLVER,
processors=[
github_callback_processor
], # Pass the callback processor directly
)
# Set up the GitHub user context for the V1 system
github_user_context = GithubUserContext(
keycloak_user_id=self.user_info.keycloak_user_id,
git_provider_tokens=git_provider_tokens,
)
setattr(injector_state, USER_CONTEXT_ATTR, github_user_context)
async with get_app_conversation_service(
injector_state
) as app_conversation_service:
async for task in app_conversation_service.start_app_conversation(
start_request
):
if task.status == AppConversationStartTaskStatus.ERROR:
logger.error(f'Failed to start V1 conversation: {task.detail}')
raise RuntimeError(
f'Failed to start V1 conversation: {task.detail}'
)
def _create_github_v1_callback_processor(self):
"""Create a V1 callback processor for GitHub integration."""
from openhands.app_server.event_callback.github_v1_callback_processor import (
GithubV1CallbackProcessor,
)
# Create and return the GitHub V1 callback processor
return GithubV1CallbackProcessor(
github_view_data={
'issue_number': self.issue_number,
'full_repo_name': self.full_repo_name,
'installation_id': self.installation_id,
},
send_summary_instruction=self.send_summary_instruction,
)
@dataclass
class GithubIssueComment(GithubIssue):
@ -293,6 +475,24 @@ class GithubInlinePRComment(GithubPRComment):
return user_instructions, conversation_instructions
def _create_github_v1_callback_processor(self):
"""Create a V1 callback processor for GitHub integration."""
from openhands.app_server.event_callback.github_v1_callback_processor import (
GithubV1CallbackProcessor,
)
# Create and return the GitHub V1 callback processor
return GithubV1CallbackProcessor(
github_view_data={
'issue_number': self.issue_number,
'full_repo_name': self.full_repo_name,
'installation_id': self.installation_id,
'comment_id': self.comment_id,
},
inline_pr_comment=True,
send_summary_instruction=self.send_summary_instruction,
)
@dataclass
class GithubFailingAction:

View File

@ -26,7 +26,7 @@ from openhands.integrations.gitlab.gitlab_service import GitLabServiceImpl
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.integrations.service_types import AuthenticationError
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
class GitlabManager(Manager):
@ -199,7 +199,7 @@ class GitlabManager(Manager):
f'[GitLab] Creating new conversation for user {user_info.username}'
)
secret_store = UserSecrets(
secret_store = Secrets(
provider_tokens=MappingProxyType(
{
ProviderType.GITLAB: ProviderToken(

View File

@ -32,6 +32,7 @@ from openhands.integrations.service_types import Repository
from openhands.server.shared import server_config
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.server.user_auth.user_auth import UserAuth
from openhands.utils.http_session import httpx_verify_option
JIRA_CLOUD_API_URL = 'https://api.atlassian.com/ex/jira'
@ -408,7 +409,7 @@ class JiraManager(Manager):
svc_acc_api_key: str,
) -> Tuple[str, str]:
url = f'{JIRA_CLOUD_API_URL}/{jira_cloud_id}/rest/api/2/issue/{job_context.issue_key}'
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.get(url, auth=(svc_acc_email, svc_acc_api_key))
response.raise_for_status()
issue_payload = response.json()
@ -443,7 +444,7 @@ class JiraManager(Manager):
f'{JIRA_CLOUD_API_URL}/{jira_cloud_id}/rest/api/2/issue/{issue_key}/comment'
)
data = {'body': message.message}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(
url, auth=(svc_acc_email, svc_acc_api_key), json=data
)
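httpx_verify_option itself is not shown in this diff; it presumably centralizes TLS-verification settings so every httpx.AsyncClient is configured consistently. A minimal sketch under that assumption (the environment variable names below are hypothetical):

import os


def httpx_verify_option() -> bool | str:
    """Return a value suitable for httpx's verify= parameter (sketch)."""
    ca_bundle = os.getenv('HTTPX_CA_BUNDLE')  # hypothetical: custom CA bundle path
    if ca_bundle:
        return ca_bundle
    if os.getenv('HTTPX_DISABLE_VERIFY', '').lower() in ('1', 'true'):  # hypothetical
        return False  # disable certificate verification entirely
    return True  # default: verify against the system trust store

httpx accepts a bool, a CA bundle path, or an ssl.SSLContext for verify=, so a single helper keeps the policy in one place.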

View File

@ -57,7 +57,7 @@ class JiraNewConversationView(JiraViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
user_secrets = await self.saas_user_auth.get_user_secrets()
user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:

View File

@ -34,6 +34,7 @@ from openhands.integrations.service_types import Repository
from openhands.server.shared import server_config
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.server.user_auth.user_auth import UserAuth
from openhands.utils.http_session import httpx_verify_option
class JiraDcManager(Manager):
@ -422,7 +423,7 @@ class JiraDcManager(Manager):
"""Get issue details from Jira DC API."""
url = f'{job_context.base_api_url}/rest/api/2/issue/{job_context.issue_key}'
headers = {'Authorization': f'Bearer {svc_acc_api_key}'}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.get(url, headers=headers)
response.raise_for_status()
issue_payload = response.json()
@ -452,7 +453,7 @@ class JiraDcManager(Manager):
url = f'{base_api_url}/rest/api/2/issue/{issue_key}/comment'
headers = {'Authorization': f'Bearer {svc_acc_api_key}'}
data = {'body': message.message}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, headers=headers, json=data)
response.raise_for_status()
return response.json()

View File

@ -60,7 +60,7 @@ class JiraDcNewConversationView(JiraDcViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
user_secrets = await self.saas_user_auth.get_user_secrets()
user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:

View File

@ -31,6 +31,7 @@ from openhands.integrations.service_types import Repository
from openhands.server.shared import server_config
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.server.user_auth.user_auth import UserAuth
from openhands.utils.http_session import httpx_verify_option
class LinearManager(Manager):
@ -408,7 +409,7 @@ class LinearManager(Manager):
async def _query_api(self, query: str, variables: Dict, api_key: str) -> Dict:
"""Query Linear GraphQL API."""
headers = {'Authorization': api_key}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(
self.api_url,
headers=headers,

View File

@ -57,7 +57,7 @@ class LinearNewConversationView(LinearViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
user_secrets = await self.saas_user_auth.get_user_secrets()
user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:

View File

@ -87,7 +87,7 @@ class SlackManager(Manager):
return slack_user, saas_user_auth
def _infer_repo_from_message(self, user_msg: str) -> str | None:
# Regular expression to match patterns like "All-Hands-AI/OpenHands" or "deploy repo"
# Regular expression to match patterns like "OpenHands/OpenHands" or "deploy repo"
pattern = r'([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)|([a-zA-Z0-9_-]+)(?=\s+repo)'
match = re.search(pattern, user_msg)

View File

@ -14,6 +14,7 @@ from openhands.core.logger import openhands_logger as logger
from openhands.core.schema.agent import AgentState
from openhands.events.action import MessageAction
from openhands.events.serialization.event import event_to_dict
from openhands.integrations.provider import ProviderHandler
from openhands.server.services.conversation_service import (
create_new_conversation,
setup_init_conversation_settings,
@ -185,22 +186,30 @@ class SlackNewConversationView(SlackViewInterface):
self._verify_necessary_values_are_set()
provider_tokens = await self.saas_user_auth.get_provider_tokens()
user_secrets = await self.saas_user_auth.get_user_secrets()
user_secrets = await self.saas_user_auth.get_secrets()
user_instructions, conversation_instructions = self._get_instructions(jinja)
# Determine git provider from repository
git_provider = None
if self.selected_repo and provider_tokens:
provider_handler = ProviderHandler(provider_tokens)
repository = await provider_handler.verify_repo_provider(self.selected_repo)
git_provider = repository.git_provider
agent_loop_info = await create_new_conversation(
user_id=self.slack_to_openhands_user.keycloak_user_id,
git_provider_tokens=provider_tokens,
selected_repository=self.selected_repo,
selected_branch=None,
initial_user_msg=user_instructions,
conversation_instructions=conversation_instructions
if conversation_instructions
else None,
conversation_instructions=(
conversation_instructions if conversation_instructions else None
),
image_urls=None,
replay_json=None,
conversation_trigger=ConversationTrigger.SLACK,
custom_secrets=user_secrets.custom_secrets if user_secrets else None,
git_provider=git_provider,
)
self.conversation_id = agent_loop_info.conversation_id

View File

@ -381,7 +381,7 @@ def infer_repo_from_message(user_msg: str) -> list[str]:
# Captures: protocol, domain, owner, repo (with optional .git extension)
git_url_pattern = r'https?://(?:github\.com|gitlab\.com|bitbucket\.org)/([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+?)(?:\.git)?(?:[/?#].*?)?(?=\s|$|[^\w.-])'
# Pattern to match direct owner/repo mentions (e.g., "All-Hands-AI/OpenHands")
# Pattern to match direct owner/repo mentions (e.g., "OpenHands/OpenHands")
# Must be surrounded by word boundaries or specific characters to avoid false positives
direct_pattern = (
r'(?:^|\s|[\[\(\'"])([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+)(?=\s|$|[\]\)\'",.])'
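A quick demonstration of direct_pattern against a sample message (a sketch; the closing parenthesis of the assignment falls outside this hunk):

import re

direct_pattern = (
    r'(?:^|\s|[\[\(\'"])([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+)(?=\s|$|[\]\)\'",.])'
)

# A bare owner/repo mention delimited by whitespace is captured as two groups
print(re.findall(direct_pattern, 'Please fix OpenHands/OpenHands today'))
# -> [('OpenHands', 'OpenHands')]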

View File

@ -0,0 +1,27 @@
"""drop settings table
Revision ID: 077
Revises: 076
Create Date: 2025-10-21 00:00:00.000000
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '077'
down_revision: Union[str, None] = '076'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Drop the deprecated settings table."""
op.execute('DROP TABLE IF EXISTS settings')
def downgrade() -> None:
"""No-op downgrade since the settings table is deprecated."""
pass
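Dropping via raw SQL with IF EXISTS keeps the upgrade idempotent even when the table is already gone; the equivalent guard with op.drop_table needs an explicit inspection step, e.g. (sketch):

import sqlalchemy as sa
from alembic import op


def upgrade() -> None:
    """Drop the deprecated settings table only if it is still present."""
    bind = op.get_bind()
    if 'settings' in sa.inspect(bind).get_table_names():
        op.drop_table('settings')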

View File

@ -0,0 +1,129 @@
"""create telemetry tables
Revision ID: 078
Revises: 077
Create Date: 2025-10-21
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '078'
down_revision: Union[str, None] = '077'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Create telemetry tables for metrics collection and configuration."""
# Create telemetry_metrics table
op.create_table(
'telemetry_metrics',
sa.Column(
'id',
sa.String(), # UUID as string
nullable=False,
primary_key=True,
),
sa.Column(
'collected_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'metrics_data',
sa.JSON(),
nullable=False,
),
sa.Column(
'uploaded_at',
sa.DateTime(timezone=True),
nullable=True,
),
sa.Column(
'upload_attempts',
sa.Integer(),
nullable=False,
server_default='0',
),
sa.Column(
'last_upload_error',
sa.Text(),
nullable=True,
),
sa.Column(
'created_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'updated_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
)
# Create indexes for telemetry_metrics
op.create_index(
'ix_telemetry_metrics_collected_at', 'telemetry_metrics', ['collected_at']
)
op.create_index(
'ix_telemetry_metrics_uploaded_at', 'telemetry_metrics', ['uploaded_at']
)
# Create telemetry_replicated_identity table (minimal persistent identity data)
op.create_table(
'telemetry_replicated_identity',
sa.Column(
'id',
sa.Integer(),
nullable=False,
primary_key=True,
server_default='1',
),
sa.Column(
'customer_id',
sa.String(255),
nullable=True,
),
sa.Column(
'instance_id',
sa.String(255),
nullable=True,
),
sa.Column(
'created_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'updated_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
)
# Add constraint to ensure single row in telemetry_replicated_identity
op.create_check_constraint(
'single_identity_row', 'telemetry_replicated_identity', 'id = 1'
)
def downgrade() -> None:
"""Drop telemetry tables."""
# Drop indexes first
op.drop_index('ix_telemetry_metrics_uploaded_at', 'telemetry_metrics')
op.drop_index('ix_telemetry_metrics_collected_at', 'telemetry_metrics')
# Drop tables
op.drop_table('telemetry_replicated_identity')
op.drop_table('telemetry_metrics')
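For reference, a row in telemetry_metrics only needs id and metrics_data up front; the timestamp columns fall back to their CURRENT_TIMESTAMP server defaults and upload_attempts to 0. A sketch (record_metrics is illustrative, not part of this change):

import uuid

import sqlalchemy as sa

metadata = sa.MetaData()
telemetry_metrics = sa.Table(
    'telemetry_metrics',
    metadata,
    sa.Column('id', sa.String, primary_key=True),
    sa.Column('metrics_data', sa.JSON, nullable=False),
)


def record_metrics(engine: sa.engine.Engine, payload: dict) -> None:
    """Insert one metrics snapshot, letting server defaults fill the rest."""
    with engine.begin() as conn:
        conn.execute(
            telemetry_metrics.insert().values(
                id=str(uuid.uuid4()), metrics_data=payload
            )
        )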

View File

@ -0,0 +1,39 @@
"""rename user_secrets table to custom_secrets
Revision ID: 079
Revises: 078
Create Date: 2025-10-27 00:00:00.000000
"""
from typing import Sequence, Union
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '079'
down_revision: Union[str, None] = '078'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
# Rename the table from user_secrets to custom_secrets
op.rename_table('user_secrets', 'custom_secrets')
# Rename the index to match the new table name
op.drop_index('idx_user_secrets_keycloak_user_id', 'custom_secrets')
op.create_index(
'idx_custom_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
)
def downgrade() -> None:
# Rename the index back to the original name
op.drop_index('idx_custom_secrets_keycloak_user_id', 'custom_secrets')
op.create_index(
'idx_user_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
)
# Rename the table back from custom_secrets to user_secrets
op.rename_table('custom_secrets', 'user_secrets')

View File

@ -0,0 +1,71 @@
"""add status and updated_at to callback
Revision ID: 080
Revises: 079
Create Date: 2025-11-05 00:00:00.000000
"""
from enum import Enum
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '080'
down_revision: Union[str, None] = '079'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
class EventCallbackStatus(Enum):
ACTIVE = 'ACTIVE'
DISABLED = 'DISABLED'
COMPLETED = 'COMPLETED'
ERROR = 'ERROR'
def upgrade() -> None:
"""Upgrade schema."""
status = sa.Enum(EventCallbackStatus, name='eventcallbackstatus')
status.create(op.get_bind(), checkfirst=True)
op.add_column(
'event_callback',
sa.Column('status', status, nullable=False, server_default='ACTIVE'),
)
op.add_column(
'event_callback',
sa.Column(
'updated_at', sa.DateTime, nullable=False, server_default=sa.func.now()
),
)
op.drop_index('ix_event_callback_result_event_id')
op.drop_column('event_callback_result', 'event_id')
op.add_column(
'event_callback_result', sa.Column('event_id', sa.String, nullable=True)
)
op.create_index(
op.f('ix_event_callback_result_event_id'),
'event_callback_result',
['event_id'],
unique=False,
)
def downgrade() -> None:
"""Downgrade schema."""
op.drop_column('event_callback', 'status')
op.drop_column('event_callback', 'updated_at')
op.drop_index('ix_event_callback_result_event_id')
op.drop_column('event_callback_result', 'event_id')
op.add_column(
'event_callback_result', sa.Column('event_id', sa.UUID, nullable=True)
)
op.create_index(
op.f('ix_event_callback_result_event_id'),
'event_callback_result',
['event_id'],
unique=False,
)
op.execute('DROP TYPE eventcallbackstatus')
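The explicit status.create(op.get_bind(), checkfirst=True) materializes the PostgreSQL enum type before the column that references it, so re-running against a database where the type already exists does not fail. A hypothetical ORM mapping matching the migrated columns (a sketch; the real model lives elsewhere in the codebase):

import sqlalchemy as sa
from sqlalchemy.orm import declarative_base

Base = declarative_base()


class EventCallback(Base):
    __tablename__ = 'event_callback'

    id = sa.Column(sa.String, primary_key=True)
    status = sa.Column(
        sa.Enum(EventCallbackStatus, name='eventcallbackstatus'),
        nullable=False,
        server_default='ACTIVE',
    )
    updated_at = sa.Column(sa.DateTime, nullable=False, server_default=sa.func.now())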

View File

@ -0,0 +1,41 @@
"""add parent_conversation_id to conversation_metadata
Revision ID: 081
Revises: 080
Create Date: 2025-11-06 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '081'
down_revision: Union[str, None] = '080'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Upgrade schema."""
op.add_column(
'conversation_metadata',
sa.Column('parent_conversation_id', sa.String(), nullable=True),
)
op.create_index(
op.f('ix_conversation_metadata_parent_conversation_id'),
'conversation_metadata',
['parent_conversation_id'],
unique=False,
)
def downgrade() -> None:
"""Downgrade schema."""
op.drop_index(
op.f('ix_conversation_metadata_parent_conversation_id'),
table_name='conversation_metadata',
)
op.drop_column('conversation_metadata', 'parent_conversation_id')

View File

@ -0,0 +1,51 @@
"""Add SETTING_UP_SKILLS to appconversationstarttaskstatus enum
Revision ID: 082
Revises: 081
Create Date: 2025-11-19 12:00:00.000000
"""
from typing import Sequence, Union
from alembic import op
from sqlalchemy import text
# revision identifiers, used by Alembic.
revision: str = '082'
down_revision: Union[str, Sequence[str], None] = '081'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add SETTING_UP_SKILLS enum value to appconversationstarttaskstatus."""
# Check if the enum value already exists before adding it
# This handles the case where the enum was created with the value already included
connection = op.get_bind()
result = connection.execute(
text(
"SELECT 1 FROM pg_enum WHERE enumlabel = 'SETTING_UP_SKILLS' "
"AND enumtypid = (SELECT oid FROM pg_type WHERE typname = 'appconversationstarttaskstatus')"
)
)
if not result.fetchone():
# Add the new enum value only if it doesn't already exist
op.execute(
"ALTER TYPE appconversationstarttaskstatus ADD VALUE 'SETTING_UP_SKILLS'"
)
def downgrade() -> None:
"""Remove SETTING_UP_SKILLS enum value from appconversationstarttaskstatus.
Note: PostgreSQL doesn't support removing enum values directly.
This would require recreating the enum type and updating all references.
For safety, this downgrade is not implemented.
"""
# PostgreSQL doesn't support removing enum values directly
# This would require a complex migration to recreate the enum
# For now, we'll leave this as a no-op since removing enum values
# is rarely needed and can be dangerous
pass
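On PostgreSQL 9.6 and later, the same guard can be collapsed into a single statement, though the explicit pg_enum check above also works on older servers:

op.execute(
    "ALTER TYPE appconversationstarttaskstatus "
    "ADD VALUE IF NOT EXISTS 'SETTING_UP_SKILLS'"
)

Either way, note that before PostgreSQL 12, ADD VALUE cannot run inside a transaction block, which can matter for transactional migration runners.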

View File

@ -0,0 +1,35 @@
"""Add v1_enabled column to user_settings
Revision ID: 083
Revises: 082
Create Date: 2025-11-18 00:00:00.000000
"""
from typing import Sequence, Union
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = '083'
down_revision: Union[str, None] = '082'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
def upgrade() -> None:
"""Add v1_enabled column to user_settings table."""
op.add_column(
'user_settings',
sa.Column(
'v1_enabled',
sa.Boolean(),
nullable=True,
),
)
def downgrade() -> None:
"""Remove v1_enabled column from user_settings table."""
op.drop_column('user_settings', 'v1_enabled')

348
enterprise/poetry.lock generated
View File

@ -201,19 +201,20 @@ files = [
[[package]]
name = "anthropic"
version = "0.65.0"
version = "0.72.0"
description = "The official Python library for the anthropic API"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "anthropic-0.65.0-py3-none-any.whl", hash = "sha256:ba9d9f82678046c74ddf5698ca06d9f5b0f599cfac922ab0d5921638eb448d98"},
{file = "anthropic-0.65.0.tar.gz", hash = "sha256:6b6b6942574e54342050dfd42b8d856a8366b171daec147df3b80be4722733b9"},
{file = "anthropic-0.72.0-py3-none-any.whl", hash = "sha256:0e9f5a7582f038cab8efbb4c959e49ef654a56bfc7ba2da51b5a7b8a84de2e4d"},
{file = "anthropic-0.72.0.tar.gz", hash = "sha256:8971fe76dcffc644f74ac3883069beb1527641115ae0d6eb8fa21c1ce4082f7a"},
]
[package.dependencies]
anyio = ">=3.5.0,<5"
distro = ">=1.7.0,<2"
docstring-parser = ">=0.15,<1"
google-auth = {version = ">=2,<3", extras = ["requests"], optional = true, markers = "extra == \"vertex\""}
httpx = ">=0.25.0,<1"
jiter = ">=0.4.0,<1"
@ -222,7 +223,7 @@ sniffio = "*"
typing-extensions = ">=4.10,<5"
[package.extras]
aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.8)"]
aiohttp = ["aiohttp", "httpx-aiohttp (>=0.1.9)"]
bedrock = ["boto3 (>=1.28.57)", "botocore (>=1.31.57)"]
vertex = ["google-auth[requests] (>=2,<3)"]
@ -681,31 +682,34 @@ crt = ["awscrt (==0.27.6)"]
[[package]]
name = "browser-use"
version = "0.7.10"
version = "0.9.5"
description = "Make websites accessible for AI agents"
optional = false
python-versions = "<4.0,>=3.11"
groups = ["main"]
files = [
{file = "browser_use-0.7.10-py3-none-any.whl", hash = "sha256:669e12571a0c0c4c93e5fd26abf9e2534eb9bacbc510328aedcab795bd8906a9"},
{file = "browser_use-0.7.10.tar.gz", hash = "sha256:f93ce59e06906c12d120360dee4aa33d83618ddf7c9a575dd0ac517d2de7ccbc"},
{file = "browser_use-0.9.5-py3-none-any.whl", hash = "sha256:4a2e92847204d1ded269026a99cb0cc0e60e38bd2751fa3f58aedd78f00b4e67"},
{file = "browser_use-0.9.5.tar.gz", hash = "sha256:f8285fe253b149d01769a7084883b4cf4db351e2f38e26302c157bcbf14a703f"},
]
[package.dependencies]
aiohttp = "3.12.15"
anthropic = ">=0.58.2,<1.0.0"
anthropic = ">=0.68.1,<1.0.0"
anyio = ">=4.9.0"
authlib = ">=1.6.0"
bubus = ">=1.5.6"
cdp-use = ">=1.4.0"
click = ">=8.1.8"
cloudpickle = ">=3.1.1"
google-api-core = ">=2.25.0"
google-api-python-client = ">=2.174.0"
google-auth = ">=2.40.3"
google-auth-oauthlib = ">=1.2.2"
google-genai = ">=1.29.0,<2.0.0"
groq = ">=0.30.0"
html2text = ">=2025.4.15"
httpx = ">=0.28.1"
inquirerpy = ">=0.3.4"
markdownify = ">=1.2.0"
mcp = ">=1.10.1"
ollama = ">=0.5.1"
openai = ">=1.99.2,<2.0.0"
@ -720,16 +724,20 @@ pypdf = ">=5.7.0"
python-dotenv = ">=1.0.1"
reportlab = ">=4.0.0"
requests = ">=2.32.3"
rich = ">=14.0.0"
screeninfo = {version = ">=0.8.1", markers = "platform_system != \"darwin\""}
typing-extensions = ">=4.12.2"
uuid7 = ">=0.1.0"
[package.extras]
all = ["agentmail (>=0.0.53)", "boto3 (>=1.38.45)", "botocore (>=1.37.23)", "click (>=8.1.8)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)", "rich (>=14.0.0)", "textual (>=3.2.0)"]
all = ["agentmail (==0.0.59)", "boto3 (>=1.38.45)", "botocore (>=1.37.23)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)", "oci (>=2.126.4)", "textual (>=3.2.0)"]
aws = ["boto3 (>=1.38.45)"]
cli = ["click (>=8.1.8)", "rich (>=14.0.0)", "textual (>=3.2.0)"]
eval = ["anyio (>=4.9.0)", "browserbase (==1.4.0)", "datamodel-code-generator (>=0.26.0)", "hyperbrowser (==0.47.0)", "lmnr[all] (==0.7.10)", "psutil (>=7.0.0)"]
examples = ["agentmail (>=0.0.53)", "botocore (>=1.37.23)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)"]
cli = ["textual (>=3.2.0)"]
cli-oci = ["oci (>=2.126.4)", "textual (>=3.2.0)"]
code = ["matplotlib (>=3.9.0)", "numpy (>=2.3.2)", "pandas (>=2.2.0)", "tabulate (>=0.9.0)"]
eval = ["anyio (>=4.9.0)", "datamodel-code-generator (>=0.26.0)", "lmnr[all] (==0.7.17)", "psutil (>=7.0.0)"]
examples = ["agentmail (==0.0.59)", "botocore (>=1.37.23)", "imgcat (>=0.6.0)", "langchain-openai (>=0.3.26)"]
oci = ["oci (>=2.126.4)"]
video = ["imageio[ffmpeg] (>=2.37.0)", "numpy (>=2.3.2)"]
[[package]]
@ -3525,6 +3533,25 @@ files = [
{file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"},
]
[[package]]
name = "inquirerpy"
version = "0.3.4"
description = "Python port of Inquirer.js (A collection of common interactive command-line user interfaces)"
optional = false
python-versions = ">=3.7,<4.0"
groups = ["main"]
files = [
{file = "InquirerPy-0.3.4-py3-none-any.whl", hash = "sha256:c65fdfbac1fa00e3ee4fb10679f4d3ed7a012abf4833910e63c295827fe2a7d4"},
{file = "InquirerPy-0.3.4.tar.gz", hash = "sha256:89d2ada0111f337483cb41ae31073108b2ec1e618a49d7110b0d7ade89fc197e"},
]
[package.dependencies]
pfzy = ">=0.3.1,<0.4.0"
prompt-toolkit = ">=3.0.1,<4.0.0"
[package.extras]
docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
[[package]]
name = "installer"
version = "0.7.0"
@ -4580,6 +4607,62 @@ files = [
{file = "llvmlite-0.44.0.tar.gz", hash = "sha256:07667d66a5d150abed9157ab6c0b9393c9356f229784a4385c02f99e94fc94d4"},
]
[[package]]
name = "lmnr"
version = "0.7.20"
description = "Python SDK for Laminar"
optional = false
python-versions = "<4,>=3.10"
groups = ["main"]
files = [
{file = "lmnr-0.7.20-py3-none-any.whl", hash = "sha256:5f9fa7444e6f96c25e097f66484ff29e632bdd1de0e9346948bf5595f4a8af38"},
{file = "lmnr-0.7.20.tar.gz", hash = "sha256:1f484cd618db2d71af65f90a0b8b36d20d80dc91a5138b811575c8677bf7c4fd"},
]
[package.dependencies]
grpcio = ">=1"
httpx = ">=0.24.0"
opentelemetry-api = ">=1.33.0"
opentelemetry-exporter-otlp-proto-grpc = ">=1.33.0"
opentelemetry-exporter-otlp-proto-http = ">=1.33.0"
opentelemetry-instrumentation = ">=0.54b0"
opentelemetry-instrumentation-threading = ">=0.57b0"
opentelemetry-sdk = ">=1.33.0"
opentelemetry-semantic-conventions = ">=0.54b0"
opentelemetry-semantic-conventions-ai = ">=0.4.13"
orjson = ">=3.0.0"
packaging = ">=22.0"
pydantic = ">=2.0.3,<3.0.0"
python-dotenv = ">=1.0"
tenacity = ">=8.0"
tqdm = ">=4.0"
[package.extras]
alephalpha = ["opentelemetry-instrumentation-alephalpha (>=0.47.1)"]
all = ["opentelemetry-instrumentation-alephalpha (>=0.47.1)", "opentelemetry-instrumentation-bedrock (>=0.47.1)", "opentelemetry-instrumentation-chromadb (>=0.47.1)", "opentelemetry-instrumentation-cohere (>=0.47.1)", "opentelemetry-instrumentation-crewai (>=0.47.1)", "opentelemetry-instrumentation-haystack (>=0.47.1)", "opentelemetry-instrumentation-lancedb (>=0.47.1)", "opentelemetry-instrumentation-langchain (>=0.47.1)", "opentelemetry-instrumentation-llamaindex (>=0.47.1)", "opentelemetry-instrumentation-marqo (>=0.47.1)", "opentelemetry-instrumentation-mcp (>=0.47.1)", "opentelemetry-instrumentation-milvus (>=0.47.1)", "opentelemetry-instrumentation-mistralai (>=0.47.1)", "opentelemetry-instrumentation-ollama (>=0.47.1)", "opentelemetry-instrumentation-pinecone (>=0.47.1)", "opentelemetry-instrumentation-qdrant (>=0.47.1)", "opentelemetry-instrumentation-replicate (>=0.47.1)", "opentelemetry-instrumentation-sagemaker (>=0.47.1)", "opentelemetry-instrumentation-together (>=0.47.1)", "opentelemetry-instrumentation-transformers (>=0.47.1)", "opentelemetry-instrumentation-vertexai (>=0.47.1)", "opentelemetry-instrumentation-watsonx (>=0.47.1)", "opentelemetry-instrumentation-weaviate (>=0.47.1)"]
bedrock = ["opentelemetry-instrumentation-bedrock (>=0.47.1)"]
chromadb = ["opentelemetry-instrumentation-chromadb (>=0.47.1)"]
cohere = ["opentelemetry-instrumentation-cohere (>=0.47.1)"]
crewai = ["opentelemetry-instrumentation-crewai (>=0.47.1)"]
haystack = ["opentelemetry-instrumentation-haystack (>=0.47.1)"]
lancedb = ["opentelemetry-instrumentation-lancedb (>=0.47.1)"]
langchain = ["opentelemetry-instrumentation-langchain (>=0.47.1)"]
llamaindex = ["opentelemetry-instrumentation-llamaindex (>=0.47.1)"]
marqo = ["opentelemetry-instrumentation-marqo (>=0.47.1)"]
mcp = ["opentelemetry-instrumentation-mcp (>=0.47.1)"]
milvus = ["opentelemetry-instrumentation-milvus (>=0.47.1)"]
mistralai = ["opentelemetry-instrumentation-mistralai (>=0.47.1)"]
ollama = ["opentelemetry-instrumentation-ollama (>=0.47.1)"]
pinecone = ["opentelemetry-instrumentation-pinecone (>=0.47.1)"]
qdrant = ["opentelemetry-instrumentation-qdrant (>=0.47.1)"]
replicate = ["opentelemetry-instrumentation-replicate (>=0.47.1)"]
sagemaker = ["opentelemetry-instrumentation-sagemaker (>=0.47.1)"]
together = ["opentelemetry-instrumentation-together (>=0.47.1)"]
transformers = ["opentelemetry-instrumentation-transformers (>=0.47.1)"]
vertexai = ["opentelemetry-instrumentation-vertexai (>=0.47.1)"]
watsonx = ["opentelemetry-instrumentation-watsonx (>=0.47.1)"]
weaviate = ["opentelemetry-instrumentation-weaviate (>=0.47.1)"]
[[package]]
name = "lxml"
version = "6.0.1"
@ -5737,13 +5820,15 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0
[[package]]
name = "openhands-agent-server"
version = "1.0.0a3"
version = "1.3.0"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = []
develop = false
files = [
{file = "openhands_agent_server-1.3.0-py3-none-any.whl", hash = "sha256:2f87f790c740dc3fb81821c5f9fa375af875fbb937ebca3baa6dc5c035035b3c"},
{file = "openhands_agent_server-1.3.0.tar.gz", hash = "sha256:0a83ae77373f5c41d0ba0e22d8f0f6144d54d55784183a50b7c098c96cd5135c"},
]
[package.dependencies]
aiosqlite = ">=0.19"
@ -5756,16 +5841,9 @@ uvicorn = ">=0.31.1"
websockets = ">=12"
wsproto = ">=1.2.0"
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-agent-server"
[[package]]
name = "openhands-ai"
version = "0.59.0"
version = "0.62.0"
description = "OpenHands: Code Less, Make More"
optional = false
python-versions = "^3.12,<3.14"
@ -5782,6 +5860,7 @@ bashlex = "^0.18"
boto3 = "*"
browsergym-core = "0.13.3"
deprecated = "*"
deprecation = "^2.1.0"
dirhash = "*"
docker = "*"
fastapi = "*"
@ -5801,13 +5880,14 @@ jupyter_kernel_gateway = "*"
kubernetes = "^33.1.0"
libtmux = ">=0.46.2"
litellm = ">=1.74.3, <1.78.0, !=1.64.4, !=1.67.*"
lmnr = "^0.7.20"
memory-profiler = "^0.61.0"
numpy = "*"
openai = "1.99.9"
openhands-aci = "0.3.2"
openhands-agent-server = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-agent-server"}
openhands-sdk = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-sdk"}
openhands-tools = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-tools"}
openhands-agent-server = "1.3.0"
openhands-sdk = "1.3.0"
openhands-tools = "1.3.0"
opentelemetry-api = "^1.33.1"
opentelemetry-exporter-otlp-proto-grpc = "^1.33.1"
pathspec = "^0.12.1"
@ -5863,18 +5943,22 @@ url = ".."
[[package]]
name = "openhands-sdk"
version = "1.0.0a3"
version = "1.3.0"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = []
develop = false
files = [
{file = "openhands_sdk-1.3.0-py3-none-any.whl", hash = "sha256:feee838346f8e60ea3e4d3391de7cb854314eb8b3c9e3dbbb56f98a784aadc56"},
{file = "openhands_sdk-1.3.0.tar.gz", hash = "sha256:2d060803a78de462121b56dea717a66356922deb02276f37b29fae8af66343fb"},
]
[package.dependencies]
deprecation = ">=2.1.0"
fastmcp = ">=2.11.3"
httpx = ">=0.27.0"
litellm = ">=1.77.7.dev9"
lmnr = ">=0.7.20"
pydantic = ">=2.11.7"
python-frontmatter = ">=1.1.0"
python-json-logger = ">=3.3.0"
@ -5884,40 +5968,28 @@ websockets = ">=12"
[package.extras]
boto3 = ["boto3 (>=1.35.0)"]
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-sdk"
[[package]]
name = "openhands-tools"
version = "1.0.0a3"
version = "1.3.0"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
groups = ["main"]
files = []
develop = false
files = [
{file = "openhands_tools-1.3.0-py3-none-any.whl", hash = "sha256:f31056d87c3058ac92709f9161c7c602daeee3ed0cb4439097b43cda105ed03e"},
{file = "openhands_tools-1.3.0.tar.gz", hash = "sha256:3da46f09e28593677d3e17252ce18584fcc13caab1a73213e66bd7edca2cebe0"},
]
[package.dependencies]
bashlex = ">=0.18"
binaryornot = ">=0.4.4"
browser-use = ">=0.7.7"
browser-use = ">=0.8.0"
cachetools = "*"
func-timeout = ">=4.3.5"
libtmux = ">=0.46.2"
openhands-sdk = "*"
pydantic = ">=2.11.7"
[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-tools"
[[package]]
name = "openpyxl"
version = "3.1.5"
@ -5988,6 +6060,62 @@ opentelemetry-proto = "1.36.0"
opentelemetry-sdk = ">=1.36.0,<1.37.0"
typing-extensions = ">=4.6.0"
[[package]]
name = "opentelemetry-exporter-otlp-proto-http"
version = "1.36.0"
description = "OpenTelemetry Collector Protobuf over HTTP Exporter"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "opentelemetry_exporter_otlp_proto_http-1.36.0-py3-none-any.whl", hash = "sha256:3d769f68e2267e7abe4527f70deb6f598f40be3ea34c6adc35789bea94a32902"},
{file = "opentelemetry_exporter_otlp_proto_http-1.36.0.tar.gz", hash = "sha256:dd3637f72f774b9fc9608ab1ac479f8b44d09b6fb5b2f3df68a24ad1da7d356e"},
]
[package.dependencies]
googleapis-common-protos = ">=1.52,<2.0"
opentelemetry-api = ">=1.15,<2.0"
opentelemetry-exporter-otlp-proto-common = "1.36.0"
opentelemetry-proto = "1.36.0"
opentelemetry-sdk = ">=1.36.0,<1.37.0"
requests = ">=2.7,<3.0"
typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-instrumentation"
version = "0.57b0"
description = "Instrumentation Tools & Auto Instrumentation for OpenTelemetry Python"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "opentelemetry_instrumentation-0.57b0-py3-none-any.whl", hash = "sha256:9109280f44882e07cec2850db28210b90600ae9110b42824d196de357cbddf7e"},
{file = "opentelemetry_instrumentation-0.57b0.tar.gz", hash = "sha256:f2a30135ba77cdea2b0e1df272f4163c154e978f57214795d72f40befd4fcf05"},
]
[package.dependencies]
opentelemetry-api = ">=1.4,<2.0"
opentelemetry-semantic-conventions = "0.57b0"
packaging = ">=18.0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-instrumentation-threading"
version = "0.57b0"
description = "Thread context propagation support for OpenTelemetry"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "opentelemetry_instrumentation_threading-0.57b0-py3-none-any.whl", hash = "sha256:adfd64857c8c78d6111cf80552311e1713bad64272dd81abdd61f07b892a161b"},
{file = "opentelemetry_instrumentation_threading-0.57b0.tar.gz", hash = "sha256:06fa4c98d6bfe4670e7532497670ac202db42afa647ff770aedce0e422421c6e"},
]
[package.dependencies]
opentelemetry-api = ">=1.12,<2.0"
opentelemetry-instrumentation = "0.57b0"
wrapt = ">=1.0.0,<2.0.0"
[[package]]
name = "opentelemetry-proto"
version = "1.36.0"
@ -6036,6 +6164,115 @@ files = [
opentelemetry-api = "1.36.0"
typing-extensions = ">=4.5.0"
[[package]]
name = "opentelemetry-semantic-conventions-ai"
version = "0.4.13"
description = "OpenTelemetry Semantic Conventions Extension for Large Language Models"
optional = false
python-versions = "<4,>=3.9"
groups = ["main"]
files = [
{file = "opentelemetry_semantic_conventions_ai-0.4.13-py3-none-any.whl", hash = "sha256:883a30a6bb5deaec0d646912b5f9f6dcbb9f6f72557b73d0f2560bf25d13e2d5"},
{file = "opentelemetry_semantic_conventions_ai-0.4.13.tar.gz", hash = "sha256:94efa9fb4ffac18c45f54a3a338ffeb7eedb7e1bb4d147786e77202e159f0036"},
]
[[package]]
name = "orjson"
version = "3.11.4"
description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "orjson-3.11.4-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:e3aa2118a3ece0d25489cbe48498de8a5d580e42e8d9979f65bf47900a15aba1"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a69ab657a4e6733133a3dca82768f2f8b884043714e8d2b9ba9f52b6efef5c44"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3740bffd9816fc0326ddc406098a3a8f387e42223f5f455f2a02a9f834ead80c"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65fd2f5730b1bf7f350c6dc896173d3460d235c4be007af73986d7cd9a2acd23"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fdc3ae730541086158d549c97852e2eea6820665d4faf0f41bf99df41bc11ea"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e10b4d65901da88845516ce9f7f9736f9638d19a1d483b3883dc0182e6e5edba"},
{file = "orjson-3.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb6a03a678085f64b97f9d4a9ae69376ce91a3a9e9b56a82b1580d8e1d501aff"},
{file = "orjson-3.11.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:2c82e4f0b1c712477317434761fbc28b044c838b6b1240d895607441412371ac"},
{file = "orjson-3.11.4-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:d58c166a18f44cc9e2bad03a327dc2d1a3d2e85b847133cfbafd6bfc6719bd79"},
{file = "orjson-3.11.4-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:94f206766bf1ea30e1382e4890f763bd1eefddc580e08fec1ccdc20ddd95c827"},
{file = "orjson-3.11.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:41bf25fb39a34cf8edb4398818523277ee7096689db352036a9e8437f2f3ee6b"},
{file = "orjson-3.11.4-cp310-cp310-win32.whl", hash = "sha256:fa9627eba4e82f99ca6d29bc967f09aba446ee2b5a1ea728949ede73d313f5d3"},
{file = "orjson-3.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:23ef7abc7fca96632d8174ac115e668c1e931b8fe4dde586e92a500bf1914dcc"},
{file = "orjson-3.11.4-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:5e59d23cd93ada23ec59a96f215139753fbfe3a4d989549bcb390f8c00370b39"},
{file = "orjson-3.11.4-cp311-cp311-macosx_15_0_arm64.whl", hash = "sha256:5c3aedecfc1beb988c27c79d52ebefab93b6c3921dbec361167e6559aba2d36d"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da9e5301f1c2caa2a9a4a303480d79c9ad73560b2e7761de742ab39fe59d9175"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8873812c164a90a79f65368f8f96817e59e35d0cc02786a5356f0e2abed78040"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5d7feb0741ebb15204e748f26c9638e6665a5fa93c37a2c73d64f1669b0ddc63"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:01ee5487fefee21e6910da4c2ee9eef005bee568a0879834df86f888d2ffbdd9"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d40d46f348c0321df01507f92b95a377240c4ec31985225a6668f10e2676f9a"},
{file = "orjson-3.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95713e5fc8af84d8edc75b785d2386f653b63d62b16d681687746734b4dfc0be"},
{file = "orjson-3.11.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ad73ede24f9083614d6c4ca9a85fe70e33be7bf047ec586ee2363bc7418fe4d7"},
{file = "orjson-3.11.4-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:842289889de515421f3f224ef9c1f1efb199a32d76d8d2ca2706fa8afe749549"},
{file = "orjson-3.11.4-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:3b2427ed5791619851c52a1261b45c233930977e7de8cf36de05636c708fa905"},
{file = "orjson-3.11.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3c36e524af1d29982e9b190573677ea02781456b2e537d5840e4538a5ec41907"},
{file = "orjson-3.11.4-cp311-cp311-win32.whl", hash = "sha256:87255b88756eab4a68ec61837ca754e5d10fa8bc47dc57f75cedfeaec358d54c"},
{file = "orjson-3.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:e2d5d5d798aba9a0e1fede8d853fa899ce2cb930ec0857365f700dffc2c7af6a"},
{file = "orjson-3.11.4-cp311-cp311-win_arm64.whl", hash = "sha256:6bb6bb41b14c95d4f2702bce9975fda4516f1db48e500102fc4d8119032ff045"},
{file = "orjson-3.11.4-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:d4371de39319d05d3f482f372720b841c841b52f5385bd99c61ed69d55d9ab50"},
{file = "orjson-3.11.4-cp312-cp312-macosx_15_0_arm64.whl", hash = "sha256:e41fd3b3cac850eaae78232f37325ed7d7436e11c471246b87b2cd294ec94853"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:600e0e9ca042878c7fdf189cf1b028fe2c1418cc9195f6cb9824eb6ed99cb938"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7bbf9b333f1568ef5da42bc96e18bf30fd7f8d54e9ae066d711056add508e415"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4806363144bb6e7297b8e95870e78d30a649fdc4e23fc84daa80c8ebd366ce44"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ad355e8308493f527d41154e9053b86a5be892b3b359a5c6d5d95cda23601cb2"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c8a7517482667fb9f0ff1b2f16fe5829296ed7a655d04d68cd9711a4d8a4e708"},
{file = "orjson-3.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:97eb5942c7395a171cbfecc4ef6701fc3c403e762194683772df4c54cfbb2210"},
{file = "orjson-3.11.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:149d95d5e018bdd822e3f38c103b1a7c91f88d38a88aada5c4e9b3a73a244241"},
{file = "orjson-3.11.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:624f3951181eb46fc47dea3d221554e98784c823e7069edb5dbd0dc826ac909b"},
{file = "orjson-3.11.4-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:03bfa548cf35e3f8b3a96c4e8e41f753c686ff3d8e182ce275b1751deddab58c"},
{file = "orjson-3.11.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:525021896afef44a68148f6ed8a8bf8375553d6066c7f48537657f64823565b9"},
{file = "orjson-3.11.4-cp312-cp312-win32.whl", hash = "sha256:b58430396687ce0f7d9eeb3dd47761ca7d8fda8e9eb92b3077a7a353a75efefa"},
{file = "orjson-3.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:c6dbf422894e1e3c80a177133c0dda260f81428f9de16d61041949f6a2e5c140"},
{file = "orjson-3.11.4-cp312-cp312-win_arm64.whl", hash = "sha256:d38d2bc06d6415852224fcc9c0bfa834c25431e466dc319f0edd56cca81aa96e"},
{file = "orjson-3.11.4-cp313-cp313-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:2d6737d0e616a6e053c8b4acc9eccea6b6cce078533666f32d140e4f85002534"},
{file = "orjson-3.11.4-cp313-cp313-macosx_15_0_arm64.whl", hash = "sha256:afb14052690aa328cc118a8e09f07c651d301a72e44920b887c519b313d892ff"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:38aa9e65c591febb1b0aed8da4d469eba239d434c218562df179885c94e1a3ad"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f2cf4dfaf9163b0728d061bebc1e08631875c51cd30bf47cb9e3293bfbd7dcd5"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:89216ff3dfdde0e4070932e126320a1752c9d9a758d6a32ec54b3b9334991a6a"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9daa26ca8e97fae0ce8aa5d80606ef8f7914e9b129b6b5df9104266f764ce436"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5c8b2769dc31883c44a9cd126560327767f848eb95f99c36c9932f51090bfce9"},
{file = "orjson-3.11.4-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1469d254b9884f984026bd9b0fa5bbab477a4bfe558bba6848086f6d43eb5e73"},
{file = "orjson-3.11.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:68e44722541983614e37117209a194e8c3ad07838ccb3127d96863c95ec7f1e0"},
{file = "orjson-3.11.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:8e7805fda9672c12be2f22ae124dcd7b03928d6c197544fe12174b86553f3196"},
{file = "orjson-3.11.4-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:04b69c14615fb4434ab867bf6f38b2d649f6f300af30a6705397e895f7aec67a"},
{file = "orjson-3.11.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:639c3735b8ae7f970066930e58cf0ed39a852d417c24acd4a25fc0b3da3c39a6"},
{file = "orjson-3.11.4-cp313-cp313-win32.whl", hash = "sha256:6c13879c0d2964335491463302a6ca5ad98105fc5db3565499dcb80b1b4bd839"},
{file = "orjson-3.11.4-cp313-cp313-win_amd64.whl", hash = "sha256:09bf242a4af98732db9f9a1ec57ca2604848e16f132e3f72edfd3c5c96de009a"},
{file = "orjson-3.11.4-cp313-cp313-win_arm64.whl", hash = "sha256:a85f0adf63319d6c1ba06fb0dbf997fced64a01179cf17939a6caca662bf92de"},
{file = "orjson-3.11.4-cp314-cp314-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:42d43a1f552be1a112af0b21c10a5f553983c2a0938d2bbb8ecd8bc9fb572803"},
{file = "orjson-3.11.4-cp314-cp314-macosx_15_0_arm64.whl", hash = "sha256:26a20f3fbc6c7ff2cb8e89c4c5897762c9d88cf37330c6a117312365d6781d54"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6e3f20be9048941c7ffa8fc523ccbd17f82e24df1549d1d1fe9317712d19938e"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:aac364c758dc87a52e68e349924d7e4ded348dedff553889e4d9f22f74785316"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d5c54a6d76e3d741dcc3f2707f8eeb9ba2a791d3adbf18f900219b62942803b1"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f28485bdca8617b79d44627f5fb04336897041dfd9fa66d383a49d09d86798bc"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bfc2a484cad3585e4ba61985a6062a4c2ed5c7925db6d39f1fa267c9d166487f"},
{file = "orjson-3.11.4-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e34dbd508cb91c54f9c9788923daca129fe5b55c5b4eebe713bf5ed3791280cf"},
{file = "orjson-3.11.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:b13c478fa413d4b4ee606ec8e11c3b2e52683a640b006bb586b3041c2ca5f606"},
{file = "orjson-3.11.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:724ca721ecc8a831b319dcd72cfa370cc380db0bf94537f08f7edd0a7d4e1780"},
{file = "orjson-3.11.4-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:977c393f2e44845ce1b540e19a786e9643221b3323dae190668a98672d43fb23"},
{file = "orjson-3.11.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1e539e382cf46edec157ad66b0b0872a90d829a6b71f17cb633d6c160a223155"},
{file = "orjson-3.11.4-cp314-cp314-win32.whl", hash = "sha256:d63076d625babab9db5e7836118bdfa086e60f37d8a174194ae720161eb12394"},
{file = "orjson-3.11.4-cp314-cp314-win_amd64.whl", hash = "sha256:0a54d6635fa3aaa438ae32e8570b9f0de36f3f6562c308d2a2a452e8b0592db1"},
{file = "orjson-3.11.4-cp314-cp314-win_arm64.whl", hash = "sha256:78b999999039db3cf58f6d230f524f04f75f129ba3d1ca2ed121f8657e575d3d"},
{file = "orjson-3.11.4-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:405261b0a8c62bcbd8e2931c26fdc08714faf7025f45531541e2b29e544b545b"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:af02ff34059ee9199a3546f123a6ab4c86caf1708c79042caf0820dc290a6d4f"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0b2eba969ea4203c177c7b38b36c69519e6067ee68c34dc37081fac74c796e10"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0baa0ea43cfa5b008a28d3c07705cf3ada40e5d347f0f44994a64b1b7b4b5350"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80fd082f5dcc0e94657c144f1b2a3a6479c44ad50be216cf0c244e567f5eae19"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1e3704d35e47d5bee811fb1cbd8599f0b4009b14d451c4c57be5a7e25eb89a13"},
{file = "orjson-3.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:caa447f2b5356779d914658519c874cf3b7629e99e63391ed519c28c8aea4919"},
{file = "orjson-3.11.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:bba5118143373a86f91dadb8df41d9457498226698ebdf8e11cbb54d5b0e802d"},
{file = "orjson-3.11.4-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:622463ab81d19ef3e06868b576551587de8e4d518892d1afab71e0fbc1f9cffc"},
{file = "orjson-3.11.4-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:3e0a700c4b82144b72946b6629968df9762552ee1344bfdb767fecdd634fbd5a"},
{file = "orjson-3.11.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:6e18a5c15e764e5f3fc569b47872450b4bcea24f2a6354c0a0e95ad21045d5a9"},
{file = "orjson-3.11.4-cp39-cp39-win32.whl", hash = "sha256:fb1c37c71cad991ef4d89c7a634b5ffb4447dbd7ae3ae13e8f5ee7f1775e7ab1"},
{file = "orjson-3.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:e2985ce8b8c42d00492d0ed79f2bd2b6460d00f2fa671dfde4bf2e02f49bf5c6"},
{file = "orjson-3.11.4.tar.gz", hash = "sha256:39485f4ab4c9b30a3943cfe99e1a213c4776fb69e8abd68f66b83d5a0b0fdc6d"},
]
[[package]]
name = "packaging"
version = "25.0"
@ -6252,6 +6489,21 @@ files = [
[package.dependencies]
ptyprocess = ">=0.5"
[[package]]
name = "pfzy"
version = "0.3.4"
description = "Python port of the fzy fuzzy string matching algorithm"
optional = false
python-versions = ">=3.7,<4.0"
groups = ["main"]
files = [
{file = "pfzy-0.3.4-py3-none-any.whl", hash = "sha256:5f50d5b2b3207fa72e7ec0ef08372ef652685470974a107d0d4999fc5a903a96"},
{file = "pfzy-0.3.4.tar.gz", hash = "sha256:717ea765dd10b63618e7298b2d98efd819e0b30cd5905c9707223dceeb94b3f1"},
]
[package.extras]
docs = ["Sphinx (>=4.1.2,<5.0.0)", "furo (>=2021.8.17-beta.43,<2022.0.0)", "myst-parser (>=0.15.1,<0.16.0)", "sphinx-autobuild (>=2021.3.14,<2022.0.0)", "sphinx-copybutton (>=0.4.0,<0.5.0)"]
[[package]]
name = "pg8000"
version = "1.31.5"

View File

@ -11,7 +11,7 @@ description = "Deploy OpenHands"
authors = [ "OpenHands" ]
license = "POLYFORM"
readme = "README.md"
repository = "https://github.com/All-Hands-AI/OpenHands"
repository = "https://github.com/OpenHands/OpenHands"
packages = [
{ include = "server" },
{ include = "storage" },

View File

@ -30,3 +30,11 @@ JIRA_DC_CLIENT_SECRET = os.getenv('JIRA_DC_CLIENT_SECRET', '').strip()
JIRA_DC_BASE_URL = os.getenv('JIRA_DC_BASE_URL', '').strip()
JIRA_DC_ENABLE_OAUTH = os.getenv('JIRA_DC_ENABLE_OAUTH', '1') in ('1', 'true')
AUTH_URL = os.getenv('AUTH_URL', '').rstrip('/')
ROLE_CHECK_ENABLED = os.getenv('ROLE_CHECK_ENABLED', 'false').lower() in (
'1',
'true',
't',
'yes',
'y',
'on',
)
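# A minimal sketch (not part of this diff) of how the truthy-string
# convention above evaluates a few representative env values:
for raw in ('1', 'true', 'YES', 'off', ''):
    enabled = raw.lower() in ('1', 'true', 't', 'yes', 'y', 'on')
    print(repr(raw), '->', enabled)  # '1'/'true'/'YES' -> True; 'off'/'' -> False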

View File

@ -31,7 +31,7 @@ from openhands.integrations.provider import (
)
from openhands.server.settings import Settings
from openhands.server.user_auth.user_auth import AuthType, UserAuth
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
from openhands.storage.settings.settings_store import SettingsStore
token_manager = TokenManager()
@ -52,7 +52,7 @@ class SaasUserAuth(UserAuth):
settings_store: SaasSettingsStore | None = None
secrets_store: SaasSecretsStore | None = None
_settings: Settings | None = None
_user_secrets: UserSecrets | None = None
_secrets: Secrets | None = None
accepted_tos: bool | None = None
auth_type: AuthType = AuthType.COOKIE
@ -119,13 +119,13 @@ class SaasUserAuth(UserAuth):
self.secrets_store = secrets_store
return secrets_store
async def get_user_secrets(self):
user_secrets = self._user_secrets
async def get_secrets(self):
user_secrets = self._secrets
if user_secrets:
return user_secrets
secrets_store = await self.get_secrets_store()
user_secrets = await secrets_store.load()
self._user_secrets = user_secrets
self._secrets = user_secrets
return user_secrets
async def get_access_token(self) -> SecretStr | None:
@ -148,7 +148,7 @@ class SaasUserAuth(UserAuth):
if not access_token:
raise AuthError()
user_secrets = await self.get_user_secrets()
user_secrets = await self.get_secrets()
try:
# TODO: I think we can do this in a single request if we refactor
@ -203,6 +203,15 @@ class SaasUserAuth(UserAuth):
self.settings_store = settings_store
return settings_store
async def get_mcp_api_key(self) -> str:
api_key_store = ApiKeyStore.get_instance()
mcp_api_key = api_key_store.retrieve_mcp_api_key(self.user_id)
if not mcp_api_key:
mcp_api_key = api_key_store.create_api_key(
self.user_id, 'MCP_API_KEY', None
)
return mcp_api_key
@classmethod
async def get_instance(cls, request: Request) -> UserAuth:
logger.debug('saas_user_auth_get_instance')
@ -243,7 +252,12 @@ def get_api_key_from_header(request: Request):
# This is a temp hack
# Streamable HTTP MCP Client works via redirect requests, but drops the Authorization header for some reason
# We include the `X-Session-API-Key` header by default due to nested runtimes, so it is used as a drop-in replacement here
return request.headers.get('X-Session-API-Key')
session_api_key = request.headers.get('X-Session-API-Key')
if session_api_key:
return session_api_key
# Fallback to X-Access-Token header as an additional option
return request.headers.get('X-Access-Token')
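# Hypothetical client-side sketch (not in this diff) mirroring the fallback
# order implemented above: X-Session-API-Key wins, X-Access-Token is the backup.
import httpx

def call_with_session_key(url: str, session_key: str | None, access_token: str | None):
    headers = {}
    if session_key:
        headers['X-Session-API-Key'] = session_key
    elif access_token:
        headers['X-Access-Token'] = access_token
    return httpx.get(url, headers=headers)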
async def saas_user_auth_from_bearer(request: Request) -> SaasUserAuth | None:

View File

@ -37,6 +37,7 @@ from storage.offline_token_store import OfflineTokenStore
from tenacity import RetryCallState, retry, retry_if_exception_type, stop_after_attempt
from openhands.integrations.service_types import ProviderType
from openhands.utils.http_session import httpx_verify_option
def _before_sleep_callback(retry_state: RetryCallState) -> None:
@ -191,7 +192,7 @@ class TokenManager:
access_token: str,
idp: ProviderType,
) -> dict[str, str | int]:
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
base_url = KEYCLOAK_SERVER_URL_EXT if self.external else KEYCLOAK_SERVER_URL
url = f'{base_url}/realms/{KEYCLOAK_REALM_NAME}/broker/{idp.value}/token'
headers = {
@ -350,7 +351,7 @@ class TokenManager:
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=payload)
response.raise_for_status()
logger.info('Successfully refreshed GitHub token')
@ -376,7 +377,7 @@ class TokenManager:
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=payload)
response.raise_for_status()
logger.info('Successfully refreshed GitLab token')
@ -404,7 +405,7 @@ class TokenManager:
'refresh_token': refresh_token,
}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=data, headers=headers)
response.raise_for_status()
logger.info('Successfully refreshed Bitbucket token')
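# httpx_verify_option comes from openhands.utils.http_session and its body is
# not shown in this diff. A plausible sketch, assuming an env-driven toggle for
# custom CA bundles or disabled TLS verification (env names are hypothetical):
import os

def httpx_verify_option_sketch() -> bool | str:
    ca_bundle = os.environ.get('OPENHANDS_CA_BUNDLE')  # hypothetical variable
    if ca_bundle:
        return ca_bundle  # path to a custom CA file, passed to httpx verify=
    return os.environ.get('OPENHANDS_TLS_VERIFY', '1') not in ('0', 'false')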

View File

@ -50,7 +50,7 @@ SUBSCRIPTION_PRICE_DATA = {
},
}
DEFAULT_INITIAL_BUDGET = float(os.environ.get('DEFAULT_INITIAL_BUDGET', '20'))
DEFAULT_INITIAL_BUDGET = float(os.environ.get('DEFAULT_INITIAL_BUDGET', '10'))
STRIPE_API_KEY = os.environ.get('STRIPE_API_KEY', None)
STRIPE_WEBHOOK_SECRET = os.environ.get('STRIPE_WEBHOOK_SECRET', None)
REQUIRE_PAYMENT = os.environ.get('REQUIRE_PAYMENT', '0') in ('1', 'true')

View File

@ -3,43 +3,45 @@ from datetime import UTC, datetime
import httpx
from fastapi import APIRouter, Depends, HTTPException, status
from pydantic import BaseModel, field_validator
from server.config import get_config
from server.constants import LITE_LLM_API_KEY, LITE_LLM_API_URL
from storage.api_key_store import ApiKeyStore
from storage.database import session_maker
from storage.user_settings import UserSettings
from storage.saas_settings_store import SaasSettingsStore
from openhands.core.logger import openhands_logger as logger
from openhands.server.user_auth import get_user_id
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option
# Helper functions for BYOR API key management
async def get_byor_key_from_db(user_id: str) -> str | None:
"""Get the BYOR key from the database for a user."""
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
def _get_byor_key():
with session_maker() as session:
user_db_settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == user_id)
.first()
)
if user_db_settings and user_db_settings.llm_api_key_for_byor:
return user_db_settings.llm_api_key_for_byor
return None
return await call_sync_from_async(_get_byor_key)
user_db_settings = await call_sync_from_async(
settings_store.get_user_settings_by_keycloak_id, user_id
)
if user_db_settings and user_db_settings.llm_api_key_for_byor:
return user_db_settings.llm_api_key_for_byor
return None
async def store_byor_key_in_db(user_id: str, key: str) -> None:
"""Store the BYOR key in the database for a user."""
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
def _update_user_settings():
with session_maker() as session:
user_db_settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == user_id)
.first()
user_db_settings = settings_store.get_user_settings_by_keycloak_id(
user_id, session
)
if user_db_settings:
user_db_settings.llm_api_key_for_byor = key
@ -67,9 +69,10 @@ async def generate_byor_key(user_id: str) -> str | None:
try:
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
response = await client.post(
f'{LITE_LLM_API_URL}/key/generate',
@ -119,9 +122,10 @@ async def delete_byor_key_from_litellm(user_id: str, byor_key: str) -> bool:
try:
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
# Delete the key directly using the key value
delete_url = f'{LITE_LLM_API_URL}/key/delete'

View File

@ -12,14 +12,16 @@ from server.auth.constants import (
KEYCLOAK_CLIENT_ID,
KEYCLOAK_REALM_NAME,
KEYCLOAK_SERVER_URL_EXT,
ROLE_CHECK_ENABLED,
)
from server.auth.gitlab_sync import schedule_gitlab_repo_sync
from server.auth.saas_user_auth import SaasUserAuth
from server.auth.token_manager import TokenManager
from server.config import sign_token
from server.config import get_config, sign_token
from server.constants import IS_FEATURE_ENV
from server.routes.event_webhook import _get_session_api_key, _get_user_id
from storage.database import session_maker
from storage.saas_settings_store import SaasSettingsStore
from storage.user_settings import UserSettings
from openhands.core.logger import openhands_logger as logger
@ -131,6 +133,12 @@ async def keycloak_callback(
user_info = await token_manager.get_user_info(keycloak_access_token)
logger.debug(f'user_info: {user_info}')
if ROLE_CHECK_ENABLED and 'roles' not in user_info:
return JSONResponse(
status_code=status.HTTP_401_UNAUTHORIZED,
content={'error': 'Missing required role'},
)
if 'sub' not in user_info or 'preferred_username' not in user_info:
return JSONResponse(
status_code=status.HTTP_400_BAD_REQUEST,
@ -212,16 +220,14 @@ async def keycloak_callback(
f'&state={state}'
)
has_accepted_tos = False
with session_maker() as session:
user_settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == user_id)
.first()
)
has_accepted_tos = (
user_settings is not None and user_settings.accepted_tos is not None
)
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
user_settings = settings_store.get_user_settings_by_keycloak_id(user_id)
has_accepted_tos = (
user_settings is not None and user_settings.accepted_tos is not None
)
# If the user hasn't accepted the TOS, redirect to the TOS page
if not has_accepted_tos:

View File

@ -11,6 +11,7 @@ from fastapi import APIRouter, Depends, HTTPException, Request, status
from fastapi.responses import JSONResponse, RedirectResponse
from integrations import stripe_service
from pydantic import BaseModel
from server.config import get_config
from server.constants import (
LITE_LLM_API_KEY,
LITE_LLM_API_URL,
@ -22,15 +23,47 @@ from server.constants import (
from server.logger import logger
from storage.billing_session import BillingSession
from storage.database import session_maker
from storage.saas_settings_store import SaasSettingsStore
from storage.subscription_access import SubscriptionAccess
from storage.user_settings import UserSettings
from openhands.server.user_auth import get_user_id
from openhands.utils.http_session import httpx_verify_option
stripe.api_key = STRIPE_API_KEY
billing_router = APIRouter(prefix='/api/billing')
# TODO: Add a new app_mode named "ON_PREM" to support self-hosted customers instead of doing this
# and team members should comment out the "validate_saas_environment" function when developing and testing locally.
def is_all_hands_saas_environment(request: Request) -> bool:
"""Check if the current domain is an All Hands SaaS environment.
Args:
request: FastAPI Request object
Returns:
True if the current hostname ends with "all-hands.dev" or "openhands.dev"
"""
hostname = request.url.hostname or ''
return hostname.endswith('all-hands.dev') or hostname.endswith('openhands.dev')
def validate_saas_environment(request: Request) -> None:
"""Validate that the request is coming from an All Hands SaaS environment.
Args:
request: FastAPI Request object
Raises:
HTTPException: If the request is not from an All Hands SaaS environment
"""
if not is_all_hands_saas_environment(request):
raise HTTPException(
status_code=status.HTTP_403_FORBIDDEN,
detail='Checkout sessions are only available for All Hands SaaS environments',
)
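# Quick check of the suffix matching used above (sketch, not part of this diff):
for host in ('app.all-hands.dev', 'openhands.dev', 'all-hands.dev.evil.com', 'localhost'):
    allowed = host.endswith('all-hands.dev') or host.endswith('openhands.dev')
    print(host, '->', allowed)  # True, True, False, False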
class BillingSessionType(Enum):
DIRECT_PAYMENT = 'DIRECT_PAYMENT'
MONTHLY_SUBSCRIPTION = 'MONTHLY_SUBSCRIPTION'
@ -78,7 +111,7 @@ def calculate_credits(user_info: LiteLlmUserInfo) -> float:
async def get_credits(user_id: str = Depends(get_user_id)) -> GetCreditsResponse:
if not stripe_service.STRIPE_API_KEY:
return GetCreditsResponse()
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
user_json = await _get_litellm_user(client, user_id)
credits = calculate_credits(user_json['user_info'])
return GetCreditsResponse(credits=Decimal('{:.2f}'.format(credits)))
@ -196,6 +229,8 @@ async def cancel_subscription(user_id: str = Depends(get_user_id)) -> JSONRespon
async def create_customer_setup_session(
request: Request, user_id: str = Depends(get_user_id)
) -> CreateBillingSessionResponse:
validate_saas_environment(request)
customer_id = await stripe_service.find_or_create_customer(user_id)
checkout_session = await stripe.checkout.Session.create_async(
customer=customer_id,
@ -214,6 +249,8 @@ async def create_checkout_session(
request: Request,
user_id: str = Depends(get_user_id),
) -> CreateBillingSessionResponse:
validate_saas_environment(request)
customer_id = await stripe_service.find_or_create_customer(user_id)
checkout_session = await stripe.checkout.Session.create_async(
customer=customer_id,
@ -268,6 +305,8 @@ async def create_subscription_checkout_session(
billing_session_type: BillingSessionType = BillingSessionType.MONTHLY_SUBSCRIPTION,
user_id: str = Depends(get_user_id),
) -> CreateBillingSessionResponse:
validate_saas_environment(request)
# Prevent duplicate subscriptions for the same user
with session_maker() as session:
now = datetime.now(UTC)
@ -343,6 +382,8 @@ async def create_subscription_checkout_session_via_get(
user_id: str = Depends(get_user_id),
) -> RedirectResponse:
"""Create a subscription checkout session using a GET request (For easier copy / paste to URL bar)."""
validate_saas_environment(request)
response = await create_subscription_checkout_session(
request, billing_session_type, user_id
)
@ -390,7 +431,7 @@ async def success_callback(session_id: str, request: Request):
)
raise HTTPException(status.HTTP_400_BAD_REQUEST)
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
# Update max budget in litellm
user_json = await _get_litellm_user(client, billing_session.user_id)
amount_subtotal = stripe_session.amount_subtotal or 0
@ -578,11 +619,14 @@ async def stripe_webhook(request: Request) -> JSONResponse:
def reset_user_to_free_tier_settings(user_id: str) -> None:
"""Reset user settings to free tier defaults when subscription ends."""
config = get_config()
settings_store = SaasSettingsStore(
user_id=user_id, session_maker=session_maker, config=config
)
with session_maker() as session:
user_settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == user_id)
.first()
user_settings = settings_store.get_user_settings_by_keycloak_id(
user_id, session
)
if user_settings:

View File

@ -11,6 +11,7 @@ from fastapi.responses import RedirectResponse
from server.logger import logger
from openhands.server.shared import config
from openhands.utils.http_session import httpx_verify_option
GITHUB_PROXY_ENDPOINTS = bool(os.environ.get('GITHUB_PROXY_ENDPOINTS'))
@ -87,7 +88,7 @@ def add_github_proxy_routes(app: FastAPI):
]
body = urlencode(query_params, doseq=True)
url = 'https://github.com/login/oauth/access_token'
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, content=body)
return Response(
response.content,
@ -101,7 +102,7 @@ def add_github_proxy_routes(app: FastAPI):
logger.info(f'github_proxy_post:1:{path}')
body = await request.body()
url = f'https://github.com/{path}'
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, content=body, headers=request.headers)
return Response(
response.content,

View File

@ -52,6 +52,7 @@ from openhands.storage.locations import (
get_conversation_events_dir,
)
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option
from openhands.utils.import_utils import get_impl
from openhands.utils.shutdown_listener import should_continue
from openhands.utils.utils import create_registry_and_conversation_stats
@ -266,9 +267,10 @@ class SaasNestedConversationManager(ConversationManager):
):
logger.info('starting_nested_conversation', extra={'sid': sid})
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': session_api_key,
}
},
) as client:
await self._setup_nested_settings(client, api_url, settings)
await self._setup_provider_tokens(client, api_url, settings)
@ -484,9 +486,10 @@ class SaasNestedConversationManager(ConversationManager):
raise ValueError(f'no_such_conversation:{sid}')
nested_url = self._get_nested_url_for_runtime(runtime['runtime_id'], sid)
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': runtime['session_api_key'],
}
},
) as client:
response = await client.post(f'{nested_url}/events', json=data)
response.raise_for_status()
@ -551,9 +554,10 @@ class SaasNestedConversationManager(ConversationManager):
return None
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': session_api_key,
}
},
) as client:
# Query the nested runtime for conversation info
response = await client.get(nested_url)
@ -828,6 +832,7 @@ class SaasNestedConversationManager(ConversationManager):
@contextlib.asynccontextmanager
async def _httpx_client(self):
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={'X-API-Key': self.config.sandbox.api_key or ''},
timeout=_HTTP_TIMEOUT,
) as client:

View File

@ -35,6 +35,7 @@ class SaasConversationStore(ConversationStore):
session.query(StoredConversationMetadata)
.filter(StoredConversationMetadata.user_id == self.user_id)
.filter(StoredConversationMetadata.conversation_id == conversation_id)
.filter(StoredConversationMetadata.conversation_version == 'V0')
)
def _to_external_model(self, conversation_metadata: StoredConversationMetadata):
@ -59,6 +60,7 @@ class SaasConversationStore(ConversationStore):
kwargs.pop('reasoning_tokens', None)
kwargs.pop('context_window', None)
kwargs.pop('per_turn_token', None)
kwargs.pop('parent_conversation_id', None)
return ConversationMetadata(**kwargs)
@ -123,6 +125,7 @@ class SaasConversationStore(ConversationStore):
conversations = (
session.query(StoredConversationMetadata)
.filter(StoredConversationMetadata.user_id == self.user_id)
.filter(StoredConversationMetadata.conversation_version == 'V0')
.order_by(StoredConversationMetadata.created_at.desc())
.offset(offset)
.limit(limit + 1)

View File

@ -7,11 +7,11 @@ from dataclasses import dataclass
from cryptography.fernet import Fernet
from sqlalchemy.orm import sessionmaker
from storage.database import session_maker
from storage.stored_user_secrets import StoredUserSecrets
from storage.stored_custom_secrets import StoredCustomSecrets
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.core.logger import openhands_logger as logger
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore
@ -21,20 +21,20 @@ class SaasSecretsStore(SecretsStore):
session_maker: sessionmaker
config: OpenHandsConfig
async def load(self) -> UserSecrets | None:
async def load(self) -> Secrets | None:
if not self.user_id:
return None
with self.session_maker() as session:
# Fetch all secrets for the given user ID
settings = (
session.query(StoredUserSecrets)
.filter(StoredUserSecrets.keycloak_user_id == self.user_id)
session.query(StoredCustomSecrets)
.filter(StoredCustomSecrets.keycloak_user_id == self.user_id)
.all()
)
if not settings:
return UserSecrets()
return Secrets()
kwargs = {}
for secret in settings:
@ -45,14 +45,14 @@ class SaasSecretsStore(SecretsStore):
self._decrypt_kwargs(kwargs)
return UserSecrets(custom_secrets=kwargs) # type: ignore[arg-type]
return Secrets(custom_secrets=kwargs) # type: ignore[arg-type]
async def store(self, item: UserSecrets):
async def store(self, item: Secrets):
with self.session_maker() as session:
# Incoming secrets are always the most updated ones
# Delete all existing records and override with incoming ones
session.query(StoredUserSecrets).filter(
StoredUserSecrets.keycloak_user_id == self.user_id
session.query(StoredCustomSecrets).filter(
StoredCustomSecrets.keycloak_user_id == self.user_id
).delete()
# Prepare the new secrets data
@ -74,7 +74,7 @@ class SaasSecretsStore(SecretsStore):
# Add the new secrets
for secret_name, secret_value, description in secret_tuples:
new_secret = StoredUserSecrets(
new_secret = StoredCustomSecrets(
keycloak_user_id=self.user_id,
secret_name=secret_name,
secret_value=secret_value,
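# The store encrypts secret values with Fernet (imported at the top of this
# file); a self-contained sketch of the round-trip that the encrypt/decrypt
# helpers presumably perform (exact helper behavior is assumed):
from cryptography.fernet import Fernet

key = Fernet.generate_key()
fernet = Fernet(key)
token = fernet.encrypt(b'my-secret-value')
assert fernet.decrypt(token) == b'my-secret-value'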

View File

@ -24,7 +24,6 @@ from server.constants import (
from server.logger import logger
from sqlalchemy.orm import sessionmaker
from storage.database import session_maker
from storage.stored_settings import StoredSettings
from storage.user_settings import UserSettings
from openhands.core.config.openhands_config import OpenHandsConfig
@ -32,6 +31,7 @@ from openhands.server.settings import Settings
from openhands.storage import get_file_store
from openhands.storage.settings.settings_store import SettingsStore
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option
@dataclass
@ -40,15 +40,46 @@ class SaasSettingsStore(SettingsStore):
session_maker: sessionmaker
config: OpenHandsConfig
def get_user_settings_by_keycloak_id(
self, keycloak_user_id: str, session=None
) -> UserSettings | None:
"""
Get UserSettings by keycloak_user_id.
Args:
keycloak_user_id: The keycloak user ID to search for
session: Optional existing database session. If not provided, creates a new one.
Returns:
UserSettings object if found, None otherwise
"""
if not keycloak_user_id:
return None
def _get_settings():
if session:
# Use provided session
return (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == keycloak_user_id)
.first()
)
else:
# Create new session
with self.session_maker() as new_session:
return (
new_session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == keycloak_user_id)
.first()
)
return _get_settings()
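# Usage sketch (hypothetical wiring): the optional session argument lets the
# helper reuse a caller's transaction or open its own, which is how load() and
# store() below share a single session per request.
# row = settings_store.get_user_settings_by_keycloak_id(uid)         # own session
# with session_maker() as s:
#     row = settings_store.get_user_settings_by_keycloak_id(uid, s)  # shared session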
async def load(self) -> Settings | None:
if not self.user_id:
return None
with self.session_maker() as session:
settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == self.user_id)
.first()
)
settings = self.get_user_settings_by_keycloak_id(self.user_id, session)
if not settings or settings.user_version != CURRENT_USER_SETTINGS_VERSION:
logger.info(
@ -66,18 +97,18 @@ class SaasSettingsStore(SettingsStore):
return settings
async def store(self, item: Settings):
# Check if provider is OpenHands and generate API key if needed
if item and self._is_openhands_provider(item):
await self._ensure_openhands_api_key(item)
with self.session_maker() as session:
existing = None
kwargs = {}
if item:
kwargs = item.model_dump(context={'expose_secrets': True})
self._encrypt_kwargs(kwargs)
query = session.query(UserSettings).filter(
UserSettings.keycloak_user_id == self.user_id
)
# First check if we have an existing entry in the new table
existing = query.first()
existing = self.get_user_settings_by_keycloak_id(self.user_id, session)
kwargs = {
key: value
@ -144,33 +175,6 @@ class SaasSettingsStore(SettingsStore):
await self.store(settings)
return settings
def load_legacy_db_settings(self, github_user_id: str) -> Settings | None:
if not github_user_id:
return None
with self.session_maker() as session:
settings = (
session.query(StoredSettings)
.filter(StoredSettings.id == github_user_id)
.first()
)
if settings is None:
return None
logger.info(
'saas_settings_store:load_legacy_db_settings:found',
extra={'github_user_id': github_user_id},
)
kwargs = {
c.name: getattr(settings, c.name)
for c in StoredSettings.__table__.columns
if c.name in Settings.model_fields
}
self._decrypt_kwargs(kwargs)
del kwargs['secrets_store']
settings = Settings(**kwargs)
return settings
async def load_legacy_file_store_settings(self, github_user_id: str):
if not github_user_id:
return None
@ -216,9 +220,10 @@ class SaasSettingsStore(SettingsStore):
)
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
# Get the previous max budget to prevent accidental loss
# In Litellm a get always succeeds, regardless of whether the user actually exists
@ -235,10 +240,8 @@ class SaasSettingsStore(SettingsStore):
spend = user_info.get('spend') or 0
with session_maker() as session:
user_settings = (
session.query(UserSettings)
.filter(UserSettings.keycloak_user_id == self.user_id)
.first()
user_settings = self.get_user_settings_by_keycloak_id(
self.user_id, session
)
# In the upgrade to V4, we no longer use a billing margin, but instead apply this directly
# in litellm. The default billing margin was 2 before this (hence the magic numbers below)
@ -369,6 +372,30 @@ class SaasSettingsStore(SettingsStore):
def _should_encrypt(self, key: str) -> bool:
return key in ('llm_api_key', 'llm_api_key_for_byor', 'search_api_key')
def _is_openhands_provider(self, item: Settings) -> bool:
"""Check if the settings use the OpenHands provider."""
return bool(item.llm_model and item.llm_model.startswith('openhands/'))
async def _ensure_openhands_api_key(self, item: Settings) -> None:
"""Generate and set the OpenHands API key for the given settings.
First checks whether a key with the OpenHands alias already exists and
reuses it if found; otherwise, generates a new key.
"""
# Generate new key if none exists
generated_key = await self._generate_openhands_key()
if generated_key:
item.llm_api_key = SecretStr(generated_key)
logger.info(
'saas_settings_store:store:generated_openhands_key',
extra={'user_id': self.user_id},
)
else:
logger.warning(
'saas_settings_store:store:failed_to_generate_openhands_key',
extra={'user_id': self.user_id},
)
async def _create_user_in_lite_llm(
self, client: httpx.AsyncClient, email: str | None, max_budget: int, spend: int
):
@ -391,3 +418,55 @@ class SaasSettingsStore(SettingsStore):
},
)
return response
async def _generate_openhands_key(self) -> str | None:
"""Generate a new OpenHands provider key for a user."""
if not (LITE_LLM_API_KEY and LITE_LLM_API_URL):
logger.warning(
'saas_settings_store:_generate_openhands_key:litellm_config_not_found',
extra={'user_id': self.user_id},
)
return None
try:
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
},
) as client:
response = await client.post(
f'{LITE_LLM_API_URL}/key/generate',
json={
'user_id': self.user_id,
'metadata': {'type': 'openhands'},
},
)
response.raise_for_status()
response_json = response.json()
key = response_json.get('key')
if key:
logger.info(
'saas_settings_store:_generate_openhands_key:success',
extra={
'user_id': self.user_id,
'key_length': len(key) if key else 0,
'key_prefix': (
key[:10] + '...' if key and len(key) > 10 else key
),
},
)
return key
else:
logger.error(
'saas_settings_store:_generate_openhands_key:no_key_in_response',
extra={'user_id': self.user_id, 'response_json': response_json},
)
return None
except Exception as e:
logger.exception(
'saas_settings_store:_generate_openhands_key:error',
extra={'user_id': self.user_id, 'error': str(e)},
)
return None
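# Flow sketch (assumption, pieced together from this diff): store() only
# provisions a key for the OpenHands provider. For a Settings object whose
# llm_model starts with 'openhands/' (model name below is hypothetical),
# _is_openhands_provider() returns True, so _ensure_openhands_api_key() calls
# _generate_openhands_key(), which POSTs to f'{LITE_LLM_API_URL}/key/generate'
# and stores the result in llm_api_key.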

View File

@ -2,8 +2,8 @@ from sqlalchemy import Column, Identity, Integer, String
from storage.base import Base
class StoredUserSecrets(Base): # type: ignore
__tablename__ = 'user_secrets'
class StoredCustomSecrets(Base): # type: ignore
__tablename__ = 'custom_secrets'
id = Column(Integer, Identity(), primary_key=True)
keycloak_user_id = Column(String, nullable=True, index=True)
secret_name = Column(String, nullable=False)

View File

@ -1,29 +0,0 @@
import uuid
from sqlalchemy import JSON, Boolean, Column, Float, Integer, String
from storage.base import Base
class StoredSettings(Base): # type: ignore
"""
Legacy user settings storage. This should be considered deprecated - use UserSettings instead
"""
__tablename__ = 'settings'
id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
language = Column(String, nullable=True)
agent = Column(String, nullable=True)
max_iterations = Column(Integer, nullable=True)
security_analyzer = Column(String, nullable=True)
confirmation_mode = Column(Boolean, nullable=True, default=False)
llm_model = Column(String, nullable=True)
llm_api_key = Column(String, nullable=True)
llm_base_url = Column(String, nullable=True)
remote_runtime_resource_factor = Column(Integer, nullable=True)
enable_default_condenser = Column(Boolean, nullable=False, default=True)
user_consents_to_analytics = Column(Boolean, nullable=True)
margin = Column(Float, nullable=True)
enable_sound_notifications = Column(Boolean, nullable=True, default=False)
sandbox_base_container_image = Column(String, nullable=True)
sandbox_runtime_container_image = Column(String, nullable=True)
secrets_store = Column(JSON, nullable=True)

View File

@ -0,0 +1,98 @@
"""SQLAlchemy model for telemetry identity.
This model stores persistent identity information that must survive container restarts
for the OpenHands Enterprise Telemetry Service.
"""
from datetime import UTC, datetime
from typing import Optional
from sqlalchemy import CheckConstraint, Column, DateTime, Integer, String
from storage.base import Base
class TelemetryIdentity(Base): # type: ignore
"""Stores persistent identity information for telemetry.
This table is designed to contain exactly one row (enforced by database constraint)
that maintains only the identity data that cannot be reliably recomputed:
- customer_id: Established relationship with Replicated
- instance_id: Generated once, must remain stable
Operational data like timestamps are derived from the telemetry_metrics table.
"""
__tablename__ = 'telemetry_replicated_identity'
__table_args__ = (CheckConstraint('id = 1', name='single_identity_row'),)
id = Column(Integer, primary_key=True, default=1)
customer_id = Column(String(255), nullable=True)
instance_id = Column(String(255), nullable=True)
created_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
nullable=False,
)
updated_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
nullable=False,
)
def __init__(
self,
customer_id: Optional[str] = None,
instance_id: Optional[str] = None,
**kwargs,
):
"""Initialize telemetry identity.
Args:
customer_id: Unique identifier for the customer
instance_id: Unique identifier for this OpenHands instance
**kwargs: Additional keyword arguments for SQLAlchemy
"""
super().__init__(**kwargs)
# Set defaults for fields that would normally be set by SQLAlchemy
now = datetime.now(UTC)
if not hasattr(self, 'created_at') or self.created_at is None:
self.created_at = now
if not hasattr(self, 'updated_at') or self.updated_at is None:
self.updated_at = now
# Force id to be 1 to maintain single-row constraint
self.id = 1
self.customer_id = customer_id
self.instance_id = instance_id
def set_customer_info(
self,
customer_id: Optional[str] = None,
instance_id: Optional[str] = None,
) -> None:
"""Update customer and instance identification information.
Args:
customer_id: Unique identifier for the customer
instance_id: Unique identifier for this OpenHands instance
"""
if customer_id is not None:
self.customer_id = customer_id
if instance_id is not None:
self.instance_id = instance_id
@property
def has_customer_info(self) -> bool:
"""Check if customer identification information is configured."""
return bool(self.customer_id and self.instance_id)
def __repr__(self) -> str:
return (
f"<TelemetryIdentity(customer_id='{self.customer_id}', "
f"instance_id='{self.instance_id}')>"
)
class Config:
from_attributes = True
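# Hypothetical get-or-create sketch for the single-row table above; the
# CheckConstraint guarantees at most one row with id=1 (session is assumed to
# be a SQLAlchemy Session):
def get_or_create_identity(session):
    identity = session.get(TelemetryIdentity, 1)
    if identity is None:
        identity = TelemetryIdentity()
        session.add(identity)
    return identity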

View File

@ -0,0 +1,112 @@
"""SQLAlchemy model for telemetry metrics data.
This model stores individual metric collection records with upload tracking
and retry logic for the OpenHands Enterprise Telemetry Service.
"""
import uuid
from datetime import UTC, datetime
from typing import Any, Dict, Optional
from sqlalchemy import JSON, Column, DateTime, Integer, String, Text
from storage.base import Base
class TelemetryMetrics(Base): # type: ignore
"""Stores collected telemetry metrics with upload tracking.
Each record represents a single metrics collection event with associated
metadata for upload status and retry logic.
"""
__tablename__ = 'telemetry_metrics'
id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
collected_at = Column(
DateTime(timezone=True),
nullable=False,
default=lambda: datetime.now(UTC),
index=True,
)
metrics_data = Column(JSON, nullable=False)
uploaded_at = Column(DateTime(timezone=True), nullable=True, index=True)
upload_attempts = Column(Integer, nullable=False, default=0)
last_upload_error = Column(Text, nullable=True)
created_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
nullable=False,
)
updated_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
nullable=False,
)
def __init__(
self,
metrics_data: Dict[str, Any],
collected_at: Optional[datetime] = None,
**kwargs,
):
"""Initialize a new telemetry metrics record.
Args:
metrics_data: Dictionary containing the collected metrics
collected_at: Timestamp when metrics were collected (defaults to now)
**kwargs: Additional keyword arguments for SQLAlchemy
"""
super().__init__(**kwargs)
# Set defaults for fields that would normally be set by SQLAlchemy
now = datetime.now(UTC)
if not hasattr(self, 'id') or self.id is None:
self.id = str(uuid.uuid4())
if not hasattr(self, 'upload_attempts') or self.upload_attempts is None:
self.upload_attempts = 0
if not hasattr(self, 'created_at') or self.created_at is None:
self.created_at = now
if not hasattr(self, 'updated_at') or self.updated_at is None:
self.updated_at = now
self.metrics_data = metrics_data
if collected_at:
self.collected_at = collected_at
elif not hasattr(self, 'collected_at') or self.collected_at is None:
self.collected_at = now
def mark_uploaded(self) -> None:
"""Mark this metrics record as successfully uploaded."""
self.uploaded_at = datetime.now(UTC)
self.last_upload_error = None
def mark_upload_failed(self, error_message: str) -> None:
"""Mark this metrics record as having failed upload.
Args:
error_message: Description of the upload failure
"""
self.upload_attempts += 1
self.last_upload_error = error_message
self.uploaded_at = None
@property
def is_uploaded(self) -> bool:
"""Check if this metrics record has been successfully uploaded."""
return self.uploaded_at is not None
@property
def needs_retry(self) -> bool:
"""Check if this metrics record needs upload retry (failed but not too many attempts)."""
return not self.is_uploaded and self.upload_attempts < 3
def __repr__(self) -> str:
return (
f"<TelemetryMetrics(id='{self.id}', "
f"collected_at='{self.collected_at}', "
f'uploaded={self.is_uploaded})>'
)
class Config:
from_attributes = True
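# Hypothetical uploader sketch using the tracking fields above; upload_fn and
# the session are assumptions, while the retry cutoff of 3 comes from
# needs_retry:
def flush_pending_metrics(session, upload_fn):
    pending = (
        session.query(TelemetryMetrics)
        .filter(TelemetryMetrics.uploaded_at.is_(None))
        .all()
    )
    for record in pending:
        if not record.needs_retry:
            continue  # exhausted its 3 attempts
        try:
            upload_fn(record.metrics_data)
            record.mark_uploaded()
        except Exception as exc:
            record.mark_upload_failed(str(exc))
    session.commit()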

View File

@ -38,3 +38,4 @@ class UserSettings(Base): # type: ignore
email_verified = Column(Boolean, nullable=True)
git_user_name = Column(String, nullable=True)
git_user_email = Column(String, nullable=True)
v1_enabled = Column(Boolean, nullable=True)

View File

@ -17,7 +17,6 @@ from storage.github_app_installation import GithubAppInstallation
from storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
from storage.stored_conversation_metadata import StoredConversationMetadata
from storage.stored_offline_token import StoredOfflineToken
from storage.stored_settings import StoredSettings
from storage.stripe_customer import StripeCustomer
from storage.user_settings import UserSettings
@ -85,7 +84,7 @@ def add_minimal_fixtures(session_maker):
updated_at=datetime.fromisoformat('2025-03-08'),
)
)
session.add(StoredSettings(id='mock-user-id', user_consents_to_analytics=True))
session.add(
StripeCustomer(
keycloak_user_id='mock-user-id',

View File

@ -92,11 +92,8 @@ def test_unknown_variant_returns_original_agent_without_changes(monkeypatch):
assert getattr(result, 'condenser', None) is None
@patch('experiments.experiment_manager.handle_condenser_max_step_experiment__v1')
@patch('experiments.experiment_manager.ENABLE_EXPERIMENT_MANAGER', False)
def test_run_agent_variant_tests_v1_noop_when_manager_disabled(
mock_handle_condenser,
):
def test_run_agent_variant_tests_v1_noop_when_manager_disabled():
"""If ENABLE_EXPERIMENT_MANAGER is False, the method returns the exact same agent and does not call the handler."""
agent = make_agent()
conv_id = uuid4()
@ -109,8 +106,6 @@ def test_run_agent_variant_tests_v1_noop_when_manager_disabled(
# Same object returned (no copy)
assert result is agent
# Handler should not have been called
mock_handle_condenser.assert_not_called()
@patch('experiments.experiment_manager.ENABLE_EXPERIMENT_MANAGER', True)
@ -131,7 +126,3 @@ def test_run_agent_variant_tests_v1_calls_handler_and_sets_system_prompt(monkeyp
# Should be a different instance than the original (copied after handler runs)
assert result is not agent
assert result.system_prompt_filename == 'system_prompt_long_horizon.j2'
# The condenser returned by the handler must be preserved after the system-prompt override copy
assert isinstance(result.condenser, LLMSummarizingCondenser)
assert result.condenser.max_size == 80

View File

@ -309,7 +309,7 @@ class TestJiraViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()

View File

@ -309,7 +309,7 @@ class TestJiraDcViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()

View File

@ -309,7 +309,7 @@ class TestLinearViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()

View File

@ -0,0 +1 @@
# Storage unit tests

View File

@ -0,0 +1,129 @@
"""Unit tests for TelemetryIdentity model.
Tests the persistent identity storage for the OpenHands Enterprise Telemetry Service.
"""
from datetime import datetime
from storage.telemetry_identity import TelemetryIdentity
class TestTelemetryIdentity:
"""Test cases for TelemetryIdentity model."""
def test_create_identity_with_defaults(self):
"""Test creating identity with default values."""
identity = TelemetryIdentity()
assert identity.id == 1
assert identity.customer_id is None
assert identity.instance_id is None
assert isinstance(identity.created_at, datetime)
assert isinstance(identity.updated_at, datetime)
def test_create_identity_with_values(self):
"""Test creating identity with specific values."""
customer_id = 'cust_123'
instance_id = 'inst_456'
identity = TelemetryIdentity(customer_id=customer_id, instance_id=instance_id)
assert identity.id == 1
assert identity.customer_id == customer_id
assert identity.instance_id == instance_id
def test_set_customer_info(self):
"""Test updating customer information."""
identity = TelemetryIdentity()
# Update customer info
identity.set_customer_info(
customer_id='new_customer', instance_id='new_instance'
)
assert identity.customer_id == 'new_customer'
assert identity.instance_id == 'new_instance'
def test_set_customer_info_partial(self):
"""Test partial updates of customer information."""
identity = TelemetryIdentity(
customer_id='original_customer', instance_id='original_instance'
)
# Update only customer_id
identity.set_customer_info(customer_id='updated_customer')
assert identity.customer_id == 'updated_customer'
assert identity.instance_id == 'original_instance'
# Update only instance_id
identity.set_customer_info(instance_id='updated_instance')
assert identity.customer_id == 'updated_customer'
assert identity.instance_id == 'updated_instance'
def test_set_customer_info_with_none(self):
"""Test that None values don't overwrite existing data."""
identity = TelemetryIdentity(
customer_id='existing_customer', instance_id='existing_instance'
)
# Call with None values - should not change existing data
identity.set_customer_info(customer_id=None, instance_id=None)
assert identity.customer_id == 'existing_customer'
assert identity.instance_id == 'existing_instance'
def test_has_customer_info_property(self):
"""Test has_customer_info property logic."""
identity = TelemetryIdentity()
# Initially false (both None)
assert not identity.has_customer_info
# Still false with only customer_id
identity.customer_id = 'customer_123'
assert not identity.has_customer_info
# Still false with only instance_id
identity.customer_id = None
identity.instance_id = 'instance_456'
assert not identity.has_customer_info
# True when both are set
identity.customer_id = 'customer_123'
identity.instance_id = 'instance_456'
assert identity.has_customer_info
def test_has_customer_info_with_empty_strings(self):
"""Test has_customer_info with empty strings."""
identity = TelemetryIdentity(customer_id='', instance_id='')
# Empty strings should be falsy
assert not identity.has_customer_info
def test_repr_method(self):
"""Test string representation of identity."""
identity = TelemetryIdentity(
customer_id='test_customer', instance_id='test_instance'
)
repr_str = repr(identity)
assert 'TelemetryIdentity' in repr_str
assert 'test_customer' in repr_str
assert 'test_instance' in repr_str
def test_id_forced_to_one(self):
"""Test that ID is always forced to 1."""
identity = TelemetryIdentity()
assert identity.id == 1
# Even if we try to set a different ID in constructor
identity2 = TelemetryIdentity(customer_id='test')
assert identity2.id == 1
def test_timestamps_are_set(self):
"""Test that timestamps are properly set."""
identity = TelemetryIdentity()
assert identity.created_at is not None
assert identity.updated_at is not None
assert isinstance(identity.created_at, datetime)
assert isinstance(identity.updated_at, datetime)

View File

@ -0,0 +1,190 @@
"""Unit tests for TelemetryMetrics model."""
import uuid
from datetime import UTC, datetime
from storage.telemetry_metrics import TelemetryMetrics
class TestTelemetryMetrics:
"""Test cases for TelemetryMetrics model."""
def test_init_with_metrics_data(self):
"""Test initialization with metrics data."""
metrics_data = {
'cpu_usage': 75.5,
'memory_usage': 1024,
'active_sessions': 5,
}
metrics = TelemetryMetrics(metrics_data=metrics_data)
assert metrics.metrics_data == metrics_data
assert metrics.upload_attempts == 0
assert metrics.uploaded_at is None
assert metrics.last_upload_error is None
assert metrics.collected_at is not None
assert metrics.created_at is not None
assert metrics.updated_at is not None
def test_init_with_custom_collected_at(self):
"""Test initialization with custom collected_at timestamp."""
metrics_data = {'test': 'value'}
custom_time = datetime(2023, 1, 1, 12, 0, 0, tzinfo=UTC)
metrics = TelemetryMetrics(metrics_data=metrics_data, collected_at=custom_time)
assert metrics.collected_at == custom_time
def test_mark_uploaded(self):
"""Test marking metrics as uploaded."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
# Initially not uploaded
assert not metrics.is_uploaded
assert metrics.uploaded_at is None
# Mark as uploaded
metrics.mark_uploaded()
assert metrics.is_uploaded
def test_mark_upload_failed(self):
"""Test marking upload as failed."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
error_message = 'Network timeout'
# Initially no failures
assert metrics.upload_attempts == 0
assert metrics.last_upload_error is None
# Mark as failed
metrics.mark_upload_failed(error_message)
assert metrics.upload_attempts == 1
assert metrics.last_upload_error == error_message
assert metrics.uploaded_at is None
assert not metrics.is_uploaded
def test_multiple_upload_failures(self):
"""Test multiple upload failures increment attempts."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
metrics.mark_upload_failed('Error 1')
assert metrics.upload_attempts == 1
metrics.mark_upload_failed('Error 2')
assert metrics.upload_attempts == 2
assert metrics.last_upload_error == 'Error 2'
def test_is_uploaded_property(self):
"""Test is_uploaded property."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
# Initially not uploaded
assert not metrics.is_uploaded
# After marking uploaded
metrics.mark_uploaded()
assert metrics.is_uploaded
def test_needs_retry_property(self):
"""Test needs_retry property logic."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
# Initially needs retry (0 attempts, not uploaded)
assert metrics.needs_retry
# After 1 failure, still needs retry
metrics.mark_upload_failed('Error 1')
assert metrics.needs_retry
# After 2 failures, still needs retry
metrics.mark_upload_failed('Error 2')
assert metrics.needs_retry
# After 3 failures, no more retries
metrics.mark_upload_failed('Error 3')
assert not metrics.needs_retry
# Reset and test successful upload
metrics2 = TelemetryMetrics(metrics_data={'test': 'data'}) # type: ignore[unreachable]
metrics2.mark_uploaded()
# After upload, needs_retry should be False since is_uploaded is True
assert not metrics2.needs_retry
def test_upload_failure_clears_uploaded_at(self):
"""Test that upload failure clears uploaded_at timestamp."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
# Mark as uploaded first
metrics.mark_uploaded()
assert metrics.uploaded_at is not None
# Mark as failed - should clear uploaded_at
metrics.mark_upload_failed('Network error')
assert metrics.uploaded_at is None
def test_successful_upload_clears_error(self):
"""Test that successful upload clears error message."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
# Mark as failed first
metrics.mark_upload_failed('Network error')
assert metrics.last_upload_error == 'Network error'
# Mark as uploaded - should clear error
metrics.mark_uploaded()
assert metrics.last_upload_error is None
def test_uuid_generation(self):
"""Test that each instance gets a unique UUID."""
metrics1 = TelemetryMetrics(metrics_data={'test': 'data1'})
metrics2 = TelemetryMetrics(metrics_data={'test': 'data2'})
assert metrics1.id != metrics2.id
assert isinstance(uuid.UUID(metrics1.id), uuid.UUID)
assert isinstance(uuid.UUID(metrics2.id), uuid.UUID)
def test_repr(self):
"""Test string representation."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
repr_str = repr(metrics)
assert 'TelemetryMetrics' in repr_str
assert metrics.id in repr_str
assert str(metrics.collected_at) in repr_str
assert 'uploaded=False' in repr_str
# Test after upload
metrics.mark_uploaded()
repr_str = repr(metrics)
assert 'uploaded=True' in repr_str
def test_complex_metrics_data(self):
"""Test with complex nested metrics data."""
complex_data = {
'system': {
'cpu': {'usage': 75.5, 'cores': 8},
'memory': {'total': 16384, 'used': 8192},
},
'sessions': [
{'id': 'session1', 'duration': 3600},
{'id': 'session2', 'duration': 1800},
],
'timestamp': '2023-01-01T12:00:00Z',
}
metrics = TelemetryMetrics(metrics_data=complex_data)
assert metrics.metrics_data == complex_data
def test_empty_metrics_data(self):
"""Test with empty metrics data."""
metrics = TelemetryMetrics(metrics_data={})
assert metrics.metrics_data == {}
def test_config_class(self):
"""Test that Config class is properly set."""
assert hasattr(TelemetryMetrics, 'Config')
assert TelemetryMetrics.Config.from_attributes is True

View File

@ -36,6 +36,46 @@ def session_maker(engine):
return sessionmaker(bind=engine)
@pytest.fixture
def mock_request():
"""Create a mock request object with proper URL structure for testing."""
return Request(
scope={
'type': 'http',
'path': '/api/billing/test',
'server': ('test.com', 80),
}
)
@pytest.fixture
def mock_checkout_request():
"""Create a mock request object for checkout session tests."""
request = Request(
scope={
'type': 'http',
'path': '/api/billing/create-checkout-session',
'server': ('test.com', 80),
}
)
request._base_url = URL('http://test.com/')
return request
@pytest.fixture
def mock_subscription_request():
"""Create a mock request object for subscription checkout session tests."""
request = Request(
scope={
'type': 'http',
'path': '/api/billing/subscription-checkout-session',
'server': ('test.com', 80),
}
)
request._base_url = URL('http://test.com/')
return request
@pytest.mark.asyncio
async def test_get_credits_lite_llm_error():
mock_request = Request(scope={'type': 'http', 'state': {'user_id': 'mock_user'}})
@ -90,14 +130,10 @@ async def test_get_credits_success():
@pytest.mark.asyncio
async def test_create_checkout_session_stripe_error(session_maker):
async def test_create_checkout_session_stripe_error(
session_maker, mock_checkout_request
):
"""Test handling of Stripe API errors."""
mock_request = Request(
scope={
'type': 'http',
}
)
mock_request._base_url = URL('http://test.com/')
mock_customer = stripe.Customer(
id='mock-customer', metadata={'user_id': 'mock-user'}
@ -118,17 +154,16 @@ async def test_create_checkout_session_stripe_error(session_maker):
'server.auth.token_manager.TokenManager.get_user_info_from_user_id',
AsyncMock(return_value={'email': 'testy@tester.com'}),
),
patch('server.routes.billing.validate_saas_environment'),
):
await create_checkout_session(
CreateCheckoutSessionRequest(amount=25), mock_request, 'mock_user'
CreateCheckoutSessionRequest(amount=25), mock_checkout_request, 'mock_user'
)
@pytest.mark.asyncio
async def test_create_checkout_session_success(session_maker):
async def test_create_checkout_session_success(session_maker, mock_checkout_request):
"""Test successful creation of checkout session."""
mock_request = Request(scope={'type': 'http'})
mock_request._base_url = URL('http://test.com/')
mock_session = MagicMock()
mock_session.url = 'https://checkout.stripe.com/test-session'
@ -152,12 +187,13 @@ async def test_create_checkout_session_success(session_maker):
'server.auth.token_manager.TokenManager.get_user_info_from_user_id',
AsyncMock(return_value={'email': 'testy@tester.com'}),
),
patch('server.routes.billing.validate_saas_environment'),
):
mock_db_session = MagicMock()
mock_session_maker.return_value.__enter__.return_value = mock_db_session
result = await create_checkout_session(
CreateCheckoutSessionRequest(amount=25), mock_request, 'mock_user'
CreateCheckoutSessionRequest(amount=25), mock_checkout_request, 'mock_user'
)
assert isinstance(result, CreateBillingSessionResponse)
@ -590,7 +626,9 @@ async def test_cancel_subscription_stripe_error():
@pytest.mark.asyncio
async def test_create_subscription_checkout_session_duplicate_prevention():
async def test_create_subscription_checkout_session_duplicate_prevention(
mock_subscription_request,
):
"""Test that creating a subscription when user already has active subscription raises error."""
from datetime import UTC, datetime
@ -609,11 +647,9 @@ async def test_create_subscription_checkout_session_duplicate_prevention():
cancelled_at=None,
)
mock_request = Request(scope={'type': 'http'})
mock_request._base_url = URL('http://test.com/')
with (
patch('server.routes.billing.session_maker') as mock_session_maker,
patch('server.routes.billing.validate_saas_environment'),
):
# Setup mock session to return existing active subscription
mock_session = MagicMock()
@ -623,7 +659,7 @@ async def test_create_subscription_checkout_session_duplicate_prevention():
# Call the function and expect HTTPException
with pytest.raises(HTTPException) as exc_info:
await create_subscription_checkout_session(
mock_request, user_id='test_user'
mock_subscription_request, user_id='test_user'
)
assert exc_info.value.status_code == 400
@ -634,10 +670,10 @@ async def test_create_subscription_checkout_session_duplicate_prevention():
@pytest.mark.asyncio
async def test_create_subscription_checkout_session_allows_after_cancellation():
async def test_create_subscription_checkout_session_allows_after_cancellation(
mock_subscription_request,
):
"""Test that creating a subscription is allowed when previous subscription was cancelled."""
mock_request = Request(scope={'type': 'http'})
mock_request._base_url = URL('http://test.com/')
mock_session_obj = MagicMock()
mock_session_obj.url = 'https://checkout.stripe.com/test-session'
@ -657,6 +693,7 @@ async def test_create_subscription_checkout_session_allows_after_cancellation():
'server.routes.billing.SUBSCRIPTION_PRICE_DATA',
{'MONTHLY_SUBSCRIPTION': {'unit_amount': 2000}},
),
patch('server.routes.billing.validate_saas_environment'),
):
# Setup mock session - the query should return None because cancelled subscriptions are filtered out
mock_session = MagicMock()
@ -665,7 +702,7 @@ async def test_create_subscription_checkout_session_allows_after_cancellation():
# Should succeed
result = await create_subscription_checkout_session(
mock_request, user_id='test_user'
mock_subscription_request, user_id='test_user'
)
assert isinstance(result, CreateBillingSessionResponse)
@ -673,10 +710,10 @@ async def test_create_subscription_checkout_session_allows_after_cancellation():
@pytest.mark.asyncio
async def test_create_subscription_checkout_session_success_no_existing():
async def test_create_subscription_checkout_session_success_no_existing(
mock_subscription_request,
):
"""Test successful subscription creation when no existing subscription."""
mock_request = Request(scope={'type': 'http'})
mock_request._base_url = URL('http://test.com/')
mock_session_obj = MagicMock()
mock_session_obj.url = 'https://checkout.stripe.com/test-session'
@ -696,6 +733,7 @@ async def test_create_subscription_checkout_session_success_no_existing():
'server.routes.billing.SUBSCRIPTION_PRICE_DATA',
{'MONTHLY_SUBSCRIPTION': {'unit_amount': 2000}},
),
patch('server.routes.billing.validate_saas_environment'),
):
# Setup mock session to return no existing subscription
mock_session = MagicMock()
@ -704,7 +742,7 @@ async def test_create_subscription_checkout_session_success_no_existing():
# Should succeed
result = await create_subscription_checkout_session(
mock_request, user_id='test_user'
mock_subscription_request, user_id='test_user'
)
assert isinstance(result, CreateBillingSessionResponse)

View File

@ -1,7 +1,9 @@
from unittest import TestCase, mock
from unittest.mock import MagicMock, patch
from integrations.github.github_view import GithubFactory, get_oh_labels
from integrations.github.github_view import GithubFactory, GithubIssue, get_oh_labels
from integrations.models import Message, SourceType
from integrations.types import UserData
class TestGithubLabels(TestCase):
@ -75,3 +77,128 @@ class TestGithubCommentCaseInsensitivity(TestCase):
self.assertTrue(GithubFactory.is_issue_comment(message_lower))
self.assertTrue(GithubFactory.is_issue_comment(message_upper))
self.assertTrue(GithubFactory.is_issue_comment(message_mixed))
class TestGithubV1ConversationRouting(TestCase):
"""Test V1 conversation routing logic in GitHub integration."""
def setUp(self):
"""Set up test fixtures."""
# Create a proper UserData instance instead of MagicMock
user_data = UserData(
user_id=123, username='testuser', keycloak_user_id='test-keycloak-id'
)
# Create a mock raw_payload
raw_payload = Message(
source=SourceType.GITHUB,
message={
'payload': {
'action': 'opened',
'issue': {'number': 123},
}
},
)
self.github_issue = GithubIssue(
user_info=user_data,
full_repo_name='test/repo',
issue_number=123,
installation_id=456,
conversation_id='test-conversation-id',
should_extract=True,
send_summary_instruction=False,
is_public_repo=True,
raw_payload=raw_payload,
uuid='test-uuid',
title='Test Issue',
description='Test issue description',
previous_comments=[],
)
@patch('integrations.github.github_view.get_user_v1_enabled_setting')
@patch.object(GithubIssue, '_create_v0_conversation')
@patch.object(GithubIssue, '_create_v1_conversation')
async def test_create_new_conversation_routes_to_v0_when_disabled(
self, mock_create_v1, mock_create_v0, mock_get_v1_setting
):
"""Test that conversation creation routes to V0 when v1_enabled is False."""
# Mock v1_enabled as False
mock_get_v1_setting.return_value = False
mock_create_v0.return_value = None
mock_create_v1.return_value = None
# Mock parameters
jinja_env = MagicMock()
git_provider_tokens = MagicMock()
conversation_metadata = MagicMock()
# Call the method
await self.github_issue.create_new_conversation(
jinja_env, git_provider_tokens, conversation_metadata
)
# Verify V0 was called and V1 was not
mock_create_v0.assert_called_once_with(
jinja_env, git_provider_tokens, conversation_metadata
)
mock_create_v1.assert_not_called()
@patch('integrations.github.github_view.get_user_v1_enabled_setting')
@patch.object(GithubIssue, '_create_v0_conversation')
@patch.object(GithubIssue, '_create_v1_conversation')
async def test_create_new_conversation_routes_to_v1_when_enabled(
self, mock_create_v1, mock_create_v0, mock_get_v1_setting
):
"""Test that conversation creation routes to V1 when v1_enabled is True."""
# Mock v1_enabled as True
mock_get_v1_setting.return_value = True
mock_create_v0.return_value = None
mock_create_v1.return_value = None
# Mock parameters
jinja_env = MagicMock()
git_provider_tokens = MagicMock()
conversation_metadata = MagicMock()
# Call the method
await self.github_issue.create_new_conversation(
jinja_env, git_provider_tokens, conversation_metadata
)
# Verify V1 was called and V0 was not
mock_create_v1.assert_called_once_with(
jinja_env, git_provider_tokens, conversation_metadata
)
mock_create_v0.assert_not_called()
@patch('integrations.github.github_view.get_user_v1_enabled_setting')
@patch.object(GithubIssue, '_create_v0_conversation')
@patch.object(GithubIssue, '_create_v1_conversation')
async def test_create_new_conversation_fallback_on_v1_setting_error(
self, mock_create_v1, mock_create_v0, mock_get_v1_setting
):
"""Test that conversation creation falls back to V0 when _create_v1_conversation fails."""
# Mock v1_enabled as True so V1 is attempted
mock_get_v1_setting.return_value = True
# Mock _create_v1_conversation to raise an exception
mock_create_v1.side_effect = Exception('V1 conversation creation failed')
mock_create_v0.return_value = None
# Mock parameters
jinja_env = MagicMock()
git_provider_tokens = MagicMock()
conversation_metadata = MagicMock()
# Call the method
await self.github_issue.create_new_conversation(
jinja_env, git_provider_tokens, conversation_metadata
)
# Verify V1 was attempted first, then V0 was called as fallback
mock_create_v1.assert_called_once_with(
jinja_env, git_provider_tokens, conversation_metadata
)
mock_create_v0.assert_called_once_with(
jinja_env, git_provider_tokens, conversation_metadata
)
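Taken together, the three tests above pin down a dispatch contract for `create_new_conversation`: consult the user's V1 setting, route to `_create_v1_conversation` when it is enabled, and use `_create_v0_conversation` otherwise or when V1 creation raises. A sketch of control flow consistent with those assertions follows; the argument passed to `get_user_v1_enabled_setting` is an assumption, and the call is written as synchronous because the tests stub it with a plain `return_value`:

async def create_new_conversation(
    self, jinja_env, git_provider_tokens, conversation_metadata
):
    # Route based on the user's V1 opt-in setting (argument is assumed).
    v1_enabled = get_user_v1_enabled_setting(self.user_info.keycloak_user_id)
    if not v1_enabled:
        return await self._create_v0_conversation(
            jinja_env, git_provider_tokens, conversation_metadata
        )
    try:
        return await self._create_v1_conversation(
            jinja_env, git_provider_tokens, conversation_metadata
        )
    except Exception:
        # V1 creation failed; fall back to V0, matching the third test.
        return await self._create_v0_conversation(
            jinja_env, git_provider_tokens, conversation_metadata
        )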

View File

@ -8,8 +8,8 @@ pytestmark = pytest.mark.asyncio
# Mock the call_sync_from_async function to return the result of the function directly
def mock_call_sync_from_async(func):
return func()
def mock_call_sync_from_async(func, *args, **kwargs):
return func(*args, **kwargs)
@pytest.fixture
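The widened stub matters because the real `call_sync_from_async` is called with the target function's arguments. Under the old one-parameter stub, any patched call site that passed arguments would raise a `TypeError`; the new stub forwards them. A quick illustration with a made-up call site:

def old_stub(func):
    return func()


def new_stub(func, *args, **kwargs):
    return func(*args, **kwargs)


def add(a, b):
    return a + b


new_stub(add, 1, 2)  # returns 3
# old_stub(add, 1, 2) would raise TypeError: too many positional arguments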

View File

@ -5,11 +5,11 @@ from unittest.mock import MagicMock
import pytest
from pydantic import SecretStr
from storage.saas_secrets_store import SaasSecretsStore
from storage.stored_user_secrets import StoredUserSecrets
from storage.stored_custom_secrets import StoredCustomSecrets
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.integrations.provider import CustomSecret
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
@pytest.fixture
@ -27,8 +27,8 @@ def secrets_store(session_maker, mock_config):
class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_store_and_load(self, secrets_store):
# Create a UserSecrets object with some test data
user_secrets = UserSecrets(
# Create a Secrets object with some test data
user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -60,8 +60,8 @@ class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_encryption_decryption(self, secrets_store):
# Create a UserSecrets object with sensitive data
user_secrets = UserSecrets(
# Create a Secrets object with sensitive data
user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -87,8 +87,8 @@ class TestSaasSecretsStore:
# Verify the data is encrypted in the database
with secrets_store.session_maker() as session:
stored = (
session.query(StoredUserSecrets)
.filter(StoredUserSecrets.keycloak_user_id == 'user-id')
session.query(StoredCustomSecrets)
.filter(StoredCustomSecrets.keycloak_user_id == 'user-id')
.first()
)
@ -154,7 +154,7 @@ class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_update_existing_secrets(self, secrets_store):
# Create and store initial secrets
initial_secrets = UserSecrets(
initial_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -169,7 +169,7 @@ class TestSaasSecretsStore:
await secrets_store.store(initial_secrets)
# Create and store updated secrets
updated_secrets = UserSecrets(
updated_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
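The rename here is mechanical: `UserSecrets` becomes `Secrets` and the ORM model `StoredUserSecrets` becomes `StoredCustomSecrets`, with the construction pattern unchanged. A sketch of building one of these objects the way the tests do; the argument to `CustomSecret.from_value` is truncated in the hunks above, so the value shown is a guess:

from types import MappingProxyType

from openhands.integrations.provider import CustomSecret
from openhands.storage.data_models.secrets import Secrets

# custom_secrets maps a secret name to a CustomSecret in an immutable view.
user_secrets = Secrets(
    custom_secrets=MappingProxyType(
        {
            # The exact shape of from_value()'s argument is an assumption.
            'api_token': CustomSecret.from_value('test-token-value'),
        }
    )
)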

View File

@ -8,7 +8,6 @@ from server.constants import (
LITE_LLM_TEAM_ID,
)
from storage.saas_settings_store import SaasSettingsStore
from storage.stored_settings import StoredSettings
from storage.user_settings import UserSettings
from openhands.core.config.openhands_config import OpenHandsConfig
@ -244,7 +243,7 @@ async def test_update_settings_with_litellm_default(
# Check that the URL and most of the JSON payload match what we expect
assert call_args['json']['user_email'] == 'testy@tester.com'
assert call_args['json']['models'] == []
assert call_args['json']['max_budget'] == 20.0
assert call_args['json']['max_budget'] == 10.0
assert call_args['json']['user_id'] == 'user-id'
assert call_args['json']['teams'] == ['test_team']
assert call_args['json']['auto_create_key'] is True
@ -303,26 +302,6 @@ async def test_create_default_settings_require_payment_disabled(
assert settings.language == 'en'
@pytest.mark.asyncio
async def test_create_default_settings_with_existing_llm_key(
settings_store, mock_stripe, mock_github_user, mock_litellm_api, session_maker
):
# Test that existing llm_api_key is preserved and not overwritten with litellm default
with (
patch('storage.saas_settings_store.REQUIRE_PAYMENT', False),
patch('storage.saas_settings_store.LITE_LLM_API_KEY', 'mock-api-key'),
patch('storage.saas_settings_store.session_maker', session_maker),
):
with settings_store.session_maker() as session:
kwargs = {'id': '12345', 'language': 'en', 'llm_api_key': 'existing_key'}
settings_store._encrypt_kwargs(kwargs)
session.merge(StoredSettings(**kwargs))
session.commit()
updated_settings = await settings_store.create_default_settings(None)
assert updated_settings is not None
assert updated_settings.llm_api_key.get_secret_value() == 'test_api_key'
@pytest.mark.asyncio
async def test_create_default_lite_llm_settings_no_api_config(settings_store):
with (

View File

@ -535,3 +535,115 @@ def test_get_api_key_from_header_with_invalid_authorization_format():
# Assert that None was returned
assert api_key is None
def test_get_api_key_from_header_with_x_access_token():
"""Test that get_api_key_from_header extracts API key from X-Access-Token header."""
# Create a mock request with X-Access-Token header
mock_request = MagicMock(spec=Request)
mock_request.headers = {'X-Access-Token': 'access_token_key'}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key was correctly extracted
assert api_key == 'access_token_key'
def test_get_api_key_from_header_priority_authorization_over_x_access_token():
"""Test that Authorization header takes priority over X-Access-Token header."""
# Create a mock request with both headers
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'Authorization': 'Bearer auth_api_key',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key from Authorization header was used
assert api_key == 'auth_api_key'
def test_get_api_key_from_header_priority_x_session_over_x_access_token():
"""Test that X-Session-API-Key header takes priority over X-Access-Token header."""
# Create a mock request with both headers
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'X-Session-API-Key': 'session_api_key',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key from X-Session-API-Key header was used
assert api_key == 'session_api_key'
def test_get_api_key_from_header_all_three_headers():
"""Test header priority when all three headers are present."""
# Create a mock request with all three headers
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'Authorization': 'Bearer auth_api_key',
'X-Session-API-Key': 'session_api_key',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key from Authorization header was used (highest priority)
assert api_key == 'auth_api_key'
def test_get_api_key_from_header_invalid_authorization_fallback_to_x_access_token():
"""Test that invalid Authorization header falls back to X-Access-Token."""
# Create a mock request with invalid Authorization header and X-Access-Token
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'Authorization': 'InvalidFormat api_key',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key from X-Access-Token header was used
assert api_key == 'access_token_key'
def test_get_api_key_from_header_empty_headers():
"""Test that empty header values are handled correctly."""
# Create a mock request with empty header values
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'Authorization': '',
'X-Session-API-Key': '',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that the API key from X-Access-Token header was used
assert api_key == 'access_token_key'
def test_get_api_key_from_header_bearer_with_empty_token():
"""Test that Bearer header with empty token falls back to other headers."""
# Create a mock request with Bearer header with empty token
mock_request = MagicMock(spec=Request)
mock_request.headers = {
'Authorization': 'Bearer ',
'X-Access-Token': 'access_token_key',
}
# Call the function
api_key = get_api_key_from_header(mock_request)
# Assert that empty string from Bearer is returned (current behavior)
# This tests the current implementation behavior
assert api_key == ''
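Read as a group, these tests specify the full precedence chain: a well-formed `Authorization: Bearer` header wins, then `X-Session-API-Key`, then `X-Access-Token`, with malformed or empty values falling through, except that `Bearer ` followed by nothing returns the empty string, which the last test deliberately pins as current behavior. A sketch of a handler consistent with all of the assertions above, not necessarily the repo's implementation:

def get_api_key_from_header(request):
    # 1. A well-formed Authorization: Bearer <token> has highest priority.
    auth = request.headers.get('Authorization')
    if auth and auth.startswith('Bearer '):
        # 'Bearer ' with an empty token yields '' (pinned current behavior).
        return auth[len('Bearer '):]
    # 2. X-Session-API-Key is next; empty values fall through.
    session_key = request.headers.get('X-Session-API-Key')
    if session_key:
        return session_key
    # 3. X-Access-Token is the last resort; None when absent.
    return request.headers.get('X-Access-Token')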

View File

@ -14,7 +14,7 @@ def slack_manager():
@pytest.mark.parametrize(
'message,expected',
[
('All-Hands-AI/Openhands', 'All-Hands-AI/Openhands'),
('OpenHands/Openhands', 'OpenHands/Openhands'),
('deploy repo', 'deploy'),
('use hello world', None),
],

View File

@ -13,7 +13,6 @@ from integrations.stripe_service import (
)
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from storage.stored_settings import Base as StoredBase
from storage.stripe_customer import Base as StripeCustomerBase
from storage.stripe_customer import StripeCustomer
from storage.user_settings import Base as UserBase
@ -22,7 +21,7 @@ from storage.user_settings import Base as UserBase
@pytest.fixture
def engine():
engine = create_engine('sqlite:///:memory:')
StoredBase.metadata.create_all(engine)
UserBase.metadata.create_all(engine)
StripeCustomerBase.metadata.create_all(engine)
return engine

View File

@ -74,8 +74,8 @@ def test_infer_repo_from_message():
# Single GitHub URLs
('Clone https://github.com/demo123/demo1.git', ['demo123/demo1']),
(
'Check out https://github.com/All-Hands-AI/OpenHands.git for details',
['All-Hands-AI/OpenHands'],
'Check out https://github.com/OpenHands/OpenHands.git for details',
['OpenHands/OpenHands'],
),
('Visit https://github.com/microsoft/vscode', ['microsoft/vscode']),
# Single GitLab URLs
@ -92,7 +92,7 @@ def test_infer_repo_from_message():
['atlassian/atlassian-connect-express'],
),
# Single direct owner/repo mentions
('Please deploy the All-Hands-AI/OpenHands repo', ['All-Hands-AI/OpenHands']),
('Please deploy the OpenHands/OpenHands repo', ['OpenHands/OpenHands']),
('I need help with the microsoft/vscode repository', ['microsoft/vscode']),
('Check facebook/react for examples', ['facebook/react']),
('The torvalds/linux kernel', ['torvalds/linux']),
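The parametrized cases imply two extraction passes: one over full git-host URLs (with an optional `.git` suffix) and one over bare `owner/repo` mentions. A rough sketch of the URL pass only, written against the cases above rather than the repo's actual pattern:

import re

# Hosts and the optional .git suffix come from the cases above; the
# character classes and lookahead are assumptions.
GIT_URL_RE = re.compile(
    r'https?://(?:github\.com|gitlab\.com|bitbucket\.org)/'
    r'([\w.-]+/[\w.-]+?)(?:\.git)?(?=[\s/]|$)'
)


def infer_repos_from_urls(message: str) -> list[str]:
    return GIT_URL_RE.findall(message)


assert infer_repos_from_urls(
    'Check out https://github.com/OpenHands/OpenHands.git for details'
) == ['OpenHands/OpenHands']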

View File

@ -6,14 +6,14 @@ This folder contains code and resources to run experiments and evaluations.
### Setup
Before starting evaluation, follow the instructions [here](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) to set up your local development environment and LLM.
Before starting evaluation, follow the instructions [here](https://github.com/OpenHands/OpenHands/blob/main/Development.md) to set up your local development environment and LLM.
Once you are done with setup, you can follow the benchmark-specific instructions in each subdirectory of the [evaluation directory](#supported-benchmarks).
Generally these will involve running `run_infer.py` to perform inference with the agents.
### Implementing and Evaluating an Agent
To add an agent to OpenHands, you will need to implement it in the [agenthub directory](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/agenthub). There is a README there with more information.
To add an agent to OpenHands, you will need to implement it in the [agenthub directory](https://github.com/OpenHands/OpenHands/tree/main/openhands/agenthub). There is a README there with more information.
To evaluate an agent, you can provide the agent's name to the `run_infer.py` program.

View File

@ -109,7 +109,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()

View File

@ -124,7 +124,7 @@ if __name__ == '__main__':
)
args, _ = parser.parse_known_args()
# Check https://github.com/All-Hands-AI/OpenHands/blob/main/evaluation/swe_bench/README.md#configure-openhands-and-your-llm
# Check https://github.com/OpenHands/OpenHands/blob/main/evaluation/swe_bench/README.md#configure-openhands-and-your-llm
# for details of how to set `llm_config`
if args.llm_config:
specified_llm_config = get_llm_config_arg(args.llm_config)

View File

@ -15,7 +15,7 @@ python evaluation/benchmarks/multi_swe_bench/scripts/data/data_change.py
## Docker image download
Please download the multi-swe-bench dokcer images from [here](https://github.com/multi-swe-bench/multi-swe-bench?tab=readme-ov-file#run-evaluation).
Please download the multi-swe-bench docker images from [here](https://github.com/multi-swe-bench/multi-swe-bench?tab=readme-ov-file#run-evaluation).
## Generate patch
@ -47,7 +47,7 @@ For debugging purposes, you can set `export EVAL_SKIP_MAXIMUM_RETRIES_EXCEEDED=t
The results will be generated in evaluation/evaluation_outputs/outputs/XXX/CodeActAgent/YYY/output.jsonl; you can refer to the [example](examples/output.jsonl).
## Runing evaluation
## Running evaluation
First, install [multi-swe-bench](https://github.com/multi-swe-bench/multi-swe-bench).

Some files were not shown because too many files have changed in this diff.