diff --git a/.github/scripts/update_pr_description.sh b/.github/scripts/update_pr_description.sh
index 783cf54d82..f1a092d6cf 100755
--- a/.github/scripts/update_pr_description.sh
+++ b/.github/scripts/update_pr_description.sh
@@ -13,12 +13,12 @@ DOCKER_RUN_COMMAND="docker run -it --rm \
-p 3000:3000 \
-v /var/run/docker.sock:/var/run/docker.sock \
--add-host host.docker.internal:host-gateway \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:${SHORT_SHA}-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:${SHORT_SHA}-nikolaik \
--name openhands-app-${SHORT_SHA} \
- docker.all-hands.dev/all-hands-ai/openhands:${SHORT_SHA}"
+ docker.all-hands.dev/openhands/openhands:${SHORT_SHA}"
# Define the uvx command
-UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/All-Hands-AI/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
+UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/OpenHands/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
# Get the current PR body
PR_BODY=$(gh pr view "$PR_NUMBER" --json body --jq .body)
diff --git a/.github/workflows/dispatch-to-docs.yml b/.github/workflows/dispatch-to-docs.yml
index b784f67392..301cab5fa5 100644
--- a/.github/workflows/dispatch-to-docs.yml
+++ b/.github/workflows/dispatch-to-docs.yml
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
- repo: ["All-Hands-AI/docs"]
+ repo: ["OpenHands/docs"]
steps:
- name: Push to docs repo
uses: peter-evans/repository-dispatch@v3
diff --git a/.github/workflows/enterprise-preview.yml b/.github/workflows/enterprise-preview.yml
index 9a66fda825..e31222827b 100644
--- a/.github/workflows/enterprise-preview.yml
+++ b/.github/workflows/enterprise-preview.yml
@@ -26,4 +26,4 @@ jobs:
-H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
-H "Accept: application/vnd.github+json" \
-d "{\"ref\": \"main\", \"inputs\": {\"openhandsPrNumber\": \"${{ github.event.pull_request.number }}\", \"deployEnvironment\": \"feature\", \"enterpriseImageTag\": \"pr-${{ github.event.pull_request.number }}\" }}" \
- https://api.github.com/repos/All-Hands-AI/deploy/actions/workflows/deploy.yaml/dispatches
+ https://api.github.com/repos/OpenHands/deploy/actions/workflows/deploy.yaml/dispatches
diff --git a/.github/workflows/ghcr-build.yml b/.github/workflows/ghcr-build.yml
index 7675911076..c84560ab6a 100644
--- a/.github/workflows/ghcr-build.yml
+++ b/.github/workflows/ghcr-build.yml
@@ -252,7 +252,7 @@ jobs:
-H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
-H "Accept: application/vnd.github+json" \
-d "{\"ref\": \"main\", \"inputs\": {\"openhandsPrNumber\": \"${{ github.event.pull_request.number }}\", \"deployEnvironment\": \"feature\", \"enterpriseImageTag\": \"pr-${{ github.event.pull_request.number }}\" }}" \
- https://api.github.com/repos/All-Hands-AI/deploy/actions/workflows/deploy.yaml/dispatches
+ https://api.github.com/repos/OpenHands/deploy/actions/workflows/deploy.yaml/dispatches
# Run unit tests with the Docker runtime Docker images as root
test_runtime_root:
diff --git a/.github/workflows/openhands-resolver.yml b/.github/workflows/openhands-resolver.yml
index 1012df45ca..cfb7298974 100644
--- a/.github/workflows/openhands-resolver.yml
+++ b/.github/workflows/openhands-resolver.yml
@@ -201,7 +201,7 @@ jobs:
issue_number: ${{ env.ISSUE_NUMBER }},
owner: context.repo.owner,
repo: context.repo.repo,
- body: `[OpenHands](https://github.com/All-Hands-AI/OpenHands) started fixing the ${issueType}! You can monitor the progress [here](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}).`
+ body: `[OpenHands](https://github.com/OpenHands/OpenHands) started fixing the ${issueType}! You can monitor the progress [here](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}).`
});
- name: Install OpenHands
@@ -233,7 +233,7 @@ jobs:
if (isExperimentalLabel || isIssueCommentExperimental || isReviewCommentExperimental) {
console.log("Installing experimental OpenHands...");
- await exec.exec("pip install git+https://github.com/all-hands-ai/openhands.git");
+ await exec.exec("pip install git+https://github.com/openhands/openhands.git");
} else {
console.log("Installing from requirements.txt...");
diff --git a/.github/workflows/run-eval.yml b/.github/workflows/run-eval.yml
index 6bca1df097..d586a0b0a6 100644
--- a/.github/workflows/run-eval.yml
+++ b/.github/workflows/run-eval.yml
@@ -101,7 +101,7 @@ jobs:
-H "Authorization: Bearer ${{ secrets.PAT_TOKEN }}" \
-H "Accept: application/vnd.github+json" \
-d "{\"ref\": \"main\", \"inputs\": {\"github-repo\": \"${{ steps.eval_params.outputs.repo_url }}\", \"github-branch\": \"${{ steps.eval_params.outputs.eval_branch }}\", \"pr-number\": \"${PR_NUMBER}\", \"eval-instances\": \"${{ steps.eval_params.outputs.eval_instances }}\"}}" \
- https://api.github.com/repos/All-Hands-AI/evaluation/actions/workflows/create-branch.yml/dispatches
+ https://api.github.com/repos/OpenHands/evaluation/actions/workflows/create-branch.yml/dispatches
# Send Slack message
if [[ "${{ github.event_name }}" == "pull_request" ]]; then
diff --git a/.openhands/microagents/repo.md b/.openhands/microagents/repo.md
index 9fbe104109..ceb87bc2f7 100644
--- a/.openhands/microagents/repo.md
+++ b/.openhands/microagents/repo.md
@@ -83,6 +83,116 @@ VSCode Extension:
- Use `vscode.window.createOutputChannel()` for debug logging instead of `showErrorMessage()` popups
- Pre-commit process runs both frontend and backend checks when committing extension changes
+## Enterprise Directory
+
+The `enterprise/` directory contains additional functionality that extends the open-source OpenHands codebase. This includes:
+- Authentication and user management (Keycloak integration)
+- Database migrations (Alembic)
+- Integration services (GitHub, GitLab, Jira, Linear, Slack)
+- Billing and subscription management (Stripe)
+- Telemetry and analytics (PostHog, custom metrics framework)
+
+### Enterprise Development Setup
+
+**Prerequisites:**
+- Python 3.12
+- Poetry (for dependency management)
+- Node.js 22.x (for frontend)
+- Docker (optional)
+
+**Setup Steps:**
+1. First, build the main OpenHands project: `make build`
+2. Then install enterprise dependencies: `cd enterprise && poetry install --with dev,test` (This can take a very long time. Be patient.)
+3. Set up enterprise pre-commit hooks: `poetry run pre-commit install --config ./dev_config/python/.pre-commit-config.yaml`
+
+**Running Enterprise Tests:**
+```bash
+# Enterprise unit tests (full suite)
+PYTHONPATH=".:$PYTHONPATH" poetry run --project=enterprise pytest --forked -n auto -s -p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark ./enterprise/tests/unit --cov=enterprise --cov-branch
+
+# Test specific modules (faster for development)
+cd enterprise
+PYTHONPATH=".:$PYTHONPATH" poetry run pytest tests/unit/telemetry/ --confcutdir=tests/unit/telemetry
+
+# Enterprise linting (IMPORTANT: use --show-diff-on-failure to match GitHub CI)
+poetry run pre-commit run --all-files --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
+```
+
+**Running Enterprise Server:**
+```bash
+cd enterprise
+make start-backend # Development mode with hot reload
+# or
+make run # Full application (backend + frontend)
+```
+
+**Key Configuration Files:**
+- `enterprise/pyproject.toml` - Enterprise-specific dependencies
+- `enterprise/Makefile` - Enterprise build and run commands
+- `enterprise/dev_config/python/` - Linting and type checking configuration
+- `enterprise/migrations/` - Database migration files
+
+**Database Migrations:**
+Enterprise uses Alembic for database migrations. When making schema changes:
+1. Create migration files in `enterprise/migrations/versions/`
+2. Test migrations thoroughly
+3. The CI will check for migration conflicts on PRs
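+
+For illustration, a minimal Alembic migration file might look like the sketch below (the revision identifiers, table, and columns are hypothetical placeholders):
+
+```python
+"""add example table"""
+from alembic import op
+import sqlalchemy as sa
+
+# Revision identifiers used by Alembic (placeholders).
+revision = 'abc123'
+down_revision = 'def456'
+
+
+def upgrade() -> None:
+    op.create_table(
+        'example_table',
+        sa.Column('id', sa.Integer, primary_key=True),
+        sa.Column('created_at', sa.DateTime(timezone=True),
+                  server_default=sa.text('CURRENT_TIMESTAMP')),
+    )
+
+
+def downgrade() -> None:
+    op.drop_table('example_table')
+```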
+
+**Integration Development:**
+The enterprise codebase includes integrations for:
+- **GitHub** - PR management, webhooks, app installations
+- **GitLab** - Similar to GitHub but for GitLab instances
+- **Jira** - Issue tracking and project management
+- **Linear** - Modern issue tracking
+- **Slack** - Team communication and notifications
+
+Each integration follows a consistent pattern with service classes, storage models, and API endpoints.
+
+**Important Notes:**
+- Enterprise code is licensed under Polyform Free Trial License (30-day limit)
+- The enterprise server extends the OSS server through dynamic imports
+- Database changes require careful migration planning in `enterprise/migrations/`
+- Always test changes in both OSS and enterprise contexts
+- Use the enterprise-specific Makefile commands for development
+
+**Enterprise Testing Best Practices:**
+
+**Database Testing:**
+- Use SQLite in-memory databases (`sqlite:///:memory:`) for unit tests instead of real PostgreSQL
+- Create module-specific `conftest.py` files with database fixtures
+- Mock external database connections in unit tests to avoid dependency on running services
+- Use real database connections only for integration tests
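+
+As a sketch, a module-level `conftest.py` fixture for an in-memory database might look like this (assumes SQLAlchemy models sharing a declarative `Base`; names are illustrative):
+
+```python
+# enterprise/tests/unit/<module>/conftest.py (illustrative)
+import pytest
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+
+
+@pytest.fixture
+def session_maker():
+    """Session factory backed by an in-memory SQLite database."""
+    engine = create_engine('sqlite:///:memory:')
+    # Create the tables needed by the module under test, e.g.:
+    # Base.metadata.create_all(engine)
+    return sessionmaker(bind=engine)
+```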
+
+**Import Patterns:**
+- Use relative imports without `enterprise.` prefix in enterprise code
+- Example: `from storage.database import session_maker` not `from enterprise.storage.database import session_maker`
+- This ensures code works in both OSS and enterprise contexts
+
+**Test Structure:**
+- Place tests in `enterprise/tests/unit/` following the same structure as the source code
+- Use `--confcutdir=tests/unit/[module]` when testing specific modules
+- Create comprehensive fixtures for complex objects (databases, external services)
+- Write platform-agnostic tests (avoid hardcoded OS-specific assertions)
+
+**Mocking Strategy:**
+- Use `AsyncMock` for async operations and `MagicMock` for complex objects
+- Mock all external dependencies (databases, APIs, file systems) in unit tests
+- Use `patch` with correct import paths (e.g., `telemetry.registry.logger` not `enterprise.telemetry.registry.logger`)
+- Test both success and failure scenarios with proper error handling
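+
+A minimal sketch of these patterns (the patched module path is hypothetical):
+
+```python
+import asyncio
+from unittest.mock import AsyncMock, patch
+
+
+async def upload_metric(client, key, value):
+    """Toy async operation standing in for an enterprise service call."""
+    await client.send_metric(key, value)
+
+
+def test_upload_success_and_failure():
+    # AsyncMock for async operations
+    client = AsyncMock()
+    asyncio.run(upload_metric(client, 'total_users', 3))
+    client.send_metric.assert_awaited_once_with('total_users', 3)
+
+    # Failure path: the mocked call raises and the caller must handle it
+    client.send_metric.side_effect = RuntimeError('network down')
+    try:
+        asyncio.run(upload_metric(client, 'total_users', 3))
+    except RuntimeError:
+        pass
+
+    # patch() targets the path where a name is looked up, without the
+    # `enterprise.` prefix, e.g.:
+    # with patch('telemetry.registry.logger') as mock_logger: ...
+```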
+
+**Coverage Goals:**
+- Aim for 90%+ test coverage on new enterprise modules
+- Focus on critical business logic and error handling paths
+- Use `--cov-report=term-missing` to identify uncovered lines
+
+**Troubleshooting:**
+- If tests fail, ensure all dependencies are installed: `poetry install --with dev,test`
+- For database issues, check migration status and run migrations if needed
+- For frontend issues, ensure the main OpenHands frontend is built: `make build`
+- Check logs in the `logs/` directory for runtime issues
+- If tests fail with import errors, verify `PYTHONPATH=".:$PYTHONPATH"` is set
+- **If GitHub CI fails but local linting passes**: Always use `--show-diff-on-failure` flag to match CI behavior exactly
+
## Template for Github Pull Request
If you are starting a pull request (PR), please follow the template in `.github/pull_request_template.md`.
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 046d897c0d..aa03899d44 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -124,7 +124,7 @@ These Slack etiquette guidelines are designed to foster an inclusive, respectful
- Post questions or discussions in the most relevant channel (e.g., for [slack - #general](https://openhands-ai.slack.com/archives/C06P5NCGSFP) for general topics, [slack - #questions](https://openhands-ai.slack.com/archives/C06U8UTKSAD) for queries/questions.
- When asking for help or raising issues, include necessary details like links, screenshots, or clear explanations to provide context.
- Keep discussions in public channels whenever possible to allow others to benefit from the conversation, unless the matter is sensitive or private.
-- Always adhere to [our standards](https://github.com/All-Hands-AI/OpenHands/blob/main/CODE_OF_CONDUCT.md#our-standards) to ensure a welcoming and collaborative environment.
+- Always adhere to [our standards](https://github.com/OpenHands/OpenHands/blob/main/CODE_OF_CONDUCT.md#our-standards) to ensure a welcoming and collaborative environment.
- If you choose to mute a channel, consider setting up alerts for topics that still interest you to stay engaged. For Slack, Go to Settings → Notifications → My Keywords to add specific keywords that will notify you when mentioned. For example, if you're here for discussions about LLMs, mute the channel if it’s too busy, but set notifications to alert you only when “LLMs” appears in messages.
## Attribution
diff --git a/COMMUNITY.md b/COMMUNITY.md
index 6edb4dff31..1c49b3932e 100644
--- a/COMMUNITY.md
+++ b/COMMUNITY.md
@@ -8,7 +8,7 @@ If this resonates with you, we'd love to have you join us in our quest!
## 🤝 How to Join
-Check out our [How to Join the Community section.](https://github.com/All-Hands-AI/OpenHands?tab=readme-ov-file#-how-to-join-the-community)
+Check out our [How to Join the Community section.](https://github.com/OpenHands/OpenHands?tab=readme-ov-file#-how-to-join-the-community)
## 💪 Becoming a Contributor
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 39c7341dcf..a605abaf64 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -13,15 +13,15 @@ To understand the codebase, please refer to the README in each module:
## Setting up Your Development Environment
-We have a separate doc [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) that tells you how to set up a development workflow.
+We have a separate doc [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md) that tells you how to set up a development workflow.
## How Can I Contribute?
There are many ways that you can contribute:
-1. **Download and use** OpenHands, and send [issues](https://github.com/All-Hands-AI/OpenHands/issues) when you encounter something that isn't working or a feature that you'd like to see.
+1. **Download and use** OpenHands, and send [issues](https://github.com/OpenHands/OpenHands/issues) when you encounter something that isn't working or a feature that you'd like to see.
2. **Send feedback** after each session by [clicking the thumbs-up thumbs-down buttons](https://docs.all-hands.dev/usage/feedback), so we can see where things are working and failing, and also build an open dataset for training code agents.
-3. **Improve the Codebase** by sending [PRs](#sending-pull-requests-to-openhands) (see details below). In particular, we have some [good first issues](https://github.com/All-Hands-AI/OpenHands/labels/good%20first%20issue) that may be ones to start on.
+3. **Improve the Codebase** by sending [PRs](#sending-pull-requests-to-openhands) (see details below). In particular, we have some [good first issues](https://github.com/OpenHands/OpenHands/labels/good%20first%20issue) that may be ones to start on.
## What Can I Build?
Here are a few ways you can help improve the codebase.
@@ -35,7 +35,7 @@ of the application, please open an issue first, or better, join the #eng-ui-ux c
to gather consensus from our design team first.
#### Improving the agent
-Our main agent is the CodeAct agent. You can [see its prompts here](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/agenthub/codeact_agent).
+Our main agent is the CodeAct agent. You can [see its prompts here](https://github.com/OpenHands/OpenHands/tree/main/openhands/agenthub/codeact_agent).
Changes to these prompts, and to the underlying behavior in Python, can have a huge impact on user experience.
You can try modifying the prompts to see how they change the behavior of the agent as you use the app
@@ -54,7 +54,7 @@ The agent needs a place to run code and commands. When you run OpenHands on your
to do this by default. But there are other ways of creating a sandbox for the agent.
If you work for a company that provides a cloud-based runtime, you could help us add support for that runtime
-by implementing the [interface specified here](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/runtime/base.py).
+by implementing the [interface specified here](https://github.com/OpenHands/OpenHands/blob/main/openhands/runtime/base.py).
#### Testing
When you write code, it is also good to write tests. Please navigate to the [`./tests`](./tests) folder to see existing test suites.
@@ -84,7 +84,7 @@ For example, a PR title could be:
- `refactor: modify package path`
- `feat(frontend): xxxx`, where `(frontend)` means that this PR mainly focuses on the frontend component.
-You may also check out previous PRs in the [PR list](https://github.com/All-Hands-AI/OpenHands/pulls).
+You may also check out previous PRs in the [PR list](https://github.com/OpenHands/OpenHands/pulls).
### Pull Request description
- If your PR is small (such as a typo fix), you can go brief.
@@ -97,7 +97,7 @@ please include a short message that we can add to our changelog.
### Opening Issues
-If you notice any bugs or have any feature requests please open them via the [issues page](https://github.com/All-Hands-AI/OpenHands/issues). We will triage based on how critical the bug is or how potentially useful the improvement is, discuss, and implement the ones that the community has interest/effort for.
+If you notice any bugs or have any feature requests please open them via the [issues page](https://github.com/OpenHands/OpenHands/issues). We will triage based on how critical the bug is or how potentially useful the improvement is, discuss, and implement the ones that the community has interest/effort for.
Further, if you see an issue you like, please leave a "thumbs-up" or a comment, which will help us prioritize.
diff --git a/CREDITS.md b/CREDITS.md
index 873742b7e0..3dc74fe103 100644
--- a/CREDITS.md
+++ b/CREDITS.md
@@ -2,7 +2,7 @@
## Contributors
-We would like to thank all the [contributors](https://github.com/All-Hands-AI/OpenHands/graphs/contributors) who have helped make OpenHands possible. We greatly appreciate your dedication and hard work.
+We would like to thank all the [contributors](https://github.com/OpenHands/OpenHands/graphs/contributors) who have helped make OpenHands possible. We greatly appreciate your dedication and hard work.
## Open Source Projects
@@ -14,7 +14,7 @@ OpenHands includes and adapts the following open source projects. We are gratefu
#### [Aider](https://github.com/paul-gauthier/aider)
- License: Apache License 2.0
- - Description: AI pair programming tool. OpenHands has adapted and integrated its linter module for code-related tasks in [`agentskills utilities`](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/runtime/plugins/agent_skills/utils/aider)
+ - Description: AI pair programming tool. OpenHands has adapted and integrated its linter module for code-related tasks in [`agentskills utilities`](https://github.com/OpenHands/OpenHands/tree/main/openhands/runtime/plugins/agent_skills/utils/aider)
#### [BrowserGym](https://github.com/ServiceNow/BrowserGym)
- License: Apache License 2.0
diff --git a/Development.md b/Development.md
index 98e7f827f9..31451091bb 100644
--- a/Development.md
+++ b/Development.md
@@ -2,7 +2,7 @@
This guide is for people working on OpenHands and editing the source code.
If you wish to contribute your changes, check out the
-[CONTRIBUTING.md](https://github.com/All-Hands-AI/OpenHands/blob/main/CONTRIBUTING.md)
+[CONTRIBUTING.md](https://github.com/OpenHands/OpenHands/blob/main/CONTRIBUTING.md)
on how to clone and setup the project initially before moving on. Otherwise,
you can clone the OpenHands project directly.
@@ -193,7 +193,7 @@ Here's a guide to the important documentation files in the repository:
- [/README.md](./README.md): Main project overview, features, and basic setup instructions
- [/Development.md](./Development.md) (this file): Comprehensive guide for developers working on OpenHands
- [/CONTRIBUTING.md](./CONTRIBUTING.md): Guidelines for contributing to the project, including code style and PR process
-- [/docs/DOC_STYLE_GUIDE.md](./docs/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
+- [DOC_STYLE_GUIDE.md](https://github.com/OpenHands/docs/blob/main/openhands/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
- [/openhands/README.md](./openhands/README.md): Details about the backend Python implementation
- [/frontend/README.md](./frontend/README.md): Frontend React application setup and development guide
- [/containers/README.md](./containers/README.md): Information about Docker containers and deployment
diff --git a/README.md b/README.md
index a336a38635..96fe34aee9 100644
--- a/README.md
+++ b/README.md
@@ -7,26 +7,26 @@
@@ -119,7 +119,7 @@ system requirements and more information.
> It is not appropriate for multi-tenant deployments where multiple users share the same instance. There is no built-in authentication, isolation, or scalability.
>
> If you're interested in running OpenHands in a multi-tenant environment, check out the source-available, commercially-licensed
-> [OpenHands Cloud Helm Chart](https://github.com/all-Hands-AI/OpenHands-cloud)
+> [OpenHands Cloud Helm Chart](https://github.com/OpenHands/OpenHands-cloud)
You can [connect OpenHands to your local filesystem](https://docs.all-hands.dev/usage/runtimes/docker#connecting-to-your-filesystem),
interact with it via a [friendly CLI](https://docs.all-hands.dev/usage/how-to/cli-mode),
@@ -128,7 +128,7 @@ or run it on tagged issues with [a github action](https://docs.all-hands.dev/usa
Visit [Running OpenHands](https://docs.all-hands.dev/usage/installation) for more information and setup instructions.
-If you want to modify the OpenHands source code, check out [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md).
+If you want to modify the OpenHands source code, check out [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md).
Having issues? The [Troubleshooting Guide](https://docs.all-hands.dev/usage/troubleshooting) can help.
@@ -146,17 +146,17 @@ OpenHands is a community-driven project, and we welcome contributions from every
through Slack, so this is the best place to start, but we also are happy to have you contact us on Github:
- [Join our Slack workspace](https://all-hands.dev/joinslack) - Here we talk about research, architecture, and future development.
-- [Read or post Github Issues](https://github.com/All-Hands-AI/OpenHands/issues) - Check out the issues we're working on, or add your own ideas.
+- [Read or post Github Issues](https://github.com/OpenHands/OpenHands/issues) - Check out the issues we're working on, or add your own ideas.
See more about the community in [COMMUNITY.md](./COMMUNITY.md) or find details on contributing in [CONTRIBUTING.md](./CONTRIBUTING.md).
## 📈 Progress
-See the monthly OpenHands roadmap [here](https://github.com/orgs/All-Hands-AI/projects/1) (updated at the maintainer's meeting at the end of each month).
+See the monthly OpenHands roadmap [here](https://github.com/orgs/OpenHands/projects/1) (updated at the maintainer's meeting at the end of each month).
-
-
+
+
diff --git a/config.template.toml b/config.template.toml
index e7b7836dcd..68b4eed281 100644
--- a/config.template.toml
+++ b/config.template.toml
@@ -189,7 +189,7 @@ model = "gpt-4o"
# Whether to use native tool calling if supported by the model. Can be true, false, or None by default, which chooses the model's default behavior based on the evaluation.
# ATTENTION: Based on evaluation, enabling native function calling may lead to worse results
# in some scenarios. Use with caution and consider testing with your specific use case.
-# https://github.com/All-Hands-AI/OpenHands/pull/4711
+# https://github.com/OpenHands/OpenHands/pull/4711
#native_tool_calling = None
diff --git a/containers/app/config.sh b/containers/app/config.sh
index 6ed9ac329e..41d00c84a0 100644
--- a/containers/app/config.sh
+++ b/containers/app/config.sh
@@ -1,4 +1,4 @@
DOCKER_REGISTRY=ghcr.io
-DOCKER_ORG=all-hands-ai
+DOCKER_ORG=openhands
DOCKER_IMAGE=openhands
DOCKER_BASE_DIR="."
diff --git a/containers/runtime/config.sh b/containers/runtime/config.sh
index 99d2eb66cc..d0250b6dfe 100644
--- a/containers/runtime/config.sh
+++ b/containers/runtime/config.sh
@@ -1,5 +1,5 @@
DOCKER_REGISTRY=ghcr.io
-DOCKER_ORG=all-hands-ai
+DOCKER_ORG=openhands
DOCKER_BASE_DIR="./containers/runtime"
DOCKER_IMAGE=runtime
# These variables will be appended by the runtime_build.py script
diff --git a/docker-compose.yml b/docker-compose.yml
index 8e3767518e..f88a2d1c7f 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -7,7 +7,7 @@ services:
image: openhands:latest
container_name: openhands-app-${DATE:-}
environment:
- - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/all-hands-ai/runtime:0.59-nikolaik}
+ - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/openhands/runtime:0.59-nikolaik}
#- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of ~/.openhands for this user
- WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
ports:
diff --git a/enterprise/README.md b/enterprise/README.md
index a70abc39a8..8be6f3bd8a 100644
--- a/enterprise/README.md
+++ b/enterprise/README.md
@@ -8,7 +8,7 @@
This directory contains the enterprise server used by [OpenHands Cloud](https://github.com/All-Hands-AI/OpenHands-Cloud/). The official, public version of OpenHands Cloud is available at
[app.all-hands.dev](https://app.all-hands.dev).
-You may also want to check out the MIT-licensed [OpenHands](https://github.com/All-Hands-AI/OpenHands)
+You may also want to check out the MIT-licensed [OpenHands](https://github.com/OpenHands/OpenHands)
## Extension of OpenHands (OSS)
@@ -16,7 +16,7 @@ The code in `/enterprise` directory builds on top of open source (OSS) code, ext
- Enterprise stacks on top of OSS. For example, the middleware in enterprise is stacked right on top of the middlewares in OSS. In `SAAS`, the middleware from BOTH repos will be present and running (which can sometimes cause conflicts)
-- Enterprise overrides the implementation in OSS (only one is present at a time). For example, the server config SaasServerConfig which overrides [`ServerConfig`](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/server/config/server_config.py#L8) on OSS. This is done through dynamic imports ([see here](https://github.com/All-Hands-AI/OpenHands/blob/main/openhands/server/config/server_config.py#L37-#L45))
+- Enterprise overrides the implementation in OSS (only one is present at a time). For example, the server config `SaasServerConfig` overrides [`ServerConfig`](https://github.com/OpenHands/OpenHands/blob/main/openhands/server/config/server_config.py#L8) in OSS. This is done through dynamic imports ([see here](https://github.com/OpenHands/OpenHands/blob/main/openhands/server/config/server_config.py#L37-L45))
Key areas that change on `SAAS` are
diff --git a/enterprise/doc/design-doc/openhands-enterprise-telemetry-design.md b/enterprise/doc/design-doc/openhands-enterprise-telemetry-design.md
new file mode 100644
index 0000000000..4fc9f72c00
--- /dev/null
+++ b/enterprise/doc/design-doc/openhands-enterprise-telemetry-design.md
@@ -0,0 +1,856 @@
+# OpenHands Enterprise Usage Telemetry Service
+
+## Table of Contents
+
+1. [Introduction](#1-introduction)
+ - 1.1 [Problem Statement](#11-problem-statement)
+ - 1.2 [Proposed Solution](#12-proposed-solution)
+2. [User Interface](#2-user-interface)
+ - 2.1 [License Warning Banner](#21-license-warning-banner)
+ - 2.2 [Administrator Experience](#22-administrator-experience)
+3. [Other Context](#3-other-context)
+ - 3.1 [Replicated Platform Integration](#31-replicated-platform-integration)
+ - 3.2 [Administrator Email Detection Strategy](#32-administrator-email-detection-strategy)
+ - 3.3 [Metrics Collection Framework](#33-metrics-collection-framework)
+4. [Technical Design](#4-technical-design)
+ - 4.1 [Database Schema](#41-database-schema)
+ - 4.1.1 [Telemetry Metrics Table](#411-telemetry-metrics-table)
+ - 4.1.2 [Telemetry Identity Table](#412-telemetry-identity-table)
+ - 4.2 [Metrics Collection Framework](#42-metrics-collection-framework)
+ - 4.2.1 [Base Collector Interface](#421-base-collector-interface)
+ - 4.2.2 [Collector Registry](#422-collector-registry)
+ - 4.2.3 [Example Collector Implementation](#423-example-collector-implementation)
+ - 4.3 [Collection and Upload System](#43-collection-and-upload-system)
+ - 4.3.1 [Metrics Collection Processor](#431-metrics-collection-processor)
+ - 4.3.2 [Replicated Upload Processor](#432-replicated-upload-processor)
+ - 4.4 [License Warning System](#44-license-warning-system)
+ - 4.4.1 [License Status Endpoint](#441-license-status-endpoint)
+ - 4.4.2 [UI Integration](#442-ui-integration)
+ - 4.5 [Cronjob Configuration](#45-cronjob-configuration)
+ - 4.5.1 [Collection Cronjob](#451-collection-cronjob)
+ - 4.5.2 [Upload Cronjob](#452-upload-cronjob)
+5. [Implementation Plan](#5-implementation-plan)
+ - 5.1 [Database Schema and Models (M1)](#51-database-schema-and-models-m1)
+ - 5.1.1 [OpenHands - Database Migration](#511-openhands---database-migration)
+ - 5.1.2 [OpenHands - Model Tests](#512-openhands---model-tests)
+ - 5.2 [Metrics Collection Framework (M2)](#52-metrics-collection-framework-m2)
+ - 5.2.1 [OpenHands - Core Collection Framework](#521-openhands---core-collection-framework)
+ - 5.2.2 [OpenHands - Example Collectors](#522-openhands---example-collectors)
+ - 5.2.3 [OpenHands - Framework Tests](#523-openhands---framework-tests)
+ - 5.3 [Collection and Upload Processors (M3)](#53-collection-and-upload-processors-m3)
+ - 5.3.1 [OpenHands - Collection Processor](#531-openhands---collection-processor)
+ - 5.3.2 [OpenHands - Upload Processor](#532-openhands---upload-processor)
+ - 5.3.3 [OpenHands - Integration Tests](#533-openhands---integration-tests)
+ - 5.4 [License Warning API (M4)](#54-license-warning-api-m4)
+ - 5.4.1 [OpenHands - License Status API](#541-openhands---license-status-api)
+ - 5.4.2 [OpenHands - API Integration](#542-openhands---api-integration)
+ - 5.5 [UI Warning Banner (M5)](#55-ui-warning-banner-m5)
+ - 5.5.1 [OpenHands - UI Warning Banner](#551-openhands---ui-warning-banner)
+ - 5.5.2 [OpenHands - UI Integration](#552-openhands---ui-integration)
+ - 5.6 [Helm Chart Deployment Configuration (M6)](#56-helm-chart-deployment-configuration-m6)
+ - 5.6.1 [OpenHands-Cloud - Cronjob Manifests](#561-openhands-cloud---cronjob-manifests)
+ - 5.6.2 [OpenHands-Cloud - Configuration Management](#562-openhands-cloud---configuration-management)
+ - 5.7 [Documentation and Enhanced Collectors (M7)](#57-documentation-and-enhanced-collectors-m7)
+ - 5.7.1 [OpenHands - Advanced Collectors](#571-openhands---advanced-collectors)
+ - 5.7.2 [OpenHands - Monitoring and Testing](#572-openhands---monitoring-and-testing)
+ - 5.7.3 [OpenHands - Technical Documentation](#573-openhands---technical-documentation)
+
+## 1. Introduction
+
+### 1.1 Problem Statement
+
+OpenHands Enterprise (OHE) helm charts are publicly available but not open source, creating a visibility gap for the sales team. Unknown users can install and use OHE without the vendor's knowledge, preventing proper customer engagement and sales pipeline management. Without usage telemetry, the vendor cannot identify potential customers, track installation health, or proactively support users who may need assistance.
+
+### 1.2 Proposed Solution
+
+We propose implementing a comprehensive telemetry service that leverages the Replicated metrics platform and Python SDK to track OHE installations and usage. The solution provides automatic customer discovery, instance monitoring, and usage metrics collection while maintaining a clear license compliance pathway.
+
+The system consists of three main components: (1) a pluggable metrics collection framework that allows developers to easily define and register custom metrics collectors, (2) automated cronjobs that periodically collect metrics and upload them to Replicated's vendor portal, and (3) a license compliance warning system that displays UI notifications when telemetry uploads fail, indicating potential license expiration.
+
+The design ensures that telemetry cannot be easily disabled without breaking core OHE functionality by tying the warning system to environment variables that are essential for OHE operation. This approach balances user transparency with business requirements for customer visibility.
+
+## 2. User Interface
+
+### 2.1 License Warning Banner
+
+When telemetry uploads fail for more than 4 days, users will see a prominent warning banner in the OpenHands Enterprise UI:
+
+```
+⚠️ Your OpenHands Enterprise license will expire in 30 days. Please contact support if this issue persists.
+```
+
+The banner appears at the top of all pages and cannot be permanently dismissed while the condition persists. Users can temporarily dismiss it, but it will reappear on page refresh until telemetry uploads resume successfully.
+
+### 2.2 Administrator Experience
+
+System administrators will not need to configure the telemetry system manually. The service automatically:
+
+1. **Detects OHE installations** using existing required environment variables (`GITHUB_APP_CLIENT_ID`, `KEYCLOAK_SERVER_URL`, etc.)
+
+2. **Generates unique customer identifiers** using administrator contact information:
+ - Customer email: Determined by the following priority order:
+ 1. `OPENHANDS_ADMIN_EMAIL` environment variable (if set in helm values)
+ 2. Email of the first user who accepted Terms of Service (earliest `accepted_tos` timestamp)
+ - Instance ID: Automatically generated by Replicated SDK using machine fingerprinting (IOPlatformUUID on macOS, D-Bus machine ID on Linux, Machine GUID on Windows)
+ - **No Fallback**: If neither email source is available, telemetry collection is skipped until at least one user exists
+
+3. **Collects and uploads metrics transparently** in the background via weekly collection and daily upload cronjobs
+
+4. **Displays warnings only when necessary** for license compliance - no notifications appear during normal operation
+
+## 3. Other Context
+
+### 3.1 Replicated Platform Integration
+
+The Replicated platform provides vendor-hosted infrastructure for collecting customer and instance telemetry. The Python SDK handles authentication, state management, and reliable metric delivery. Key concepts:
+
+- **Customer**: Represents a unique OHE installation, identified by email or installation fingerprint
+- **Instance**: Represents a specific deployment of OHE for a customer
+- **Metrics**: Custom key-value data points collected from the installation
+- **Status**: Instance health indicators (running, degraded, updating, etc.)
+
+The SDK automatically handles machine fingerprinting, local state caching, and retry logic for failed uploads.
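+
+A minimal sketch of how these concepts map onto SDK calls, assuming the `AsyncReplicatedClient` interface used in section 4.3.2:
+
+```python
+import asyncio
+
+from replicated import AsyncReplicatedClient, InstanceStatus
+
+
+async def report_once(publishable_key: str, app_slug: str, admin_email: str) -> None:
+    async with AsyncReplicatedClient(
+        publishable_key=publishable_key, app_slug=app_slug
+    ) as client:
+        # Customer: one per OHE installation, keyed by the admin email
+        customer = await client.customer.get_or_create(email_address=admin_email)
+        # Instance: one per deployment, fingerprinted automatically by the SDK
+        instance = await customer.get_or_create_instance()
+        # Metrics and status are reported through the instance handle
+        await instance.send_metric('total_users', 42)
+        await instance.set_status(InstanceStatus.RUNNING)
+
+
+# asyncio.run(report_once('<publishable-key>', 'openhands-enterprise', 'admin@example.com'))
+```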
+
+### 3.2 Administrator Email Detection Strategy
+
+To identify the appropriate administrator contact for sales outreach, the system uses a two-tier approach that avoids performance penalties on user authentication:
+
+**Tier 1: Explicit Configuration** - The `OPENHANDS_ADMIN_EMAIL` environment variable allows administrators to explicitly specify the contact email during deployment.
+
+**Tier 2: First Active User Detection** - If no explicit email is configured, the system identifies the first user who accepted Terms of Service (earliest `accepted_tos` timestamp with a valid email). This represents the first person to actively engage with the system and is very likely the administrator or installer.
+
+**No Fallback Needed** - If neither email source is available, telemetry collection is skipped entirely. This ensures we only report meaningful usage data when there are actual active users.
+
+**Performance Optimization**: The admin email determination is performed only during telemetry upload attempts, ensuring zero performance impact on user login flows.
+
+### 3.3 Metrics Collection Framework
+
+The proposed collector framework allows developers to define metrics in a single file change:
+
+```python
+@register_collector("user_activity")
+class UserActivityCollector(MetricsCollector):
+    @property
+    def collector_name(self) -> str:
+        return "user_activity"
+
+    def collect(self) -> List[MetricResult]:
+        # Query database and return metrics
+        return [MetricResult(key="active_users_7d", value=count),
+                MetricResult(key="conversations_created", value=total)]
+```
+
+Collectors are automatically discovered and executed by the collection cronjob, making the system extensible without modifying core collection logic.
+
+## 4. Technical Design
+
+### 4.1 Database Schema
+
+#### 4.1.1 Telemetry Metrics Table
+
+Stores collected metrics with transmission status tracking:
+
+```sql
+CREATE TABLE telemetry_metrics (
+ id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
+ collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
+ metrics_data JSONB NOT NULL,
+ uploaded_at TIMESTAMP WITH TIME ZONE NULL,
+ upload_attempts INTEGER DEFAULT 0,
+ last_upload_error TEXT NULL,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE INDEX idx_telemetry_metrics_collected_at ON telemetry_metrics(collected_at);
+CREATE INDEX idx_telemetry_metrics_uploaded_at ON telemetry_metrics(uploaded_at);
+```
+
+#### 4.1.2 Telemetry Identity Table
+
+Stores persistent identity information that must survive container restarts:
+
+```sql
+CREATE TABLE telemetry_identity (
+ id INTEGER PRIMARY KEY DEFAULT 1,
+ customer_id VARCHAR(255) NULL,
+ instance_id VARCHAR(255) NULL,
+ created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
+ CONSTRAINT single_identity_row CHECK (id = 1)
+);
+```
+
+**Design Rationale:**
+- **Separation of Concerns**: Identity data (customer_id, instance_id) is separated from operational data
+- **Persistent vs Computed**: Only data that cannot be reliably recomputed is persisted
+- **Upload Tracking**: Upload timestamps are tied directly to the metrics they represent
+- **Simplified Queries**: System state can be derived from metrics table (e.g., `MAX(uploaded_at)` for last successful upload)
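+
+The `TelemetryMetrics` and `TelemetryIdentity` models referenced by the processors below might look roughly like this (a sketch mirroring the DDL above; in practice the declarative `Base` comes from the enterprise storage package):
+
+```python
+import uuid
+from datetime import datetime, timezone
+
+from sqlalchemy import CheckConstraint, Column, DateTime, Integer, String, Text
+from sqlalchemy.dialects.postgresql import JSONB, UUID
+from sqlalchemy.orm import declarative_base
+
+Base = declarative_base()  # placeholder for the shared enterprise Base
+
+
+class TelemetryMetrics(Base):
+    __tablename__ = 'telemetry_metrics'
+
+    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
+    collected_at = Column(
+        DateTime(timezone=True),
+        nullable=False,
+        default=lambda: datetime.now(timezone.utc),
+    )
+    metrics_data = Column(JSONB, nullable=False)
+    uploaded_at = Column(DateTime(timezone=True), nullable=True)
+    upload_attempts = Column(Integer, default=0)
+    last_upload_error = Column(Text, nullable=True)
+
+
+class TelemetryIdentity(Base):
+    __tablename__ = 'telemetry_identity'
+    __table_args__ = (CheckConstraint('id = 1', name='single_identity_row'),)
+
+    id = Column(Integer, primary_key=True, default=1)
+    customer_id = Column(String(255), nullable=True)
+    instance_id = Column(String(255), nullable=True)
+```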
+
+### 4.2 Metrics Collection Framework
+
+#### 4.2.1 Base Collector Interface
+
+```python
+from abc import ABC, abstractmethod
+from typing import Dict, Any, List
+from dataclasses import dataclass
+
+@dataclass
+class MetricResult:
+ key: str
+ value: Any
+
+class MetricsCollector(ABC):
+ """Base class for metrics collectors."""
+
+ @abstractmethod
+ def collect(self) -> List[MetricResult]:
+ """Collect metrics and return results."""
+ pass
+
+ @property
+ @abstractmethod
+ def collector_name(self) -> str:
+ """Unique name for this collector."""
+ pass
+
+ def should_collect(self) -> bool:
+ """Override to add collection conditions."""
+ return True
+```
+
+#### 4.2.2 Collector Registry
+
+```python
+from typing import Dict, Type, List
+import importlib
+import pkgutil
+
+class CollectorRegistry:
+ """Registry for metrics collectors."""
+
+ def __init__(self):
+ self._collectors: Dict[str, Type[MetricsCollector]] = {}
+
+ def register(self, collector_class: Type[MetricsCollector]) -> None:
+ """Register a collector class."""
+ collector = collector_class()
+ self._collectors[collector.collector_name] = collector_class
+
+ def get_all_collectors(self) -> List[MetricsCollector]:
+ """Get instances of all registered collectors."""
+ return [cls() for cls in self._collectors.values()]
+
+    def discover_collectors(self, package_path: str) -> None:
+        """Auto-discover collectors by importing every module in a package."""
+        # Importing a module executes its @register_collector decorators
+        package = importlib.import_module(package_path)
+        for _, module_name, _ in pkgutil.iter_modules(package.__path__):
+            importlib.import_module(f"{package_path}.{module_name}")
+
+# Global registry instance
+collector_registry = CollectorRegistry()
+
+def register_collector(name: str):
+ """Decorator to register a collector."""
+ def decorator(cls: Type[MetricsCollector]) -> Type[MetricsCollector]:
+ collector_registry.register(cls)
+ return cls
+ return decorator
+```
+
+#### 4.2.3 Example Collector Implementation
+
+```python
+@register_collector("system_metrics")
+class SystemMetricsCollector(MetricsCollector):
+ """Collects basic system and usage metrics."""
+
+ @property
+ def collector_name(self) -> str:
+ return "system_metrics"
+
+ def collect(self) -> List[MetricResult]:
+ results = []
+
+ # Collect user count
+ with session_maker() as session:
+ user_count = session.query(UserSettings).count()
+ results.append(MetricResult(
+ key="total_users",
+ value=user_count
+ ))
+
+            # Collect conversation count (last 30 days)
+            thirty_days_ago = datetime.now(timezone.utc) - timedelta(days=30)
+            conversation_count = session.query(StoredConversationMetadata)\
+                .filter(StoredConversationMetadata.created_at >= thirty_days_ago)\
+                .count()
+
+            results.append(MetricResult(
+                key="conversations_30d",
+                value=conversation_count
+            ))
+
+ return results
+```
+
+### 4.3 Collection and Upload System
+
+#### 4.3.1 Metrics Collection Processor
+
+```python
+class TelemetryCollectionProcessor(MaintenanceTaskProcessor):
+ """Maintenance task processor for collecting metrics."""
+
+ collection_interval_days: int = 7
+
+ async def __call__(self, task: MaintenanceTask) -> dict:
+ """Collect metrics from all registered collectors."""
+
+ # Check if collection is needed
+ if not self._should_collect():
+ return {"status": "skipped", "reason": "too_recent"}
+
+ # Collect metrics from all registered collectors
+ all_metrics = {}
+ collector_results = {}
+
+ for collector in collector_registry.get_all_collectors():
+ try:
+ if collector.should_collect():
+ results = collector.collect()
+ for result in results:
+ all_metrics[result.key] = result.value
+ collector_results[collector.collector_name] = len(results)
+ except Exception as e:
+ logger.error(f"Collector {collector.collector_name} failed: {e}")
+ collector_results[collector.collector_name] = f"error: {e}"
+
+ # Store metrics in database
+ with session_maker() as session:
+ telemetry_record = TelemetryMetrics(
+ metrics_data=all_metrics,
+ collected_at=datetime.now(timezone.utc)
+ )
+ session.add(telemetry_record)
+ session.commit()
+
+ # Note: No need to track last_collection_at separately
+ # Can be derived from MAX(collected_at) in telemetry_metrics
+
+ return {
+ "status": "completed",
+ "metrics_collected": len(all_metrics),
+ "collectors_run": collector_results
+ }
+
+ def _should_collect(self) -> bool:
+ """Check if collection is needed based on interval."""
+ with session_maker() as session:
+ # Get last collection time from metrics table
+ last_collected = session.query(func.max(TelemetryMetrics.collected_at)).scalar()
+ if not last_collected:
+ return True
+
+ time_since_last = datetime.now(timezone.utc) - last_collected
+ return time_since_last.days >= self.collection_interval_days
+```
+
+#### 4.3.2 Replicated Upload Processor
+
+```python
+from replicated import AsyncReplicatedClient, InstanceStatus
+
+class TelemetryUploadProcessor(MaintenanceTaskProcessor):
+ """Maintenance task processor for uploading metrics to Replicated."""
+
+ replicated_publishable_key: str
+ replicated_app_slug: str
+
+ async def __call__(self, task: MaintenanceTask) -> dict:
+ """Upload pending metrics to Replicated."""
+
+ # Get pending metrics
+ with session_maker() as session:
+ pending_metrics = session.query(TelemetryMetrics)\
+ .filter(TelemetryMetrics.uploaded_at.is_(None))\
+ .order_by(TelemetryMetrics.collected_at)\
+ .all()
+
+ if not pending_metrics:
+ return {"status": "no_pending_metrics"}
+
+ # Get admin email - skip if not available
+ admin_email = self._get_admin_email()
+ if not admin_email:
+ logger.info("Skipping telemetry upload - no admin email available")
+ return {
+ "status": "skipped",
+ "reason": "no_admin_email",
+ "total_processed": 0
+ }
+
+ uploaded_count = 0
+ failed_count = 0
+
+ async with AsyncReplicatedClient(
+ publishable_key=self.replicated_publishable_key,
+ app_slug=self.replicated_app_slug
+ ) as client:
+
+ # Get or create customer and instance
+ customer = await client.customer.get_or_create(
+ email_address=admin_email
+ )
+ instance = await customer.get_or_create_instance()
+
+ # Store customer/instance IDs for future use
+ await self._update_telemetry_identity(customer.customer_id, instance.instance_id)
+
+ # Upload each metric batch
+ for metric_record in pending_metrics:
+ try:
+ # Send individual metrics
+ for key, value in metric_record.metrics_data.items():
+ await instance.send_metric(key, value)
+
+ # Update instance status
+ await instance.set_status(InstanceStatus.RUNNING)
+
+ # Mark as uploaded
+ with session_maker() as session:
+ record = session.query(TelemetryMetrics)\
+ .filter(TelemetryMetrics.id == metric_record.id)\
+ .first()
+ if record:
+ record.uploaded_at = datetime.now(timezone.utc)
+ session.commit()
+
+ uploaded_count += 1
+
+ except Exception as e:
+ logger.error(f"Failed to upload metrics {metric_record.id}: {e}")
+
+ # Update error info
+ with session_maker() as session:
+ record = session.query(TelemetryMetrics)\
+ .filter(TelemetryMetrics.id == metric_record.id)\
+ .first()
+ if record:
+ record.upload_attempts += 1
+ record.last_upload_error = str(e)
+ session.commit()
+
+ failed_count += 1
+
+ # Note: No need to track last_successful_upload_at separately
+ # Can be derived from MAX(uploaded_at) in telemetry_metrics
+
+ return {
+ "status": "completed",
+ "uploaded": uploaded_count,
+ "failed": failed_count,
+ "total_processed": len(pending_metrics)
+ }
+
+ def _get_admin_email(self) -> str | None:
+ """Get administrator email for customer identification."""
+ # 1. Check environment variable first
+ env_admin_email = os.getenv('OPENHANDS_ADMIN_EMAIL')
+ if env_admin_email:
+ logger.info("Using admin email from environment variable")
+ return env_admin_email
+
+ # 2. Use first active user's email (earliest accepted_tos)
+ with session_maker() as session:
+ first_user = session.query(UserSettings)\
+ .filter(UserSettings.email.isnot(None))\
+ .filter(UserSettings.accepted_tos.isnot(None))\
+ .order_by(UserSettings.accepted_tos.asc())\
+ .first()
+
+ if first_user and first_user.email:
+ logger.info(f"Using first active user email: {first_user.email}")
+ return first_user.email
+
+ # No admin email available - skip telemetry
+ logger.info("No admin email available - skipping telemetry collection")
+ return None
+
+ async def _update_telemetry_identity(self, customer_id: str, instance_id: str) -> None:
+ """Update or create telemetry identity record."""
+ with session_maker() as session:
+ identity = session.query(TelemetryIdentity).first()
+ if not identity:
+ identity = TelemetryIdentity()
+ session.add(identity)
+
+ identity.customer_id = customer_id
+ identity.instance_id = instance_id
+ session.commit()
+```
+
+### 4.4 License Warning System
+
+#### 4.4.1 License Status Endpoint
+
+```python
+from fastapi import APIRouter
+from datetime import datetime, timezone, timedelta
+
+license_router = APIRouter()
+
+@license_router.get("/license-status")
+async def get_license_status():
+ """Get license warning status for UI display."""
+
+ # Only show warnings for OHE installations
+ if not _is_openhands_enterprise():
+ return {"warn": False, "message": ""}
+
+ with session_maker() as session:
+ # Get last successful upload time from metrics table
+ last_upload = session.query(func.max(TelemetryMetrics.uploaded_at))\
+ .filter(TelemetryMetrics.uploaded_at.isnot(None))\
+ .scalar()
+
+ if not last_upload:
+            # No successful uploads yet - warn that license verification is pending
+ return {
+ "warn": True,
+ "message": "OpenHands Enterprise license verification pending. Please ensure network connectivity."
+ }
+
+ # Check if last successful upload was more than 4 days ago
+ days_since_upload = (datetime.now(timezone.utc) - last_upload).days
+
+ if days_since_upload > 4:
+ # Find oldest unsent batch
+ oldest_unsent = session.query(TelemetryMetrics)\
+ .filter(TelemetryMetrics.uploaded_at.is_(None))\
+ .order_by(TelemetryMetrics.collected_at)\
+ .first()
+
+ if oldest_unsent:
+ # Calculate expiration date (oldest unsent + 34 days)
+ expiration_date = oldest_unsent.collected_at + timedelta(days=34)
+ days_until_expiration = (expiration_date - datetime.now(timezone.utc)).days
+
+ if days_until_expiration <= 0:
+ message = "Your OpenHands Enterprise license has expired. Please contact support immediately."
+ else:
+ message = f"Your OpenHands Enterprise license will expire in {days_until_expiration} days. Please contact support if this issue persists."
+
+ return {"warn": True, "message": message}
+
+ return {"warn": False, "message": ""}
+
+def _is_openhands_enterprise() -> bool:
+ """Detect if this is an OHE installation."""
+ # Check for required OHE environment variables
+ required_vars = [
+ 'GITHUB_APP_CLIENT_ID',
+ 'KEYCLOAK_SERVER_URL',
+ 'KEYCLOAK_REALM_NAME'
+ ]
+
+ return all(os.getenv(var) for var in required_vars)
+```
+
+#### 4.4.2 UI Integration
+
+The frontend will poll the license status endpoint and display warnings using the existing banner component pattern:
+
+```typescript
+// New component: LicenseWarningBanner.tsx
+interface LicenseStatus {
+ warn: boolean;
+ message: string;
+}
+
+export function LicenseWarningBanner() {
+  const [licenseStatus, setLicenseStatus] = useState<LicenseStatus>({ warn: false, message: "" });
+
+ useEffect(() => {
+ const checkLicenseStatus = async () => {
+ try {
+ const response = await fetch('/api/license-status');
+ const status = await response.json();
+ setLicenseStatus(status);
+ } catch (error) {
+ console.error('Failed to check license status:', error);
+ }
+ };
+
+ // Check immediately and then every hour
+ checkLicenseStatus();
+ const interval = setInterval(checkLicenseStatus, 60 * 60 * 1000);
+
+ return () => clearInterval(interval);
+ }, []);
+
+ if (!licenseStatus.warn) {
+ return null;
+ }
+
+  return (
+    <div className="license-warning-banner" role="alert">
+      <span>
+        ⚠️ {licenseStatus.message}
+      </span>
+    </div>
+  );
+}
+```
+
+### 4.5 Cronjob Configuration
+
+The cronjob configurations will be deployed via the OpenHands-Cloud helm charts.
+
+#### 4.5.1 Collection Cronjob
+
+The collection cronjob runs weekly to gather metrics:
+
+```yaml
+# charts/openhands/templates/telemetry-collection-cronjob.yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ include "openhands.fullname" . }}-telemetry-collection
+ labels:
+ {{- include "openhands.labels" . | nindent 4 }}
+spec:
+ schedule: "0 2 * * 0" # Weekly on Sunday at 2 AM
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: telemetry-collector
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ env:
+ {{- include "openhands.env" . | nindent 12 }}
+ command:
+ - python
+ - -c
+ - |
+ from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
+ from enterprise.storage.database import session_maker
+ from enterprise.server.telemetry.collection_processor import TelemetryCollectionProcessor
+
+ # Create collection task
+ processor = TelemetryCollectionProcessor()
+ task = MaintenanceTask()
+ task.set_processor(processor)
+ task.status = MaintenanceTaskStatus.PENDING
+
+ with session_maker() as session:
+ session.add(task)
+ session.commit()
+ restartPolicy: OnFailure
+```
+
+#### 4.5.2 Upload Cronjob
+
+The upload cronjob runs daily to send metrics to Replicated:
+
+```yaml
+# charts/openhands/templates/telemetry-upload-cronjob.yaml
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+ name: {{ include "openhands.fullname" . }}-telemetry-upload
+ labels:
+ {{- include "openhands.labels" . | nindent 4 }}
+spec:
+ schedule: "0 3 * * *" # Daily at 3 AM
+ jobTemplate:
+ spec:
+ template:
+ spec:
+ containers:
+ - name: telemetry-uploader
+ image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
+ env:
+ {{- include "openhands.env" . | nindent 12 }}
+ - name: REPLICATED_PUBLISHABLE_KEY
+ valueFrom:
+ secretKeyRef:
+ name: {{ include "openhands.fullname" . }}-replicated-config
+ key: publishable-key
+ - name: REPLICATED_APP_SLUG
+ value: {{ .Values.telemetry.replicatedAppSlug | default "openhands-enterprise" | quote }}
+ command:
+ - python
+ - -c
+ - |
+ from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
+ from enterprise.storage.database import session_maker
+ from enterprise.server.telemetry.upload_processor import TelemetryUploadProcessor
+ import os
+
+ # Create upload task
+ processor = TelemetryUploadProcessor(
+ replicated_publishable_key=os.getenv('REPLICATED_PUBLISHABLE_KEY'),
+ replicated_app_slug=os.getenv('REPLICATED_APP_SLUG', 'openhands-enterprise')
+ )
+ task = MaintenanceTask()
+ task.set_processor(processor)
+ task.status = MaintenanceTaskStatus.PENDING
+
+ with session_maker() as session:
+ session.add(task)
+ session.commit()
+ restartPolicy: OnFailure
+```
+
+## 5. Implementation Plan
+
+All implementation must pass existing lints and tests. New functionality requires comprehensive unit tests with >90% coverage. Integration tests should verify end-to-end telemetry flow including collection, storage, upload, and warning display.
+
+### 5.1 Database Schema and Models (M1)
+
+**Repository**: OpenHands
+Establish the foundational database schema and SQLAlchemy models for telemetry data storage.
+
+#### 5.1.1 OpenHands - Database Migration
+
+- [ ] `enterprise/migrations/versions/077_create_telemetry_tables.py`
+- [ ] `enterprise/storage/telemetry_metrics.py`
+- [ ] `enterprise/storage/telemetry_config.py`
+
+#### 5.1.2 OpenHands - Model Tests
+
+- [ ] `enterprise/tests/unit/storage/test_telemetry_metrics.py`
+- [ ] `enterprise/tests/unit/storage/test_telemetry_config.py`
+
+**Demo**: Database tables created and models can store/retrieve telemetry data.
+
+### 5.2 Metrics Collection Framework (M2)
+
+**Repository**: OpenHands
+Implement the pluggable metrics collection system with registry and base classes.
+
+#### 5.2.1 OpenHands - Core Collection Framework
+
+- [ ] `enterprise/server/telemetry/__init__.py`
+- [ ] `enterprise/server/telemetry/collector_base.py`
+- [ ] `enterprise/server/telemetry/collector_registry.py`
+- [ ] `enterprise/server/telemetry/decorators.py`
+
+#### 5.2.2 OpenHands - Example Collectors
+
+- [ ] `enterprise/server/telemetry/collectors/__init__.py`
+- [ ] `enterprise/server/telemetry/collectors/system_metrics.py`
+- [ ] `enterprise/server/telemetry/collectors/user_activity.py`
+
+#### 5.2.3 OpenHands - Framework Tests
+
+- [ ] `enterprise/tests/unit/telemetry/test_collector_base.py`
+- [ ] `enterprise/tests/unit/telemetry/test_collector_registry.py`
+- [ ] `enterprise/tests/unit/telemetry/test_system_metrics.py`
+
+**Demo**: Developers can create new collectors with a single file change using the @register_collector decorator.
+
+### 5.3 Collection and Upload Processors (M3)
+
+**Repository**: OpenHands
+Implement maintenance task processors for collecting metrics and uploading to Replicated.
+
+#### 5.3.1 OpenHands - Collection Processor
+
+- [ ] `enterprise/server/telemetry/collection_processor.py`
+- [ ] `enterprise/tests/unit/telemetry/test_collection_processor.py`
+
+#### 5.3.2 OpenHands - Upload Processor
+
+- [ ] `enterprise/server/telemetry/upload_processor.py`
+- [ ] `enterprise/tests/unit/telemetry/test_upload_processor.py`
+
+#### 5.3.3 OpenHands - Integration Tests
+
+- [ ] `enterprise/tests/integration/test_telemetry_flow.py`
+
+**Demo**: Metrics are automatically collected weekly and uploaded daily to Replicated vendor portal.
+
+### 5.4 License Warning API (M4)
+
+**Repository**: OpenHands
+Implement the license status endpoint for the warning system.
+
+#### 5.4.1 OpenHands - License Status API
+
+- [ ] `enterprise/server/routes/license.py`
+- [ ] `enterprise/tests/unit/routes/test_license.py`
+
+#### 5.4.2 OpenHands - API Integration
+
+- [ ] Update `enterprise/saas_server.py` to include license router
+
+**Demo**: License status API returns warning status based on telemetry upload success.
+
+### 5.5 UI Warning Banner (M5)
+
+**Repository**: OpenHands
+Implement the frontend warning banner component and integration.
+
+#### 5.5.1 OpenHands - UI Warning Banner
+
+- [ ] `frontend/src/components/features/license/license-warning-banner.tsx`
+- [ ] `frontend/src/components/features/license/license-warning-banner.test.tsx`
+
+#### 5.5.2 OpenHands - UI Integration
+
+- [ ] Update main UI layout to include license warning banner
+- [ ] Add license status polling service
+
+**Demo**: License warnings appear in UI when telemetry uploads fail for >4 days, with accurate expiration countdown.
+
+### 5.6 Helm Chart Deployment Configuration (M6)
+
+**Repository**: OpenHands-Cloud
+Create Kubernetes cronjob configurations and deployment scripts.
+
+#### 5.6.1 OpenHands-Cloud - Cronjob Manifests
+
+- [ ] `charts/openhands/templates/telemetry-collection-cronjob.yaml`
+- [ ] `charts/openhands/templates/telemetry-upload-cronjob.yaml`
+
+#### 5.6.2 OpenHands-Cloud - Configuration Management
+
+- [ ] `charts/openhands/templates/replicated-secret.yaml`
+- [ ] Update `charts/openhands/values.yaml` with telemetry configuration options:
+ ```yaml
+ # Add to values.yaml
+ telemetry:
+ enabled: true
+ replicatedAppSlug: "openhands-enterprise"
+ adminEmail: "" # Optional: admin email for customer identification
+
+ # Add to deployment environment variables
+ env:
+ OPENHANDS_ADMIN_EMAIL: "{{ .Values.telemetry.adminEmail }}"
+ ```
+
+**Demo**: The complete telemetry system is deployed via the Helm chart, with configurable collection intervals and Replicated integration.
+
+### 5.7 Documentation and Enhanced Collectors (M7)
+
+**Repository**: OpenHands
+Add comprehensive metrics collectors, monitoring capabilities, and documentation.
+
+#### 5.7.1 OpenHands - Advanced Collectors
+
+- [ ] `enterprise/server/telemetry/collectors/conversation_metrics.py`
+- [ ] `enterprise/server/telemetry/collectors/integration_usage.py`
+- [ ] `enterprise/server/telemetry/collectors/performance_metrics.py`
+
+#### 5.7.2 OpenHands - Monitoring and Testing
+
+- [ ] `enterprise/server/telemetry/monitoring.py`
+- [ ] `enterprise/tests/e2e/test_telemetry_system.py`
+- [ ] Performance tests for large-scale metric collection
+
+#### 5.7.3 OpenHands - Technical Documentation
+
+- [ ] `enterprise/server/telemetry/README.md`
+- [ ] Update deployment documentation with telemetry configuration instructions
+- [ ] Add troubleshooting guide for telemetry issues
+
+**Demo**: Rich telemetry data flows to the vendor portal, with comprehensive monitoring, alerting for system health, and complete documentation.
diff --git a/enterprise/integrations/github/github_manager.py b/enterprise/integrations/github/github_manager.py
index a83bd54f02..1d16dd40d7 100644
--- a/enterprise/integrations/github/github_manager.py
+++ b/enterprise/integrations/github/github_manager.py
@@ -31,7 +31,7 @@ from server.utils.conversation_callback_utils import register_callback_processor
from openhands.core.logger import openhands_logger as logger
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.utils.async_utils import call_sync_from_async
@@ -250,7 +250,7 @@ class GithubManager(Manager):
f'[GitHub] Creating new conversation for user {user_info.username}'
)
- secret_store = UserSecrets(
+ secret_store = Secrets(
provider_tokens=MappingProxyType(
{
ProviderType.GITHUB: ProviderToken(
diff --git a/enterprise/integrations/gitlab/gitlab_manager.py b/enterprise/integrations/gitlab/gitlab_manager.py
index b7296f13e1..4ab3644250 100644
--- a/enterprise/integrations/gitlab/gitlab_manager.py
+++ b/enterprise/integrations/gitlab/gitlab_manager.py
@@ -25,7 +25,7 @@ from openhands.core.logger import openhands_logger as logger
from openhands.integrations.gitlab.gitlab_service import GitLabServiceImpl
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.server.types import LLMAuthenticationError, MissingSettingsError
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
class GitlabManager(Manager):
@@ -198,7 +198,7 @@ class GitlabManager(Manager):
f'[GitLab] Creating new conversation for user {user_info.username}'
)
- secret_store = UserSecrets(
+ secret_store = Secrets(
provider_tokens=MappingProxyType(
{
ProviderType.GITLAB: ProviderToken(
diff --git a/enterprise/integrations/jira/jira_view.py b/enterprise/integrations/jira/jira_view.py
index eeff968ec3..c410175606 100644
--- a/enterprise/integrations/jira/jira_view.py
+++ b/enterprise/integrations/jira/jira_view.py
@@ -57,7 +57,7 @@ class JiraNewConversationView(JiraViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
- user_secrets = await self.saas_user_auth.get_user_secrets()
+ user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:
diff --git a/enterprise/integrations/jira_dc/jira_dc_view.py b/enterprise/integrations/jira_dc/jira_dc_view.py
index c60cbfc982..177d071288 100644
--- a/enterprise/integrations/jira_dc/jira_dc_view.py
+++ b/enterprise/integrations/jira_dc/jira_dc_view.py
@@ -60,7 +60,7 @@ class JiraDcNewConversationView(JiraDcViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
- user_secrets = await self.saas_user_auth.get_user_secrets()
+ user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:
diff --git a/enterprise/integrations/linear/linear_view.py b/enterprise/integrations/linear/linear_view.py
index a0cf69a5f8..0641c64200 100644
--- a/enterprise/integrations/linear/linear_view.py
+++ b/enterprise/integrations/linear/linear_view.py
@@ -57,7 +57,7 @@ class LinearNewConversationView(LinearViewInterface):
raise StartingConvoException('No repository selected for this conversation')
provider_tokens = await self.saas_user_auth.get_provider_tokens()
- user_secrets = await self.saas_user_auth.get_user_secrets()
+ user_secrets = await self.saas_user_auth.get_secrets()
instructions, user_msg = self._get_instructions(jinja_env)
try:
diff --git a/enterprise/integrations/slack/slack_manager.py b/enterprise/integrations/slack/slack_manager.py
index d496d972f0..1fd4e20759 100644
--- a/enterprise/integrations/slack/slack_manager.py
+++ b/enterprise/integrations/slack/slack_manager.py
@@ -87,7 +87,7 @@ class SlackManager(Manager):
return slack_user, saas_user_auth
def _infer_repo_from_message(self, user_msg: str) -> str | None:
- # Regular expression to match patterns like "All-Hands-AI/OpenHands" or "deploy repo"
+ # Regular expression to match patterns like "OpenHands/OpenHands" or "deploy repo"
pattern = r'([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)|([a-zA-Z0-9_-]+)(?=\s+repo)'
match = re.search(pattern, user_msg)
diff --git a/enterprise/integrations/slack/slack_view.py b/enterprise/integrations/slack/slack_view.py
index 6c8f82ed9f..bdcc4bf80a 100644
--- a/enterprise/integrations/slack/slack_view.py
+++ b/enterprise/integrations/slack/slack_view.py
@@ -188,7 +188,7 @@ class SlackNewConversationView(SlackViewInterface):
self._verify_necessary_values_are_set()
provider_tokens = await self.saas_user_auth.get_provider_tokens()
- user_secrets = await self.saas_user_auth.get_user_secrets()
+ user_secrets = await self.saas_user_auth.get_secrets()
user_instructions, conversation_instructions = self._get_instructions(jinja)
# Determine git provider from repository
diff --git a/enterprise/integrations/utils.py b/enterprise/integrations/utils.py
index 81c5bd52a1..ffe4f81360 100644
--- a/enterprise/integrations/utils.py
+++ b/enterprise/integrations/utils.py
@@ -381,7 +381,7 @@ def infer_repo_from_message(user_msg: str) -> list[str]:
# Captures: protocol, domain, owner, repo (with optional .git extension)
git_url_pattern = r'https?://(?:github\.com|gitlab\.com|bitbucket\.org)/([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+?)(?:\.git)?(?:[/?#].*?)?(?=\s|$|[^\w.-])'
- # Pattern to match direct owner/repo mentions (e.g., "All-Hands-AI/OpenHands")
+ # Pattern to match direct owner/repo mentions (e.g., "OpenHands/OpenHands")
# Must be surrounded by word boundaries or specific characters to avoid false positives
direct_pattern = (
r'(?:^|\s|[\[\(\'"])([a-zA-Z0-9_.-]+)/([a-zA-Z0-9_.-]+)(?=\s|$|[\]\)\'",.])'
diff --git a/enterprise/migrations/versions/078_create_telemetry_tables.py b/enterprise/migrations/versions/078_create_telemetry_tables.py
new file mode 100644
index 0000000000..bdcc9bf3d4
--- /dev/null
+++ b/enterprise/migrations/versions/078_create_telemetry_tables.py
@@ -0,0 +1,129 @@
+"""create telemetry tables
+
+Revision ID: 078
+Revises: 077
+Create Date: 2025-10-21
+
+"""
+
+from typing import Sequence, Union
+
+import sqlalchemy as sa
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '078'
+down_revision: Union[str, None] = '077'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ """Create telemetry tables for metrics collection and configuration."""
+ # Create telemetry_metrics table
+ op.create_table(
+ 'telemetry_metrics',
+ sa.Column(
+ 'id',
+ sa.String(), # UUID as string
+ nullable=False,
+ primary_key=True,
+ ),
+ sa.Column(
+ 'collected_at',
+ sa.DateTime(timezone=True),
+ nullable=False,
+ server_default=sa.text('CURRENT_TIMESTAMP'),
+ ),
+ sa.Column(
+ 'metrics_data',
+ sa.JSON(),
+ nullable=False,
+ ),
+ sa.Column(
+ 'uploaded_at',
+ sa.DateTime(timezone=True),
+ nullable=True,
+ ),
+ sa.Column(
+ 'upload_attempts',
+ sa.Integer(),
+ nullable=False,
+ server_default='0',
+ ),
+ sa.Column(
+ 'last_upload_error',
+ sa.Text(),
+ nullable=True,
+ ),
+ sa.Column(
+ 'created_at',
+ sa.DateTime(timezone=True),
+ nullable=False,
+ server_default=sa.text('CURRENT_TIMESTAMP'),
+ ),
+ sa.Column(
+ 'updated_at',
+ sa.DateTime(timezone=True),
+ nullable=False,
+ server_default=sa.text('CURRENT_TIMESTAMP'),
+ ),
+ )
+
+ # Create indexes for telemetry_metrics
+ op.create_index(
+ 'ix_telemetry_metrics_collected_at', 'telemetry_metrics', ['collected_at']
+ )
+ op.create_index(
+ 'ix_telemetry_metrics_uploaded_at', 'telemetry_metrics', ['uploaded_at']
+ )
+
+ # Create telemetry_replicated_identity table (minimal persistent identity data)
+ op.create_table(
+ 'telemetry_replicated_identity',
+ sa.Column(
+ 'id',
+ sa.Integer(),
+ nullable=False,
+ primary_key=True,
+ server_default='1',
+ ),
+ sa.Column(
+ 'customer_id',
+ sa.String(255),
+ nullable=True,
+ ),
+ sa.Column(
+ 'instance_id',
+ sa.String(255),
+ nullable=True,
+ ),
+ sa.Column(
+ 'created_at',
+ sa.DateTime(timezone=True),
+ nullable=False,
+ server_default=sa.text('CURRENT_TIMESTAMP'),
+ ),
+ sa.Column(
+ 'updated_at',
+ sa.DateTime(timezone=True),
+ nullable=False,
+ server_default=sa.text('CURRENT_TIMESTAMP'),
+ ),
+ )
+
+ # Add constraint to ensure single row in telemetry_replicated_identity
+ op.create_check_constraint(
+ 'single_identity_row', 'telemetry_replicated_identity', 'id = 1'
+ )
+
+
+def downgrade() -> None:
+ """Drop telemetry tables."""
+ # Drop indexes first
+ op.drop_index('ix_telemetry_metrics_uploaded_at', 'telemetry_metrics')
+ op.drop_index('ix_telemetry_metrics_collected_at', 'telemetry_metrics')
+
+ # Drop tables
+ op.drop_table('telemetry_replicated_identity')
+ op.drop_table('telemetry_metrics')
diff --git a/enterprise/migrations/versions/079_rename_user_secrets_to_custom_secrets.py b/enterprise/migrations/versions/079_rename_user_secrets_to_custom_secrets.py
new file mode 100644
index 0000000000..898293a338
--- /dev/null
+++ b/enterprise/migrations/versions/079_rename_user_secrets_to_custom_secrets.py
@@ -0,0 +1,39 @@
+"""rename user_secrets table to custom_secrets
+
+Revision ID: 079
+Revises: 078
+Create Date: 2025-10-27 00:00:00.000000
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+
+# revision identifiers, used by Alembic.
+revision: str = '079'
+down_revision: Union[str, None] = '078'
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ # Rename the table from user_secrets to custom_secrets
+ op.rename_table('user_secrets', 'custom_secrets')
+
+ # Rename the index to match the new table name
+ op.drop_index('idx_user_secrets_keycloak_user_id', 'custom_secrets')
+ op.create_index(
+ 'idx_custom_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
+ )
+
+
+def downgrade() -> None:
+ # Rename the index back to the original name
+ op.drop_index('idx_custom_secrets_keycloak_user_id', 'custom_secrets')
+ op.create_index(
+ 'idx_user_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
+ )
+
+ # Rename the table back from custom_secrets to user_secrets
+ op.rename_table('custom_secrets', 'user_secrets')
diff --git a/enterprise/pyproject.toml b/enterprise/pyproject.toml
index 30c0630747..f18407fea9 100644
--- a/enterprise/pyproject.toml
+++ b/enterprise/pyproject.toml
@@ -11,7 +11,7 @@ description = "Deploy OpenHands"
authors = [ "OpenHands" ]
license = "POLYFORM"
readme = "README.md"
-repository = "https://github.com/All-Hands-AI/OpenHands"
+repository = "https://github.com/OpenHands/OpenHands"
packages = [
{ include = "server" },
{ include = "storage" },
diff --git a/enterprise/server/auth/saas_user_auth.py b/enterprise/server/auth/saas_user_auth.py
index 7b3c246123..345090205b 100644
--- a/enterprise/server/auth/saas_user_auth.py
+++ b/enterprise/server/auth/saas_user_auth.py
@@ -31,7 +31,7 @@ from openhands.integrations.provider import (
)
from openhands.server.settings import Settings
from openhands.server.user_auth.user_auth import AuthType, UserAuth
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.settings.settings_store import SettingsStore
token_manager = TokenManager()
@@ -52,7 +52,7 @@ class SaasUserAuth(UserAuth):
settings_store: SaasSettingsStore | None = None
secrets_store: SaasSecretsStore | None = None
_settings: Settings | None = None
- _user_secrets: UserSecrets | None = None
+ _secrets: Secrets | None = None
accepted_tos: bool | None = None
auth_type: AuthType = AuthType.COOKIE
@@ -118,13 +118,13 @@ class SaasUserAuth(UserAuth):
self.secrets_store = secrets_store
return secrets_store
- async def get_user_secrets(self):
- user_secrets = self._user_secrets
+ async def get_secrets(self):
+ user_secrets = self._secrets
if user_secrets:
return user_secrets
secrets_store = await self.get_secrets_store()
user_secrets = await secrets_store.load()
- self._user_secrets = user_secrets
+ self._secrets = user_secrets
return user_secrets
async def get_access_token(self) -> SecretStr | None:
@@ -147,7 +147,7 @@ class SaasUserAuth(UserAuth):
if not access_token:
raise AuthError()
- user_secrets = await self.get_user_secrets()
+ user_secrets = await self.get_secrets()
try:
# TODO: I think we can do this in a single request if we refactor
diff --git a/enterprise/storage/saas_secrets_store.py b/enterprise/storage/saas_secrets_store.py
index 5b1018510e..53775a8235 100644
--- a/enterprise/storage/saas_secrets_store.py
+++ b/enterprise/storage/saas_secrets_store.py
@@ -7,11 +7,11 @@ from dataclasses import dataclass
from cryptography.fernet import Fernet
from sqlalchemy.orm import sessionmaker
from storage.database import session_maker
-from storage.stored_user_secrets import StoredUserSecrets
+from storage.stored_custom_secrets import StoredCustomSecrets
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.core.logger import openhands_logger as logger
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore
@@ -21,20 +21,20 @@ class SaasSecretsStore(SecretsStore):
session_maker: sessionmaker
config: OpenHandsConfig
- async def load(self) -> UserSecrets | None:
+ async def load(self) -> Secrets | None:
if not self.user_id:
return None
with self.session_maker() as session:
# Fetch all secrets for the given user ID
settings = (
- session.query(StoredUserSecrets)
- .filter(StoredUserSecrets.keycloak_user_id == self.user_id)
+ session.query(StoredCustomSecrets)
+ .filter(StoredCustomSecrets.keycloak_user_id == self.user_id)
.all()
)
if not settings:
- return UserSecrets()
+ return Secrets()
kwargs = {}
for secret in settings:
@@ -45,14 +45,14 @@ class SaasSecretsStore(SecretsStore):
self._decrypt_kwargs(kwargs)
- return UserSecrets(custom_secrets=kwargs) # type: ignore[arg-type]
+ return Secrets(custom_secrets=kwargs) # type: ignore[arg-type]
- async def store(self, item: UserSecrets):
+ async def store(self, item: Secrets):
with self.session_maker() as session:
# Incoming secrets are always the most updated ones
# Delete all existing records and override with incoming ones
- session.query(StoredUserSecrets).filter(
- StoredUserSecrets.keycloak_user_id == self.user_id
+ session.query(StoredCustomSecrets).filter(
+ StoredCustomSecrets.keycloak_user_id == self.user_id
).delete()
# Prepare the new secrets data
@@ -74,7 +74,7 @@ class SaasSecretsStore(SecretsStore):
# Add the new secrets
for secret_name, secret_value, description in secret_tuples:
- new_secret = StoredUserSecrets(
+ new_secret = StoredCustomSecrets(
keycloak_user_id=self.user_id,
secret_name=secret_name,
secret_value=secret_value,
diff --git a/enterprise/storage/stored_user_secrets.py b/enterprise/storage/stored_custom_secrets.py
similarity index 87%
rename from enterprise/storage/stored_user_secrets.py
rename to enterprise/storage/stored_custom_secrets.py
index 90c60eae49..c048644fce 100644
--- a/enterprise/storage/stored_user_secrets.py
+++ b/enterprise/storage/stored_custom_secrets.py
@@ -4,8 +4,8 @@ from sqlalchemy.orm import relationship
from storage.base import Base
-class StoredUserSecrets(Base): # type: ignore
- __tablename__ = 'user_secrets'
+class StoredCustomSecrets(Base): # type: ignore
+ __tablename__ = 'custom_secrets'
id = Column(Integer, Identity(), primary_key=True)
keycloak_user_id = Column(String, nullable=True, index=True)
org_id = Column(UUID(as_uuid=True), ForeignKey('org.id'), nullable=True)
diff --git a/enterprise/storage/telemetry_identity.py b/enterprise/storage/telemetry_identity.py
new file mode 100644
index 0000000000..201056745c
--- /dev/null
+++ b/enterprise/storage/telemetry_identity.py
@@ -0,0 +1,98 @@
+"""SQLAlchemy model for telemetry identity.
+
+This model stores persistent identity information that must survive container restarts
+for the OpenHands Enterprise Telemetry Service.
+"""
+
+from datetime import UTC, datetime
+from typing import Optional
+
+from sqlalchemy import CheckConstraint, Column, DateTime, Integer, String
+from storage.base import Base
+
+
+class TelemetryIdentity(Base): # type: ignore
+ """Stores persistent identity information for telemetry.
+
+ This table is designed to contain exactly one row (enforced by database constraint)
+ that maintains only the identity data that cannot be reliably recomputed:
+ - customer_id: Established relationship with Replicated
+ - instance_id: Generated once, must remain stable
+
+ Operational data like timestamps are derived from the telemetry_metrics table.
+ """
+
+ __tablename__ = 'telemetry_replicated_identity'
+ __table_args__ = (CheckConstraint('id = 1', name='single_identity_row'),)
+
+ id = Column(Integer, primary_key=True, default=1)
+ customer_id = Column(String(255), nullable=True)
+ instance_id = Column(String(255), nullable=True)
+ created_at = Column(
+ DateTime(timezone=True),
+ default=lambda: datetime.now(UTC),
+ nullable=False,
+ )
+ updated_at = Column(
+ DateTime(timezone=True),
+ default=lambda: datetime.now(UTC),
+ onupdate=lambda: datetime.now(UTC),
+ nullable=False,
+ )
+
+ def __init__(
+ self,
+ customer_id: Optional[str] = None,
+ instance_id: Optional[str] = None,
+ **kwargs,
+ ):
+ """Initialize telemetry identity.
+
+ Args:
+ customer_id: Unique identifier for the customer
+ instance_id: Unique identifier for this OpenHands instance
+ **kwargs: Additional keyword arguments for SQLAlchemy
+ """
+ super().__init__(**kwargs)
+
+ # Set defaults for fields that would normally be set by SQLAlchemy
+ now = datetime.now(UTC)
+ if not hasattr(self, 'created_at') or self.created_at is None:
+ self.created_at = now
+ if not hasattr(self, 'updated_at') or self.updated_at is None:
+ self.updated_at = now
+
+ # Force id to be 1 to maintain single-row constraint
+ self.id = 1
+ self.customer_id = customer_id
+ self.instance_id = instance_id
+
+ def set_customer_info(
+ self,
+ customer_id: Optional[str] = None,
+ instance_id: Optional[str] = None,
+ ) -> None:
+ """Update customer and instance identification information.
+
+ Args:
+ customer_id: Unique identifier for the customer
+ instance_id: Unique identifier for this OpenHands instance
+ """
+ if customer_id is not None:
+ self.customer_id = customer_id
+ if instance_id is not None:
+ self.instance_id = instance_id
+
+ @property
+ def has_customer_info(self) -> bool:
+ """Check if customer identification information is configured."""
+ return bool(self.customer_id and self.instance_id)
+
+    def __repr__(self) -> str:
+        return (
+            f'<TelemetryIdentity(id={self.id}, '
+            f'customer_id={self.customer_id}, '
+            f'instance_id={self.instance_id})>'
+        )
+
+ class Config:
+ from_attributes = True
diff --git a/enterprise/storage/telemetry_metrics.py b/enterprise/storage/telemetry_metrics.py
new file mode 100644
index 0000000000..aa339bdc4f
--- /dev/null
+++ b/enterprise/storage/telemetry_metrics.py
@@ -0,0 +1,112 @@
+"""SQLAlchemy model for telemetry metrics data.
+
+This model stores individual metric collection records with upload tracking
+and retry logic for the OpenHands Enterprise Telemetry Service.
+"""
+
+import uuid
+from datetime import UTC, datetime
+from typing import Any, Dict, Optional
+
+from sqlalchemy import JSON, Column, DateTime, Integer, String, Text
+from storage.base import Base
+
+
+class TelemetryMetrics(Base): # type: ignore
+ """Stores collected telemetry metrics with upload tracking.
+
+ Each record represents a single metrics collection event with associated
+ metadata for upload status and retry logic.
+ """
+
+ __tablename__ = 'telemetry_metrics'
+
+ id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
+ collected_at = Column(
+ DateTime(timezone=True),
+ nullable=False,
+ default=lambda: datetime.now(UTC),
+ index=True,
+ )
+ metrics_data = Column(JSON, nullable=False)
+ uploaded_at = Column(DateTime(timezone=True), nullable=True, index=True)
+ upload_attempts = Column(Integer, nullable=False, default=0)
+ last_upload_error = Column(Text, nullable=True)
+ created_at = Column(
+ DateTime(timezone=True),
+ default=lambda: datetime.now(UTC),
+ nullable=False,
+ )
+ updated_at = Column(
+ DateTime(timezone=True),
+ default=lambda: datetime.now(UTC),
+ onupdate=lambda: datetime.now(UTC),
+ nullable=False,
+ )
+
+ def __init__(
+ self,
+ metrics_data: Dict[str, Any],
+ collected_at: Optional[datetime] = None,
+ **kwargs,
+ ):
+ """Initialize a new telemetry metrics record.
+
+ Args:
+ metrics_data: Dictionary containing the collected metrics
+ collected_at: Timestamp when metrics were collected (defaults to now)
+ **kwargs: Additional keyword arguments for SQLAlchemy
+ """
+ super().__init__(**kwargs)
+
+ # Set defaults for fields that would normally be set by SQLAlchemy
+ now = datetime.now(UTC)
+ if not hasattr(self, 'id') or self.id is None:
+ self.id = str(uuid.uuid4())
+ if not hasattr(self, 'upload_attempts') or self.upload_attempts is None:
+ self.upload_attempts = 0
+ if not hasattr(self, 'created_at') or self.created_at is None:
+ self.created_at = now
+ if not hasattr(self, 'updated_at') or self.updated_at is None:
+ self.updated_at = now
+
+ self.metrics_data = metrics_data
+ if collected_at:
+ self.collected_at = collected_at
+ elif not hasattr(self, 'collected_at') or self.collected_at is None:
+ self.collected_at = now
+
+ def mark_uploaded(self) -> None:
+ """Mark this metrics record as successfully uploaded."""
+ self.uploaded_at = datetime.now(UTC)
+ self.last_upload_error = None
+
+ def mark_upload_failed(self, error_message: str) -> None:
+ """Mark this metrics record as having failed upload.
+
+ Args:
+ error_message: Description of the upload failure
+ """
+ self.upload_attempts += 1
+ self.last_upload_error = error_message
+ self.uploaded_at = None
+
+ @property
+ def is_uploaded(self) -> bool:
+ """Check if this metrics record has been successfully uploaded."""
+ return self.uploaded_at is not None
+
+ @property
+ def needs_retry(self) -> bool:
+ """Check if this metrics record needs upload retry (failed but not too many attempts)."""
+ return not self.is_uploaded and self.upload_attempts < 3
+
+    def __repr__(self) -> str:
+        return (
+            f'<TelemetryMetrics(id={self.id}, '
+            f'collected_at={self.collected_at}, '
+            f'uploaded={self.is_uploaded})>'
+        )
+
+ class Config:
+ from_attributes = True
diff --git a/enterprise/tests/unit/integrations/jira/test_jira_view.py b/enterprise/tests/unit/integrations/jira/test_jira_view.py
index 07b885f59d..ee14bf803f 100644
--- a/enterprise/tests/unit/integrations/jira/test_jira_view.py
+++ b/enterprise/tests/unit/integrations/jira/test_jira_view.py
@@ -309,7 +309,7 @@ class TestJiraViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
- new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
+ new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()
diff --git a/enterprise/tests/unit/integrations/jira_dc/test_jira_dc_view.py b/enterprise/tests/unit/integrations/jira_dc/test_jira_dc_view.py
index bd1f1f352e..91865f101d 100644
--- a/enterprise/tests/unit/integrations/jira_dc/test_jira_dc_view.py
+++ b/enterprise/tests/unit/integrations/jira_dc/test_jira_dc_view.py
@@ -309,7 +309,7 @@ class TestJiraDcViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
- new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
+ new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()
diff --git a/enterprise/tests/unit/integrations/linear/test_linear_view.py b/enterprise/tests/unit/integrations/linear/test_linear_view.py
index dc410a9a5c..05e465e37f 100644
--- a/enterprise/tests/unit/integrations/linear/test_linear_view.py
+++ b/enterprise/tests/unit/integrations/linear/test_linear_view.py
@@ -309,7 +309,7 @@ class TestLinearViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
- new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
+ new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()
diff --git a/enterprise/tests/unit/storage/__init__.py b/enterprise/tests/unit/storage/__init__.py
new file mode 100644
index 0000000000..aff1013265
--- /dev/null
+++ b/enterprise/tests/unit/storage/__init__.py
@@ -0,0 +1 @@
+# Storage unit tests
diff --git a/enterprise/tests/unit/storage/test_telemetry_identity.py b/enterprise/tests/unit/storage/test_telemetry_identity.py
new file mode 100644
index 0000000000..29c619f650
--- /dev/null
+++ b/enterprise/tests/unit/storage/test_telemetry_identity.py
@@ -0,0 +1,129 @@
+"""Unit tests for TelemetryIdentity model.
+
+Tests the persistent identity storage for the OpenHands Enterprise Telemetry Service.
+"""
+
+from datetime import datetime
+
+from storage.telemetry_identity import TelemetryIdentity
+
+
+class TestTelemetryIdentity:
+ """Test cases for TelemetryIdentity model."""
+
+ def test_create_identity_with_defaults(self):
+ """Test creating identity with default values."""
+ identity = TelemetryIdentity()
+
+ assert identity.id == 1
+ assert identity.customer_id is None
+ assert identity.instance_id is None
+ assert isinstance(identity.created_at, datetime)
+ assert isinstance(identity.updated_at, datetime)
+
+ def test_create_identity_with_values(self):
+ """Test creating identity with specific values."""
+ customer_id = 'cust_123'
+ instance_id = 'inst_456'
+
+ identity = TelemetryIdentity(customer_id=customer_id, instance_id=instance_id)
+
+ assert identity.id == 1
+ assert identity.customer_id == customer_id
+ assert identity.instance_id == instance_id
+
+ def test_set_customer_info(self):
+ """Test updating customer information."""
+ identity = TelemetryIdentity()
+
+ # Update customer info
+ identity.set_customer_info(
+ customer_id='new_customer', instance_id='new_instance'
+ )
+
+ assert identity.customer_id == 'new_customer'
+ assert identity.instance_id == 'new_instance'
+
+ def test_set_customer_info_partial(self):
+ """Test partial updates of customer information."""
+ identity = TelemetryIdentity(
+ customer_id='original_customer', instance_id='original_instance'
+ )
+
+ # Update only customer_id
+ identity.set_customer_info(customer_id='updated_customer')
+ assert identity.customer_id == 'updated_customer'
+ assert identity.instance_id == 'original_instance'
+
+ # Update only instance_id
+ identity.set_customer_info(instance_id='updated_instance')
+ assert identity.customer_id == 'updated_customer'
+ assert identity.instance_id == 'updated_instance'
+
+ def test_set_customer_info_with_none(self):
+ """Test that None values don't overwrite existing data."""
+ identity = TelemetryIdentity(
+ customer_id='existing_customer', instance_id='existing_instance'
+ )
+
+ # Call with None values - should not change existing data
+ identity.set_customer_info(customer_id=None, instance_id=None)
+ assert identity.customer_id == 'existing_customer'
+ assert identity.instance_id == 'existing_instance'
+
+ def test_has_customer_info_property(self):
+ """Test has_customer_info property logic."""
+ identity = TelemetryIdentity()
+
+ # Initially false (both None)
+ assert not identity.has_customer_info
+
+ # Still false with only customer_id
+ identity.customer_id = 'customer_123'
+ assert not identity.has_customer_info
+
+ # Still false with only instance_id
+ identity.customer_id = None
+ identity.instance_id = 'instance_456'
+ assert not identity.has_customer_info
+
+ # True when both are set
+ identity.customer_id = 'customer_123'
+ identity.instance_id = 'instance_456'
+ assert identity.has_customer_info
+
+ def test_has_customer_info_with_empty_strings(self):
+ """Test has_customer_info with empty strings."""
+ identity = TelemetryIdentity(customer_id='', instance_id='')
+
+ # Empty strings should be falsy
+ assert not identity.has_customer_info
+
+ def test_repr_method(self):
+ """Test string representation of identity."""
+ identity = TelemetryIdentity(
+ customer_id='test_customer', instance_id='test_instance'
+ )
+
+ repr_str = repr(identity)
+ assert 'TelemetryIdentity' in repr_str
+ assert 'test_customer' in repr_str
+ assert 'test_instance' in repr_str
+
+ def test_id_forced_to_one(self):
+ """Test that ID is always forced to 1."""
+ identity = TelemetryIdentity()
+ assert identity.id == 1
+
+ # Even if we try to set a different ID in constructor
+ identity2 = TelemetryIdentity(customer_id='test')
+ assert identity2.id == 1
+
+ def test_timestamps_are_set(self):
+ """Test that timestamps are properly set."""
+ identity = TelemetryIdentity()
+
+ assert identity.created_at is not None
+ assert identity.updated_at is not None
+ assert isinstance(identity.created_at, datetime)
+ assert isinstance(identity.updated_at, datetime)
diff --git a/enterprise/tests/unit/storage/test_telemetry_metrics.py b/enterprise/tests/unit/storage/test_telemetry_metrics.py
new file mode 100644
index 0000000000..6f6809b32a
--- /dev/null
+++ b/enterprise/tests/unit/storage/test_telemetry_metrics.py
@@ -0,0 +1,190 @@
+"""Unit tests for TelemetryMetrics model."""
+
+import uuid
+from datetime import UTC, datetime
+
+from storage.telemetry_metrics import TelemetryMetrics
+
+
+class TestTelemetryMetrics:
+ """Test cases for TelemetryMetrics model."""
+
+ def test_init_with_metrics_data(self):
+ """Test initialization with metrics data."""
+ metrics_data = {
+ 'cpu_usage': 75.5,
+ 'memory_usage': 1024,
+ 'active_sessions': 5,
+ }
+
+ metrics = TelemetryMetrics(metrics_data=metrics_data)
+
+ assert metrics.metrics_data == metrics_data
+ assert metrics.upload_attempts == 0
+ assert metrics.uploaded_at is None
+ assert metrics.last_upload_error is None
+ assert metrics.collected_at is not None
+ assert metrics.created_at is not None
+ assert metrics.updated_at is not None
+
+ def test_init_with_custom_collected_at(self):
+ """Test initialization with custom collected_at timestamp."""
+ metrics_data = {'test': 'value'}
+ custom_time = datetime(2023, 1, 1, 12, 0, 0, tzinfo=UTC)
+
+ metrics = TelemetryMetrics(metrics_data=metrics_data, collected_at=custom_time)
+
+ assert metrics.collected_at == custom_time
+
+ def test_mark_uploaded(self):
+ """Test marking metrics as uploaded."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ # Initially not uploaded
+ assert not metrics.is_uploaded
+ assert metrics.uploaded_at is None
+
+ # Mark as uploaded
+ metrics.mark_uploaded()
+
+ assert metrics.is_uploaded
+
+ def test_mark_upload_failed(self):
+ """Test marking upload as failed."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+ error_message = 'Network timeout'
+
+ # Initially no failures
+ assert metrics.upload_attempts == 0
+ assert metrics.last_upload_error is None
+
+ # Mark as failed
+ metrics.mark_upload_failed(error_message)
+
+ assert metrics.upload_attempts == 1
+ assert metrics.last_upload_error == error_message
+ assert metrics.uploaded_at is None
+ assert not metrics.is_uploaded
+
+ def test_multiple_upload_failures(self):
+ """Test multiple upload failures increment attempts."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ metrics.mark_upload_failed('Error 1')
+ assert metrics.upload_attempts == 1
+
+ metrics.mark_upload_failed('Error 2')
+ assert metrics.upload_attempts == 2
+ assert metrics.last_upload_error == 'Error 2'
+
+ def test_is_uploaded_property(self):
+ """Test is_uploaded property."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ # Initially not uploaded
+ assert not metrics.is_uploaded
+
+ # After marking uploaded
+ metrics.mark_uploaded()
+ assert metrics.is_uploaded
+
+ def test_needs_retry_property(self):
+ """Test needs_retry property logic."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ # Initially needs retry (0 attempts, not uploaded)
+ assert metrics.needs_retry
+
+ # After 1 failure, still needs retry
+ metrics.mark_upload_failed('Error 1')
+ assert metrics.needs_retry
+
+ # After 2 failures, still needs retry
+ metrics.mark_upload_failed('Error 2')
+ assert metrics.needs_retry
+
+ # After 3 failures, no more retries
+ metrics.mark_upload_failed('Error 3')
+ assert not metrics.needs_retry
+
+ # Reset and test successful upload
+ metrics2 = TelemetryMetrics(metrics_data={'test': 'data'}) # type: ignore[unreachable]
+ metrics2.mark_uploaded()
+        # After upload, needs_retry is False since is_uploaded is True
+        assert not metrics2.needs_retry
+
+ def test_upload_failure_clears_uploaded_at(self):
+ """Test that upload failure clears uploaded_at timestamp."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ # Mark as uploaded first
+ metrics.mark_uploaded()
+ assert metrics.uploaded_at is not None
+
+ # Mark as failed - should clear uploaded_at
+ metrics.mark_upload_failed('Network error')
+ assert metrics.uploaded_at is None
+
+ def test_successful_upload_clears_error(self):
+ """Test that successful upload clears error message."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+
+ # Mark as failed first
+ metrics.mark_upload_failed('Network error')
+ assert metrics.last_upload_error == 'Network error'
+
+ # Mark as uploaded - should clear error
+ metrics.mark_uploaded()
+ assert metrics.last_upload_error is None
+
+ def test_uuid_generation(self):
+ """Test that each instance gets a unique UUID."""
+ metrics1 = TelemetryMetrics(metrics_data={'test': 'data1'})
+ metrics2 = TelemetryMetrics(metrics_data={'test': 'data2'})
+
+ assert metrics1.id != metrics2.id
+ assert isinstance(uuid.UUID(metrics1.id), uuid.UUID)
+ assert isinstance(uuid.UUID(metrics2.id), uuid.UUID)
+
+ def test_repr(self):
+ """Test string representation."""
+ metrics = TelemetryMetrics(metrics_data={'test': 'data'})
+ repr_str = repr(metrics)
+
+ assert 'TelemetryMetrics' in repr_str
+ assert metrics.id in repr_str
+ assert str(metrics.collected_at) in repr_str
+ assert 'uploaded=False' in repr_str
+
+ # Test after upload
+ metrics.mark_uploaded()
+ repr_str = repr(metrics)
+ assert 'uploaded=True' in repr_str
+
+ def test_complex_metrics_data(self):
+ """Test with complex nested metrics data."""
+ complex_data = {
+ 'system': {
+ 'cpu': {'usage': 75.5, 'cores': 8},
+ 'memory': {'total': 16384, 'used': 8192},
+ },
+ 'sessions': [
+ {'id': 'session1', 'duration': 3600},
+ {'id': 'session2', 'duration': 1800},
+ ],
+ 'timestamp': '2023-01-01T12:00:00Z',
+ }
+
+ metrics = TelemetryMetrics(metrics_data=complex_data)
+
+ assert metrics.metrics_data == complex_data
+
+ def test_empty_metrics_data(self):
+ """Test with empty metrics data."""
+ metrics = TelemetryMetrics(metrics_data={})
+
+ assert metrics.metrics_data == {}
+
+ def test_config_class(self):
+ """Test that Config class is properly set."""
+ assert hasattr(TelemetryMetrics, 'Config')
+ assert TelemetryMetrics.Config.from_attributes is True
diff --git a/enterprise/tests/unit/test_saas_secrets_store.py b/enterprise/tests/unit/test_saas_secrets_store.py
index 4982a1cec9..d3bc223408 100644
--- a/enterprise/tests/unit/test_saas_secrets_store.py
+++ b/enterprise/tests/unit/test_saas_secrets_store.py
@@ -5,11 +5,11 @@ from unittest.mock import MagicMock
import pytest
from pydantic import SecretStr
from storage.saas_secrets_store import SaasSecretsStore
-from storage.stored_user_secrets import StoredUserSecrets
+from storage.stored_custom_secrets import StoredCustomSecrets
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.integrations.provider import CustomSecret
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
@pytest.fixture
@@ -27,8 +27,8 @@ def secrets_store(session_maker, mock_config):
class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_store_and_load(self, secrets_store):
- # Create a UserSecrets object with some test data
- user_secrets = UserSecrets(
+ # Create a Secrets object with some test data
+ user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@@ -60,8 +60,8 @@ class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_encryption_decryption(self, secrets_store):
- # Create a UserSecrets object with sensitive data
- user_secrets = UserSecrets(
+ # Create a Secrets object with sensitive data
+ user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@@ -87,8 +87,8 @@ class TestSaasSecretsStore:
# Verify the data is encrypted in the database
with secrets_store.session_maker() as session:
stored = (
- session.query(StoredUserSecrets)
- .filter(StoredUserSecrets.keycloak_user_id == 'user-id')
+ session.query(StoredCustomSecrets)
+ .filter(StoredCustomSecrets.keycloak_user_id == 'user-id')
.first()
)
@@ -154,7 +154,7 @@ class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_update_existing_secrets(self, secrets_store):
# Create and store initial secrets
- initial_secrets = UserSecrets(
+ initial_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@@ -169,7 +169,7 @@ class TestSaasSecretsStore:
await secrets_store.store(initial_secrets)
# Create and store updated secrets
- updated_secrets = UserSecrets(
+ updated_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
diff --git a/enterprise/tests/unit/test_slack_integration.py b/enterprise/tests/unit/test_slack_integration.py
index 3f2d51ac46..255b730459 100644
--- a/enterprise/tests/unit/test_slack_integration.py
+++ b/enterprise/tests/unit/test_slack_integration.py
@@ -14,7 +14,7 @@ def slack_manager():
@pytest.mark.parametrize(
'message,expected',
[
- ('All-Hands-AI/Openhands', 'All-Hands-AI/Openhands'),
+ ('OpenHands/Openhands', 'OpenHands/Openhands'),
('deploy repo', 'deploy'),
('use hello world', None),
],
diff --git a/enterprise/tests/unit/test_utils.py b/enterprise/tests/unit/test_utils.py
index 8800c7b5a2..c523e89138 100644
--- a/enterprise/tests/unit/test_utils.py
+++ b/enterprise/tests/unit/test_utils.py
@@ -74,8 +74,8 @@ def test_infer_repo_from_message():
# Single GitHub URLs
('Clone https://github.com/demo123/demo1.git', ['demo123/demo1']),
(
- 'Check out https://github.com/All-Hands-AI/OpenHands.git for details',
- ['All-Hands-AI/OpenHands'],
+ 'Check out https://github.com/OpenHands/OpenHands.git for details',
+ ['OpenHands/OpenHands'],
),
('Visit https://github.com/microsoft/vscode', ['microsoft/vscode']),
# Single GitLab URLs
@@ -92,7 +92,7 @@ def test_infer_repo_from_message():
['atlassian/atlassian-connect-express'],
),
# Single direct owner/repo mentions
- ('Please deploy the All-Hands-AI/OpenHands repo', ['All-Hands-AI/OpenHands']),
+ ('Please deploy the OpenHands/OpenHands repo', ['OpenHands/OpenHands']),
('I need help with the microsoft/vscode repository', ['microsoft/vscode']),
('Check facebook/react for examples', ['facebook/react']),
('The torvalds/linux kernel', ['torvalds/linux']),
diff --git a/evaluation/README.md b/evaluation/README.md
index e15ea68f60..694623f63d 100644
--- a/evaluation/README.md
+++ b/evaluation/README.md
@@ -6,14 +6,14 @@ This folder contains code and resources to run experiments and evaluations.
### Setup
-Before starting evaluation, follow the instructions [here](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md) to setup your local development environment and LLM.
+Before starting evaluation, follow the instructions [here](https://github.com/OpenHands/OpenHands/blob/main/Development.md) to set up your local development environment and LLM.
Once you are done with setup, you can follow the benchmark-specific instructions in each subdirectory of the [evaluation directory](#supported-benchmarks).
Generally these will involve running `run_infer.py` to perform inference with the agents.
### Implementing and Evaluating an Agent
-To add an agent to OpenHands, you will need to implement it in the [agenthub directory](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/agenthub). There is a README there with more information.
+To add an agent to OpenHands, you will need to implement it in the [agenthub directory](https://github.com/OpenHands/OpenHands/tree/main/openhands/agenthub). There is a README there with more information.
To evaluate an agent, you can provide the agent's name to the `run_infer.py` program.
diff --git a/evaluation/benchmarks/commit0/run_infer.py b/evaluation/benchmarks/commit0/run_infer.py
index fb125498c3..bf667dacf3 100644
--- a/evaluation/benchmarks/commit0/run_infer.py
+++ b/evaluation/benchmarks/commit0/run_infer.py
@@ -109,7 +109,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/ml_bench/run_analysis.py b/evaluation/benchmarks/ml_bench/run_analysis.py
index 8baddcffb1..4b7f743463 100644
--- a/evaluation/benchmarks/ml_bench/run_analysis.py
+++ b/evaluation/benchmarks/ml_bench/run_analysis.py
@@ -124,7 +124,7 @@ if __name__ == '__main__':
)
args, _ = parser.parse_known_args()
- # Check https://github.com/All-Hands-AI/OpenHands/blob/main/evaluation/swe_bench/README.md#configure-openhands-and-your-llm
+ # Check https://github.com/OpenHands/OpenHands/blob/main/evaluation/swe_bench/README.md#configure-openhands-and-your-llm
# for details of how to set `llm_config`
if args.llm_config:
specified_llm_config = get_llm_config_arg(args.llm_config)
diff --git a/evaluation/benchmarks/multi_swe_bench/SWE-Gym.md b/evaluation/benchmarks/multi_swe_bench/SWE-Gym.md
index 1b136e9d3d..5ce07e5596 100644
--- a/evaluation/benchmarks/multi_swe_bench/SWE-Gym.md
+++ b/evaluation/benchmarks/multi_swe_bench/SWE-Gym.md
@@ -36,8 +36,8 @@ We use it to train strong LM agents that achieve state-of-the-art open results o
The process of running SWE-Gym is very similar to how you'd run SWE-Bench evaluation.
-1. First, clone OpenHands repo `git clone https://github.com/All-Hands-AI/OpenHands.git`
-2. Then setup the repo following [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md)
+1. First, clone OpenHands repo `git clone https://github.com/OpenHands/OpenHands.git`
+2. Then setup the repo following [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md)
3. Then you can simply serve your own model as an OpenAI compatible endpoint, put those info in config.toml. You can do this by following instruction [here](../../README.md#setup).
4. And then simply do the following to sample for 16x parallelism:
diff --git a/evaluation/benchmarks/multi_swe_bench/eval_infer.py b/evaluation/benchmarks/multi_swe_bench/eval_infer.py
index 22fdcc764b..061e4a909e 100644
--- a/evaluation/benchmarks/multi_swe_bench/eval_infer.py
+++ b/evaluation/benchmarks/multi_swe_bench/eval_infer.py
@@ -80,7 +80,7 @@ def get_config(metadata: EvalMetadata, instance: pd.Series) -> OpenHandsConfig:
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
sandbox_config.base_container_image = base_container_image
diff --git a/evaluation/benchmarks/multi_swe_bench/run_infer.py b/evaluation/benchmarks/multi_swe_bench/run_infer.py
index ef6bf7240b..d42879d7f8 100644
--- a/evaluation/benchmarks/multi_swe_bench/run_infer.py
+++ b/evaluation/benchmarks/multi_swe_bench/run_infer.py
@@ -316,7 +316,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/multi_swe_bench/scripts/setup/prepare_swe_utils.sh b/evaluation/benchmarks/multi_swe_bench/scripts/setup/prepare_swe_utils.sh
index 2d35c6f218..dadc6e24dd 100644
--- a/evaluation/benchmarks/multi_swe_bench/scripts/setup/prepare_swe_utils.sh
+++ b/evaluation/benchmarks/multi_swe_bench/scripts/setup/prepare_swe_utils.sh
@@ -6,7 +6,7 @@ mkdir -p $EVAL_WORKSPACE
# 1. Prepare REPO
echo "==== Prepare SWE-bench repo ===="
-OH_SWE_BENCH_REPO_PATH="https://github.com/All-Hands-AI/SWE-bench.git"
+OH_SWE_BENCH_REPO_PATH="https://github.com/OpenHands/SWE-bench.git"
OH_SWE_BENCH_REPO_BRANCH="eval"
git clone -b $OH_SWE_BENCH_REPO_BRANCH $OH_SWE_BENCH_REPO_PATH $EVAL_WORKSPACE/OH-SWE-bench
diff --git a/evaluation/benchmarks/nocode_bench/run_infer_nc.py b/evaluation/benchmarks/nocode_bench/run_infer_nc.py
index 3c3d40bdfc..e102d5333a 100644
--- a/evaluation/benchmarks/nocode_bench/run_infer_nc.py
+++ b/evaluation/benchmarks/nocode_bench/run_infer_nc.py
@@ -161,7 +161,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/nocode_bench/scripts/eval/verify_costs.py b/evaluation/benchmarks/nocode_bench/scripts/eval/verify_costs.py
index 628ecb4fb5..ae6ebc4801 100644
--- a/evaluation/benchmarks/nocode_bench/scripts/eval/verify_costs.py
+++ b/evaluation/benchmarks/nocode_bench/scripts/eval/verify_costs.py
@@ -10,7 +10,7 @@ def verify_instance_costs(row: pd.Series) -> float:
Verifies that the accumulated_cost matches the sum of individual costs in metrics.
Also checks for duplicate consecutive costs which might indicate buggy counting.
If the consecutive costs are identical, the file is affected by this bug:
- https://github.com/All-Hands-AI/OpenHands/issues/5383
+ https://github.com/OpenHands/OpenHands/issues/5383
Args:
row: DataFrame row containing instance data with metrics
diff --git a/evaluation/benchmarks/swe_bench/SWE-Gym.md b/evaluation/benchmarks/swe_bench/SWE-Gym.md
index 613d912022..e0f94caaf5 100644
--- a/evaluation/benchmarks/swe_bench/SWE-Gym.md
+++ b/evaluation/benchmarks/swe_bench/SWE-Gym.md
@@ -34,8 +34,8 @@ We use it to train strong LM agents that achieve state-of-the-art open results o
The process of running SWE-Gym is very similar to how you'd run SWE-Bench evaluation.
-1. First, clone OpenHands repo `git clone https://github.com/All-Hands-AI/OpenHands.git`
-2. Then setup the repo following [Development.md](https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md)
+1. First, clone OpenHands repo `git clone https://github.com/OpenHands/OpenHands.git`
+2. Then setup the repo following [Development.md](https://github.com/OpenHands/OpenHands/blob/main/Development.md)
3. Then you can simply serve your own model as an OpenAI compatible endpoint, put those info in config.toml. You can do this by following instruction [here](../../README.md#setup).
4. And then simply do the following to sample for 16x parallelism:
diff --git a/evaluation/benchmarks/swe_bench/eval_infer.py b/evaluation/benchmarks/swe_bench/eval_infer.py
index 46f3629be8..132d1e1c2d 100644
--- a/evaluation/benchmarks/swe_bench/eval_infer.py
+++ b/evaluation/benchmarks/swe_bench/eval_infer.py
@@ -76,7 +76,7 @@ def get_config(metadata: EvalMetadata, instance: pd.Series) -> OpenHandsConfig:
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
sandbox_config.base_container_image = base_container_image
diff --git a/evaluation/benchmarks/swe_bench/run_infer.py b/evaluation/benchmarks/swe_bench/run_infer.py
index 2b86cc3baa..f7290bc52d 100644
--- a/evaluation/benchmarks/swe_bench/run_infer.py
+++ b/evaluation/benchmarks/swe_bench/run_infer.py
@@ -217,7 +217,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/swe_bench/run_localize.py b/evaluation/benchmarks/swe_bench/run_localize.py
index 2f7f09912a..a1d169860d 100644
--- a/evaluation/benchmarks/swe_bench/run_localize.py
+++ b/evaluation/benchmarks/swe_bench/run_localize.py
@@ -180,7 +180,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/swe_bench/scripts/eval/verify_costs.py b/evaluation/benchmarks/swe_bench/scripts/eval/verify_costs.py
index 4d7ac30895..6193df5577 100644
--- a/evaluation/benchmarks/swe_bench/scripts/eval/verify_costs.py
+++ b/evaluation/benchmarks/swe_bench/scripts/eval/verify_costs.py
@@ -9,7 +9,7 @@ def verify_instance_costs(row: pd.Series) -> float:
"""Verifies that the accumulated_cost matches the sum of individual costs in metrics.
Also checks for duplicate consecutive costs which might indicate buggy counting.
If the consecutive costs are identical, the file is affected by this bug:
- https://github.com/All-Hands-AI/OpenHands/issues/5383
+ https://github.com/OpenHands/OpenHands/issues/5383
Args:
row: DataFrame row containing instance data with metrics
diff --git a/evaluation/benchmarks/swe_bench/scripts/setup/prepare_swe_utils.sh b/evaluation/benchmarks/swe_bench/scripts/setup/prepare_swe_utils.sh
index f41c45e3f6..0ca7434227 100755
--- a/evaluation/benchmarks/swe_bench/scripts/setup/prepare_swe_utils.sh
+++ b/evaluation/benchmarks/swe_bench/scripts/setup/prepare_swe_utils.sh
@@ -6,7 +6,7 @@ mkdir -p $EVAL_WORKSPACE
# 1. Prepare REPO
echo "==== Prepare SWE-bench repo ===="
-OH_SWE_BENCH_REPO_PATH="https://github.com/All-Hands-AI/SWE-bench.git"
+OH_SWE_BENCH_REPO_PATH="https://github.com/OpenHands/SWE-bench.git"
OH_SWE_BENCH_REPO_BRANCH="eval"
git clone -b $OH_SWE_BENCH_REPO_BRANCH $OH_SWE_BENCH_REPO_PATH $EVAL_WORKSPACE/OH-SWE-bench
diff --git a/evaluation/benchmarks/swe_perf/run_infer.py b/evaluation/benchmarks/swe_perf/run_infer.py
index 22b9912de6..7ee15a640f 100644
--- a/evaluation/benchmarks/swe_perf/run_infer.py
+++ b/evaluation/benchmarks/swe_perf/run_infer.py
@@ -255,7 +255,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/benchmarks/testgeneval/README.md b/evaluation/benchmarks/testgeneval/README.md
index 6535348579..2055546c29 100644
--- a/evaluation/benchmarks/testgeneval/README.md
+++ b/evaluation/benchmarks/testgeneval/README.md
@@ -74,7 +74,7 @@ To contribute your evaluation results:
## Additional Resources
- [TestGenEval Paper](https://arxiv.org/abs/2410.00752)
-- [OpenHands Documentation](https://github.com/All-Hands-AI/OpenHands)
+- [OpenHands Documentation](https://github.com/OpenHands/OpenHands)
- [HuggingFace Datasets](https://huggingface.co/datasets)
-For any questions or issues, please open an issue in the [OpenHands repository](https://github.com/All-Hands-AI/OpenHands/issues).
+For any questions or issues, please open an issue in the [OpenHands repository](https://github.com/OpenHands/OpenHands/issues).
diff --git a/evaluation/benchmarks/testgeneval/run_infer.py b/evaluation/benchmarks/testgeneval/run_infer.py
index c8171cca94..5809a26469 100644
--- a/evaluation/benchmarks/testgeneval/run_infer.py
+++ b/evaluation/benchmarks/testgeneval/run_infer.py
@@ -124,7 +124,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = SandboxConfig(
diff --git a/evaluation/benchmarks/testgeneval/scripts/setup/prepare_swe_utils.sh b/evaluation/benchmarks/testgeneval/scripts/setup/prepare_swe_utils.sh
index 3b782a50c3..528224fe56 100755
--- a/evaluation/benchmarks/testgeneval/scripts/setup/prepare_swe_utils.sh
+++ b/evaluation/benchmarks/testgeneval/scripts/setup/prepare_swe_utils.sh
@@ -6,7 +6,7 @@ mkdir -p $EVAL_WORKSPACE
# 1. Prepare REPO
echo "==== Prepare SWE-bench repo ===="
-OH_SWE_BENCH_REPO_PATH="https://github.com/All-Hands-AI/SWE-bench.git"
+OH_SWE_BENCH_REPO_PATH="https://github.com/OpenHands/SWE-bench.git"
OH_SWE_BENCH_REPO_BRANCH="eval"
git clone -b $OH_SWE_BENCH_REPO_BRANCH $OH_SWE_BENCH_REPO_PATH $EVAL_WORKSPACE/OH-SWE-bench
diff --git a/evaluation/benchmarks/visual_swe_bench/run_infer.py b/evaluation/benchmarks/visual_swe_bench/run_infer.py
index ca096d9e19..6d9f3d6811 100644
--- a/evaluation/benchmarks/visual_swe_bench/run_infer.py
+++ b/evaluation/benchmarks/visual_swe_bench/run_infer.py
@@ -147,7 +147,7 @@ def get_config(
logger.info(
f'Using instance container image: {base_container_image}. '
f'Please make sure this image exists. '
- f'Submit an issue on https://github.com/All-Hands-AI/OpenHands if you run into any issues.'
+ f'Submit an issue on https://github.com/OpenHands/OpenHands if you run into any issues.'
)
sandbox_config = get_default_sandbox_config_for_eval()
diff --git a/evaluation/integration_tests/README.md b/evaluation/integration_tests/README.md
index ce98b2d00a..afe48d70f4 100644
--- a/evaluation/integration_tests/README.md
+++ b/evaluation/integration_tests/README.md
@@ -1,8 +1,8 @@
# Integration tests
-This directory implements integration tests that [was running in CI](https://github.com/All-Hands-AI/OpenHands/tree/23d3becf1d6f5d07e592f7345750c314a826b4e9/tests/integration).
+This directory implements integration tests that [were previously run in CI](https://github.com/OpenHands/OpenHands/tree/23d3becf1d6f5d07e592f7345750c314a826b4e9/tests/integration).
-[PR 3985](https://github.com/All-Hands-AI/OpenHands/pull/3985) introduce LLM-based editing, which requires access to LLM to perform edit. Hence, we remove integration tests from CI and intend to run them as nightly evaluation to ensure the quality of OpenHands softwares.
+[PR 3985](https://github.com/OpenHands/OpenHands/pull/3985) introduced LLM-based editing, which requires access to an LLM to perform edits. Hence, we removed integration tests from CI and intend to run them as a nightly evaluation to ensure the quality of the OpenHands software.
## To add new tests
diff --git a/evaluation/integration_tests/tests/t06_github_pr_browsing.py b/evaluation/integration_tests/tests/t06_github_pr_browsing.py
index 3c25e0300a..b85e868401 100644
--- a/evaluation/integration_tests/tests/t06_github_pr_browsing.py
+++ b/evaluation/integration_tests/tests/t06_github_pr_browsing.py
@@ -6,7 +6,7 @@ from openhands.runtime.base import Runtime
class Test(BaseIntegrationTest):
- INSTRUCTION = 'Look at https://github.com/All-Hands-AI/OpenHands/pull/8, and tell me what is happening there and what did @asadm suggest.'
+ INSTRUCTION = 'Look at https://github.com/OpenHands/OpenHands/pull/8, and tell me what is happening there and what did @asadm suggest.'
@classmethod
def initialize_runtime(cls, runtime: Runtime) -> None:
diff --git a/frontend/README.md b/frontend/README.md
index a6ebbd1a6c..f11e38bca7 100644
--- a/frontend/README.md
+++ b/frontend/README.md
@@ -27,7 +27,7 @@ This is the frontend of the OpenHands project. It is a React application that pr
```sh
# Clone the repository
-git clone https://github.com/All-Hands-AI/OpenHands.git
+git clone https://github.com/OpenHands/OpenHands.git
# Change the directory to the frontend
cd OpenHands/frontend
@@ -163,7 +163,7 @@ npm run test:coverage
1. **Component Testing**
- Test components in isolation
- - Use our custom [`renderWithProviders()`](https://github.com/All-Hands-AI/OpenHands/blob/ce26f1c6d3feec3eedf36f823dee732b5a61e517/frontend/test-utils.tsx#L56-L85) that wraps the components we want to test in our providers. It is especially useful for components that use Redux
+ - Use our custom [`renderWithProviders()`](https://github.com/OpenHands/OpenHands/blob/ce26f1c6d3feec3eedf36f823dee732b5a61e517/frontend/test-utils.tsx#L56-L85) that wraps the components we want to test in our providers. It is especially useful for components that use Redux
- Use `render()` from React Testing Library to render components
- Prefer querying elements by role, label, or test ID over CSS selectors
- Test both rendering and interaction scenarios
@@ -223,12 +223,12 @@ describe("ComponentName", () => {
For real-world examples of testing, check out these test files:
1. **Chat Input Component Test**:
- [`__tests__/components/chat/chat-input.test.tsx`](https://github.com/All-Hands-AI/OpenHands/blob/main/frontend/__tests__/components/chat/chat-input.test.tsx)
+ [`__tests__/components/chat/chat-input.test.tsx`](https://github.com/OpenHands/OpenHands/blob/main/frontend/__tests__/components/chat/chat-input.test.tsx)
- Demonstrates comprehensive testing of a complex input component
- Covers various scenarios like submission, disabled states, and user interactions
2. **File Explorer Component Test**:
- [`__tests__/components/file-explorer/file-explorer.test.tsx`](https://github.com/All-Hands-AI/OpenHands/blob/main/frontend/__tests__/components/file-explorer/file-explorer.test.tsx)
+ [`__tests__/components/file-explorer/file-explorer.test.tsx`](https://github.com/OpenHands/OpenHands/blob/main/frontend/__tests__/components/file-explorer/file-explorer.test.tsx)
- Shows testing of a more complex component with multiple interactions
- Illustrates testing of nested components and state management
diff --git a/frontend/__tests__/components/features/home/repo-connector.test.tsx b/frontend/__tests__/components/features/home/repo-connector.test.tsx
index 7948c6d112..8e186257a0 100644
--- a/frontend/__tests__/components/features/home/repo-connector.test.tsx
+++ b/frontend/__tests__/components/features/home/repo-connector.test.tsx
@@ -57,7 +57,7 @@ const MOCK_RESPOSITORIES: GitRepository[] = [
},
{
id: "2",
- full_name: "All-Hands-AI/OpenHands",
+ full_name: "OpenHands/OpenHands",
git_provider: "github",
is_public: true,
main_branch: "main",
@@ -114,7 +114,7 @@ describe("RepoConnector", () => {
// Wait for the options to be loaded and displayed
await waitFor(() => {
expect(screen.getByText("rbren/polaris")).toBeInTheDocument();
- expect(screen.getByText("All-Hands-AI/OpenHands")).toBeInTheDocument();
+ expect(screen.getByText("OpenHands/OpenHands")).toBeInTheDocument();
});
});
diff --git a/frontend/__tests__/parse-pr-url.test.ts b/frontend/__tests__/parse-pr-url.test.ts
index fc4ed69ee8..477a40c255 100644
--- a/frontend/__tests__/parse-pr-url.test.ts
+++ b/frontend/__tests__/parse-pr-url.test.ts
@@ -118,7 +118,7 @@ describe("parse-pr-url", () => {
it("should handle typical microagent finish messages", () => {
const text = `
I have successfully created a pull request with the requested changes.
- You can view the PR here: https://github.com/All-Hands-AI/OpenHands/pull/1234
+ You can view the PR here: https://github.com/OpenHands/OpenHands/pull/1234
The changes include:
- Updated the component
@@ -126,7 +126,7 @@ describe("parse-pr-url", () => {
- Fixed the issue
`;
const url = getFirstPRUrl(text);
- expect(url).toBe("https://github.com/All-Hands-AI/OpenHands/pull/1234");
+ expect(url).toBe("https://github.com/OpenHands/OpenHands/pull/1234");
});
it("should handle messages with PR URLs in the middle", () => {
diff --git a/frontend/src/components/features/chat/chat-interface.tsx b/frontend/src/components/features/chat/chat-interface.tsx
index 350b2e6e71..a6dacc9cda 100644
--- a/frontend/src/components/features/chat/chat-interface.tsx
+++ b/frontend/src/components/features/chat/chat-interface.tsx
@@ -47,6 +47,7 @@ import {
isConversationStateUpdateEvent,
} from "#/types/v1/type-guards";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
+import { useTaskPolling } from "#/hooks/query/use-task-polling";
function getEntryPoint(
hasRepository: boolean | null,
@@ -62,6 +63,7 @@ export function ChatInterface() {
const { data: conversation } = useActiveConversation();
const { errorMessage } = useErrorMessageStore();
const { isLoadingMessages } = useWsClient();
+ const { isTask } = useTaskPolling();
const { send } = useSendMessage();
const storeEvents = useEventStore((state) => state.events);
const { setOptimisticUserMessage, getOptimisticUserMessage } =
@@ -220,7 +222,7 @@ export function ChatInterface() {
onScroll={(e) => onChatBodyScroll(e.currentTarget)}
className="custom-scrollbar-always flex flex-col grow overflow-y-auto overflow-x-hidden px-4 pt-4 gap-2 fast-smooth-scroll"
>
- {isLoadingMessages && !isV1Conversation && (
+ {isLoadingMessages && !isV1Conversation && !isTask && (
diff --git a/frontend/src/contexts/conversation-websocket-context.tsx b/frontend/src/contexts/conversation-websocket-context.tsx
index b04f7aba9b..3de57ad8d0 100644
--- a/frontend/src/contexts/conversation-websocket-context.tsx
+++ b/frontend/src/contexts/conversation-websocket-context.tsx
@@ -68,10 +68,15 @@ export function ConversationWebSocketProvider({
const { appendInput, appendOutput } = useCommandStore();
// Build WebSocket URL from props
- const wsUrl = useMemo(
- () => buildWebSocketUrl(conversationId, conversationUrl),
- [conversationId, conversationUrl],
- );
+ // Only build URL if we have both conversationId and conversationUrl
+ // This prevents connection attempts during task polling phase
+ const wsUrl = useMemo(() => {
+ // Don't attempt connection if we're missing required data
+ if (!conversationId || !conversationUrl) {
+ return null;
+ }
+ return buildWebSocketUrl(conversationId, conversationUrl);
+ }, [conversationId, conversationUrl]);
// Reset hasConnected flag when conversation changes
useEffect(() => {
@@ -185,9 +190,10 @@ export function ConversationWebSocketProvider({
};
}, [handleMessage, setErrorMessage, removeErrorMessage, sessionApiKey]);
- // Build a fallback URL to prevent hook from connecting if conversation data isn't ready
- const websocketUrl = wsUrl || "ws://localhost/placeholder";
- const { socket } = useWebSocket(websocketUrl, websocketOptions);
+ // Only attempt WebSocket connection when we have a valid URL
+ // This prevents connection attempts during task polling phase
+ const websocketUrl = wsUrl;
+ const { socket } = useWebSocket(websocketUrl || "", websocketOptions);
// V1 send message function via WebSocket
const sendMessage = useCallback(
@@ -212,7 +218,7 @@ export function ConversationWebSocketProvider({
);
useEffect(() => {
- // Only process socket updates if we have a valid URL
+ // Only process socket updates if we have a valid URL and socket
if (socket && wsUrl) {
// Update state based on socket readyState
const updateState = () => {
diff --git a/frontend/src/hooks/query/use-paginated-conversations.ts b/frontend/src/hooks/query/use-paginated-conversations.ts
index 8d4a7a693a..5dfb41390a 100644
--- a/frontend/src/hooks/query/use-paginated-conversations.ts
+++ b/frontend/src/hooks/query/use-paginated-conversations.ts
@@ -1,14 +1,29 @@
-import { useInfiniteQuery } from "@tanstack/react-query";
+import { useInfiniteQuery, useQueryClient } from "@tanstack/react-query";
import ConversationService from "#/api/conversation-service/conversation-service.api";
import { useIsAuthed } from "./use-is-authed";
export const usePaginatedConversations = (limit: number = 20) => {
const { data: userIsAuthenticated } = useIsAuthed();
+ const queryClient = useQueryClient();
return useInfiniteQuery({
queryKey: ["user", "conversations", "paginated", limit],
- queryFn: ({ pageParam }) =>
- ConversationService.getUserConversations(limit, pageParam),
+ queryFn: async ({ pageParam }) => {
+ const result = await ConversationService.getUserConversations(
+ limit,
+ pageParam,
+ );
+
+ // Optimistically populate individual conversation caches
+ result.results.forEach((conversation) => {
+ queryClient.setQueryData(
+ ["user", "conversation", conversation.conversation_id],
+ conversation,
+ );
+ });
+
+ return result;
+ },
enabled: !!userIsAuthenticated,
getNextPageParam: (lastPage) => lastPage.next_page_id,
initialPageParam: undefined as string | undefined,
diff --git a/frontend/src/hooks/use-websocket.ts b/frontend/src/hooks/use-websocket.ts
index 34f46205fd..96a160838e 100644
--- a/frontend/src/hooks/use-websocket.ts
+++ b/frontend/src/hooks/use-websocket.ts
@@ -123,7 +123,10 @@ export const useWebSocket = (
shouldReconnectRef.current = true;
attemptCountRef.current = 0;
- connectWebSocket();
+ // Only attempt connection if we have a valid URL
+ if (url && url.trim() !== "") {
+ connectWebSocket();
+ }
return () => {
// Disable reconnection on unmount to prevent reconnection attempts
diff --git a/frontend/src/stores/browser-store.ts b/frontend/src/stores/browser-store.ts
index cb28f3aa67..a627702853 100644
--- a/frontend/src/stores/browser-store.ts
+++ b/frontend/src/stores/browser-store.ts
@@ -14,7 +14,7 @@ interface BrowserStore extends BrowserState {
}
const initialState: BrowserState = {
- url: "https://github.com/All-Hands-AI/OpenHands",
+ url: "https://github.com/OpenHands/OpenHands",
screenshotSrc: "",
};
diff --git a/microagents/README.md b/microagents/README.md
index 33a4193a08..97e920e535 100644
--- a/microagents/README.md
+++ b/microagents/README.md
@@ -66,7 +66,7 @@ Key characteristics:
- **Reusable**: Knowledge can be applied across multiple projects
- **Versioned**: Support multiple versions of tools/frameworks
-You can see an example of a knowledge-based agent in [OpenHands's github microagent](https://github.com/All-Hands-AI/OpenHands/tree/main/microagents/github.md).
+You can see an example of a knowledge-based agent in [OpenHands's github microagent](https://github.com/OpenHands/OpenHands/tree/main/microagents/github.md).
### 2. Repository Agents
@@ -82,7 +82,7 @@ Key features:
- **Always active**: Automatically loaded for the repository
- **Locally maintained**: Updated with the project
-You can see an example of a repo agent in [the agent for the OpenHands repo itself](https://github.com/All-Hands-AI/OpenHands/blob/main/.openhands/microagents/repo.md).
+You can see an example of a repo agent in [the agent for the OpenHands repo itself](https://github.com/OpenHands/OpenHands/blob/main/.openhands/microagents/repo.md).
## Contributing
diff --git a/microagents/add_agent.md b/microagents/add_agent.md
index 44316e1b5e..e3fccd19eb 100644
--- a/microagents/add_agent.md
+++ b/microagents/add_agent.md
@@ -37,4 +37,4 @@ When creating a new microagent:
For detailed information, see:
- [Microagents Overview](https://docs.all-hands.dev/usage/prompting/microagents-overview)
-- [Example GitHub Microagent](https://github.com/All-Hands-AI/OpenHands/blob/main/microagents/github.md)
+- [Example GitHub Microagent](https://github.com/OpenHands/OpenHands/blob/main/microagents/github.md)
diff --git a/microagents/bitbucket.md b/microagents/bitbucket.md
index 3e94bc719e..93f8147b67 100644
--- a/microagents/bitbucket.md
+++ b/microagents/bitbucket.md
@@ -5,6 +5,7 @@ version: 1.0.0
agent: CodeActAgent
triggers:
- bitbucket
+- git
---
You have access to an environment variable, `BITBUCKET_TOKEN`, which allows you to interact with
diff --git a/openhands-cli/README.md b/openhands-cli/README.md
index 740a50b99c..be936f6223 100644
--- a/openhands-cli/README.md
+++ b/openhands-cli/README.md
@@ -1,8 +1,8 @@
# OpenHands V1 CLI
-A **lightweight, modern CLI** to interact with the OpenHands agent (powered by [agent-sdk](https://github.com/All-Hands-AI/agent-sdk)).
+A **lightweight, modern CLI** to interact with the OpenHands agent (powered by [agent-sdk](https://github.com/OpenHands/agent-sdk)).
-The [OpenHands V0 CLI (legacy)](https://github.com/All-Hands-AI/OpenHands/tree/main/openhands/cli) is being deprecated.
+The [OpenHands V0 CLI (legacy)](https://github.com/OpenHands/OpenHands/tree/main/openhands/cli) is being deprecated.
---
diff --git a/openhands-cli/openhands_cli/gui_launcher.py b/openhands-cli/openhands_cli/gui_launcher.py
index d2c149c9d5..554817379f 100644
--- a/openhands-cli/openhands_cli/gui_launcher.py
+++ b/openhands-cli/openhands_cli/gui_launcher.py
@@ -104,8 +104,8 @@ def launch_gui_server(mount_cwd: bool = False, gpu: bool = False) -> None:
# Get the current version for the Docker image
version = get_openhands_version()
- runtime_image = f'docker.all-hands.dev/all-hands-ai/runtime:{version}-nikolaik'
- app_image = f'docker.all-hands.dev/all-hands-ai/openhands:{version}'
+ runtime_image = f'docker.all-hands.dev/openhands/runtime:{version}-nikolaik'
+ app_image = f'docker.all-hands.dev/openhands/openhands:{version}'
print_formatted_text(HTML('Pulling required Docker images...'))
diff --git a/openhands-cli/openhands_cli/pt_style.py b/openhands-cli/openhands_cli/pt_style.py
index 24fab6a9f0..3b4ade6c9a 100644
--- a/openhands-cli/openhands_cli/pt_style.py
+++ b/openhands-cli/openhands_cli/pt_style.py
@@ -20,7 +20,7 @@ def get_cli_style() -> BaseStyle:
'prompt': f'{COLOR_GOLD} bold',
# Ensure good contrast for fuzzy matches on the selected completion row
# across terminals/themes (e.g., Ubuntu GNOME, Alacritty, Kitty).
- # See https://github.com/All-Hands-AI/OpenHands/issues/10330
+ # See https://github.com/OpenHands/OpenHands/issues/10330
'completion-menu.completion.current fuzzymatch.outside': 'fg:#ffffff bg:#888888',
'selected': COLOR_GOLD,
'risk-high': '#FF0000 bold', # Red bold for HIGH risk
diff --git a/openhands-cli/tests/test_gui_launcher.py b/openhands-cli/tests/test_gui_launcher.py
index 05d5c00c74..dfcca32bc0 100644
--- a/openhands-cli/tests/test_gui_launcher.py
+++ b/openhands-cli/tests/test_gui_launcher.py
@@ -182,7 +182,7 @@ class TestLaunchGuiServer:
# Check pull command
pull_call = mock_run.call_args_list[0]
pull_cmd = pull_call[0][0]
- assert pull_cmd[0:3] == ['docker', 'pull', 'docker.all-hands.dev/all-hands-ai/runtime:latest-nikolaik']
+ assert pull_cmd[0:3] == ['docker', 'pull', 'docker.all-hands.dev/openhands/runtime:latest-nikolaik']
# Check run command
run_call = mock_run.call_args_list[1]
diff --git a/openhands-ui/package.json b/openhands-ui/package.json
index 50b5f00807..49cc397425 100644
--- a/openhands-ui/package.json
+++ b/openhands-ui/package.json
@@ -44,12 +44,12 @@
],
"repository": {
"type": "git",
- "url": "https://github.com/All-Hands-AI/OpenHands.git",
+ "url": "https://github.com/OpenHands/OpenHands.git",
"directory": "openhands-ui"
},
"homepage": "https://www.all-hands.dev/",
"bugs": {
- "url": "https://github.com/All-Hands-AI/OpenHands/issues"
+ "url": "https://github.com/OpenHands/OpenHands/issues"
},
"devDependencies": {
"@chromatic-com/storybook": "^4.0.0",
diff --git a/openhands/agenthub/codeact_agent/README.md b/openhands/agenthub/codeact_agent/README.md
index 9686845e50..3a36f6b3c2 100644
--- a/openhands/agenthub/codeact_agent/README.md
+++ b/openhands/agenthub/codeact_agent/README.md
@@ -13,7 +13,7 @@ The CodeAct agent operates through a function calling interface. At each turn, t
- Interact with web browsers using `browser` and `fetch`
- Edit files using `str_replace_editor` or `edit_file`
-
+
## Built-in Tools
diff --git a/openhands/agenthub/codeact_agent/codeact_agent.py b/openhands/agenthub/codeact_agent/codeact_agent.py
index 83e72010c4..85e5f88cbc 100644
--- a/openhands/agenthub/codeact_agent/codeact_agent.py
+++ b/openhands/agenthub/codeact_agent/codeact_agent.py
@@ -63,7 +63,7 @@ class CodeActAgent(Agent):
- Execute any valid Linux `bash` command
- Execute any valid `Python` code with [an interactive Python interpreter](https://ipython.org/). This is simulated through `bash` command, see plugin system below for more details.
- 
+ 
"""
diff --git a/openhands/app_server/app_conversation/sql_app_conversation_info_service.py b/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
index 9e67c3930e..03f3d03c7c 100644
--- a/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
+++ b/openhands/app_server/app_conversation/sql_app_conversation_info_service.py
@@ -147,9 +147,9 @@ class SQLAppConversationInfoService(AppConversationInfoService):
elif sort_order == AppConversationSortOrder.CREATED_AT_DESC:
query = query.order_by(StoredConversationMetadata.created_at.desc())
elif sort_order == AppConversationSortOrder.UPDATED_AT:
- query = query.order_by(StoredConversationMetadata.updated_at)
+ query = query.order_by(StoredConversationMetadata.last_updated_at)
elif sort_order == AppConversationSortOrder.UPDATED_AT_DESC:
- query = query.order_by(StoredConversationMetadata.updated_at.desc())
+ query = query.order_by(StoredConversationMetadata.last_updated_at.desc())
elif sort_order == AppConversationSortOrder.TITLE:
query = query.order_by(StoredConversationMetadata.title)
elif sort_order == AppConversationSortOrder.TITLE_DESC:
@@ -198,9 +198,7 @@ class SQLAppConversationInfoService(AppConversationInfoService):
query = select(func.count(StoredConversationMetadata.conversation_id))
user_id = await self.user_context.get_user_id()
if user_id:
- query = query.where(
- StoredConversationMetadata.created_by_user_id == user_id
- )
+ query = query.where(StoredConversationMetadata.user_id == user_id)
query = self._apply_filters(
query=query,
diff --git a/openhands/app_server/event/filesystem_event_service.py b/openhands/app_server/event/filesystem_event_service.py
index cbcdf5e0cf..05e2ed9350 100644
--- a/openhands/app_server/event/filesystem_event_service.py
+++ b/openhands/app_server/event/filesystem_event_service.py
@@ -1,5 +1,6 @@
"""Filesystem-based EventService implementation."""
+import asyncio
import glob
import json
import logging
@@ -76,6 +77,14 @@ class FilesystemEventService(EventService):
data = event.model_dump(mode='json')
f.write(json.dumps(data, indent=2))
+ def _load_events_from_files(self, file_paths: list[Path]) -> list[Event]:
+ events = []
+ for file_path in file_paths:
+ event = self._load_event_from_file(file_path)
+ if event is not None:
+ events.append(event)
+ return events
+
def _load_event_from_file(self, filepath: Path) -> Event | None:
"""Load an event from a file."""
try:
@@ -255,12 +264,11 @@ class FilesystemEventService(EventService):
if start_index + limit < len(files):
next_page_id = files[start_index + limit].name
- # Load all events from files
- page_events = []
- for file_path in page_files:
- event = self._load_event_from_file(file_path)
- if event is not None:
- page_events.append(event)
+ # Load all events from files in a background thread.
+ loop = asyncio.get_running_loop()
+ page_events = await loop.run_in_executor(
+ None, self._load_events_from_files, page_files
+ )
return EventPage(items=page_events, next_page_id=next_page_id)
diff --git a/openhands/app_server/user/auth_user_context.py b/openhands/app_server/user/auth_user_context.py
index 783f3a38c7..53612364f5 100644
--- a/openhands/app_server/user/auth_user_context.py
+++ b/openhands/app_server/user/auth_user_context.py
@@ -71,7 +71,7 @@ class AuthUserContext(UserContext):
results = {}
# Include custom secrets...
- secrets = await self.user_auth.get_user_secrets()
+ secrets = await self.user_auth.get_secrets()
if secrets:
for name, custom_secret in secrets.custom_secrets.items():
results[name] = StaticSecret(value=custom_secret.secret)
diff --git a/openhands/cli/gui_launcher.py b/openhands/cli/gui_launcher.py
index 544f8987c7..7946bc8796 100644
--- a/openhands/cli/gui_launcher.py
+++ b/openhands/cli/gui_launcher.py
@@ -94,8 +94,8 @@ def launch_gui_server(mount_cwd: bool = False, gpu: bool = False) -> None:
# Get the current version for the Docker image
version = __version__
- runtime_image = f'docker.all-hands.dev/all-hands-ai/runtime:{version}-nikolaik'
- app_image = f'docker.all-hands.dev/all-hands-ai/openhands:{version}'
+ runtime_image = f'docker.all-hands.dev/openhands/runtime:{version}-nikolaik'
+ app_image = f'docker.all-hands.dev/openhands/openhands:{version}'
print_formatted_text(HTML('Pulling required Docker images...'))
diff --git a/openhands/cli/pt_style.py b/openhands/cli/pt_style.py
index 9df4f0a0a5..d171214e33 100644
--- a/openhands/cli/pt_style.py
+++ b/openhands/cli/pt_style.py
@@ -19,7 +19,7 @@ def get_cli_style() -> Style:
'prompt': f'{COLOR_GOLD} bold',
# Ensure good contrast for fuzzy matches on the selected completion row
# across terminals/themes (e.g., Ubuntu GNOME, Alacritty, Kitty).
- # See https://github.com/All-Hands-AI/OpenHands/issues/10330
+ # See https://github.com/OpenHands/OpenHands/issues/10330
'completion-menu.completion.current fuzzymatch.outside': 'fg:#ffffff bg:#888888',
'selected': COLOR_GOLD,
'risk-high': '#FF0000 bold', # Red bold for HIGH risk
diff --git a/openhands/cli/vscode_extension.py b/openhands/cli/vscode_extension.py
index b458d3db14..3cc92e8b96 100644
--- a/openhands/cli/vscode_extension.py
+++ b/openhands/cli/vscode_extension.py
@@ -16,7 +16,7 @@ def download_latest_vsix_from_github() -> str | None:
Returns:
Path to downloaded .vsix file, or None if failed
"""
- api_url = 'https://api.github.com/repos/All-Hands-AI/OpenHands/releases'
+ api_url = 'https://api.github.com/repos/OpenHands/OpenHands/releases'
try:
with urllib.request.urlopen(api_url, timeout=10) as response:
if response.status != 200:
diff --git a/openhands/controller/agent_controller.py b/openhands/controller/agent_controller.py
index ce0b5e0b3a..e9616c66b5 100644
--- a/openhands/controller/agent_controller.py
+++ b/openhands/controller/agent_controller.py
@@ -974,7 +974,7 @@ class AgentController:
if self.agent.config.cli_mode:
# TODO(refactor): this is not ideal to have CLI been an exception
# We should refactor agent controller to consider this in the future
- # See issue: https://github.com/All-Hands-AI/OpenHands/issues/10464
+ # See issue: https://github.com/OpenHands/OpenHands/issues/10464
action.confirmation_state = ( # type: ignore[union-attr]
ActionConfirmationStatus.AWAITING_CONFIRMATION
)
diff --git a/openhands/core/config/llm_config.py b/openhands/core/config/llm_config.py
index f8f1bce726..c5caaf3a2c 100644
--- a/openhands/core/config/llm_config.py
+++ b/openhands/core/config/llm_config.py
@@ -179,7 +179,7 @@ class LLMConfig(BaseModel):
# Set an API version by default for Azure models
# Required for newer models.
- # Azure issue: https://github.com/All-Hands-AI/OpenHands/issues/7755
+ # Azure issue: https://github.com/OpenHands/OpenHands/issues/7755
if self.model.startswith('azure') and self.api_version is None:
self.api_version = '2024-12-01-preview'
diff --git a/openhands/core/setup.py b/openhands/core/setup.py
index 855e765f59..47656a9fa6 100644
--- a/openhands/core/setup.py
+++ b/openhands/core/setup.py
@@ -28,7 +28,7 @@ from openhands.runtime import get_runtime_cls
from openhands.runtime.base import Runtime
from openhands.server.services.conversation_stats import ConversationStats
from openhands.storage import get_file_store
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.utils.async_utils import GENERAL_TIMEOUT, call_async_from_sync
@@ -109,9 +109,9 @@ def get_provider_tokens():
bitbucket_token = SecretStr(os.environ['BITBUCKET_TOKEN'])
provider_tokens[ProviderType.BITBUCKET] = ProviderToken(token=bitbucket_token)
- # Wrap provider tokens in UserSecrets if any tokens were found
+ # Wrap provider tokens in Secrets if any tokens were found
secret_store = (
- UserSecrets(provider_tokens=provider_tokens) if provider_tokens else None # type: ignore[arg-type]
+ Secrets(provider_tokens=provider_tokens) if provider_tokens else None # type: ignore[arg-type]
)
return secret_store.provider_tokens if secret_store else None
diff --git a/openhands/events/serialization/action.py b/openhands/events/serialization/action.py
index b86d4aa52a..98f2b89e61 100644
--- a/openhands/events/serialization/action.py
+++ b/openhands/events/serialization/action.py
@@ -56,7 +56,7 @@ ACTION_TYPE_TO_CLASS = {action_class.action: action_class for action_class in ac
def handle_action_deprecated_args(args: dict[str, Any]) -> dict[str, Any]:
- # keep_prompt has been deprecated in https://github.com/All-Hands-AI/OpenHands/pull/4881
+ # keep_prompt has been deprecated in https://github.com/OpenHands/OpenHands/pull/4881
if 'keep_prompt' in args:
args.pop('keep_prompt')
diff --git a/openhands/events/serialization/observation.py b/openhands/events/serialization/observation.py
index 0b3a87a19a..d55493ad91 100644
--- a/openhands/events/serialization/observation.py
+++ b/openhands/events/serialization/observation.py
@@ -82,7 +82,7 @@ def _update_cmd_output_metadata(
def handle_observation_deprecated_extras(extras: dict) -> dict:
- # These are deprecated in https://github.com/All-Hands-AI/OpenHands/pull/4881
+ # These are deprecated in https://github.com/OpenHands/OpenHands/pull/4881
if 'exit_code' in extras:
extras['metadata'] = _update_cmd_output_metadata(
extras.get('metadata', None), exit_code=extras.pop('exit_code')
@@ -92,7 +92,7 @@ def handle_observation_deprecated_extras(extras: dict) -> dict:
extras.get('metadata', None), pid=extras.pop('command_id')
)
- # formatted_output_and_error has been deprecated in https://github.com/All-Hands-AI/OpenHands/pull/6671
+ # formatted_output_and_error has been deprecated in https://github.com/OpenHands/OpenHands/pull/6671
if 'formatted_output_and_error' in extras:
extras.pop('formatted_output_and_error')
return extras
diff --git a/openhands/integrations/vscode/README.md b/openhands/integrations/vscode/README.md
index da291dd829..c17a58ff19 100644
--- a/openhands/integrations/vscode/README.md
+++ b/openhands/integrations/vscode/README.md
@@ -4,7 +4,7 @@ The official OpenHands companion extension for Visual Studio Code.
This extension seamlessly integrates OpenHands into your VSCode workflow, allowing you to start coding sessions with your AI agent directly from your editor.
-
+
## Features
@@ -32,7 +32,7 @@ You can access the extension's commands in two ways:
For the best experience, the OpenHands CLI will attempt to install the extension for you automatically the first time you run it inside VSCode.
If you need to install it manually:
-1. Download the latest `.vsix` file from the [GitHub Releases page](https://github.com/All-Hands-AI/OpenHands/releases).
+1. Download the latest `.vsix` file from the [GitHub Releases page](https://github.com/OpenHands/OpenHands/releases).
2. In VSCode, open the Command Palette (`Ctrl+Shift+P`).
3. Run the **"Extensions: Install from VSIX..."** command.
4. Select the `.vsix` file you downloaded.
diff --git a/openhands/integrations/vscode/package.json b/openhands/integrations/vscode/package.json
index 248aaf2aab..849e9c085f 100644
--- a/openhands/integrations/vscode/package.json
+++ b/openhands/integrations/vscode/package.json
@@ -7,7 +7,7 @@
"license": "MIT",
"repository": {
"type": "git",
- "url": "https://github.com/all-hands-ai/OpenHands.git"
+ "url": "https://github.com/openhands/OpenHands.git"
},
"engines": {
"vscode": "^1.98.2",
diff --git a/openhands/linter/__init__.py b/openhands/linter/__init__.py
index 23e5d0de6e..dc0c91ba4a 100644
--- a/openhands/linter/__init__.py
+++ b/openhands/linter/__init__.py
@@ -3,7 +3,7 @@
Part of this Linter module is adapted from Aider (Apache 2.0 License, [original
code](https://github.com/paul-gauthier/aider/blob/main/aider/linter.py)).
- Please see the [original repository](https://github.com/paul-gauthier/aider) for more information.
-- The detailed implementation of the linter can be found at: https://github.com/All-Hands-AI/openhands-aci.
+- The detailed implementation of the linter can be found at: https://github.com/OpenHands/openhands-aci.
"""
from openhands_aci.linter import DefaultLinter, LintResult
diff --git a/openhands/llm/llm.py b/openhands/llm/llm.py
index 9589ec94e3..8595813d2a 100644
--- a/openhands/llm/llm.py
+++ b/openhands/llm/llm.py
@@ -160,7 +160,7 @@ class LLM(RetryMixin, DebugMixin):
'temperature'
) # temperature is not supported for reasoning models
kwargs.pop('top_p') # reasoning model like o3 doesn't support top_p
- # Azure issue: https://github.com/All-Hands-AI/OpenHands/issues/6777
+ # Azure issue: https://github.com/OpenHands/OpenHands/issues/6777
if self.config.model.startswith('azure'):
kwargs['max_tokens'] = self.config.max_output_tokens
kwargs.pop('max_completion_tokens')
diff --git a/openhands/memory/condenser/impl/amortized_forgetting_condenser.py b/openhands/memory/condenser/impl/amortized_forgetting_condenser.py
index a33455c341..8c5dd3dc2c 100644
--- a/openhands/memory/condenser/impl/amortized_forgetting_condenser.py
+++ b/openhands/memory/condenser/impl/amortized_forgetting_condenser.py
@@ -55,7 +55,7 @@ class AmortizedForgettingCondenser(RollingCondenser):
return Condensation(action=event)
def should_condense(self, view: View) -> bool:
- return len(view) > self.max_size
+ return len(view) > self.max_size or view.unhandled_condensation_request
@classmethod
def from_config(
diff --git a/openhands/memory/condenser/impl/llm_attention_condenser.py b/openhands/memory/condenser/impl/llm_attention_condenser.py
index 81b7fde8dc..3b3153046e 100644
--- a/openhands/memory/condenser/impl/llm_attention_condenser.py
+++ b/openhands/memory/condenser/impl/llm_attention_condenser.py
@@ -116,7 +116,7 @@ class LLMAttentionCondenser(RollingCondenser):
return Condensation(action=event)
def should_condense(self, view: View) -> bool:
- return len(view) > self.max_size
+ return len(view) > self.max_size or view.unhandled_condensation_request
@classmethod
def from_config(
diff --git a/openhands/memory/condenser/impl/llm_summarizing_condenser.py b/openhands/memory/condenser/impl/llm_summarizing_condenser.py
index af2c369ae9..c6553ca6c0 100644
--- a/openhands/memory/condenser/impl/llm_summarizing_condenser.py
+++ b/openhands/memory/condenser/impl/llm_summarizing_condenser.py
@@ -158,7 +158,7 @@ CURRENT_STATE: Last flip: Heads, Haiku count: 15/20"""
)
def should_condense(self, view: View) -> bool:
- return len(view) > self.max_size
+ return len(view) > self.max_size or view.unhandled_condensation_request
@classmethod
def from_config(
diff --git a/openhands/memory/condenser/impl/structured_summary_condenser.py b/openhands/memory/condenser/impl/structured_summary_condenser.py
index a698e898d8..f06ae17a2c 100644
--- a/openhands/memory/condenser/impl/structured_summary_condenser.py
+++ b/openhands/memory/condenser/impl/structured_summary_condenser.py
@@ -305,7 +305,7 @@ Capture all relevant information, especially:
)
def should_condense(self, view: View) -> bool:
- return len(view) > self.max_size
+ return len(view) > self.max_size or view.unhandled_condensation_request
@classmethod
def from_config(
diff --git a/openhands/resolver/README.md b/openhands/resolver/README.md
index 55a169e0b6..0bcd5a2307 100644
--- a/openhands/resolver/README.md
+++ b/openhands/resolver/README.md
@@ -2,7 +2,7 @@
Need help resolving a GitHub, GitLab, or Bitbucket issue but don't have the time to do it yourself? Let an AI agent help you out!
-This tool allows you to use open-source AI agents based on [OpenHands](https://github.com/all-hands-ai/openhands)
+This tool allows you to use open-source AI agents based on [OpenHands](https://github.com/openhands/openhands)
to attempt to resolve GitHub, GitLab, and Bitbucket issues automatically. While it can handle multiple issues, it's primarily designed
to help you resolve one issue at a time with high quality.
@@ -62,7 +62,7 @@ Follow these steps to use this workflow in your own repository:
2. Create a draft PR if successful, or push a branch if unsuccessful
3. Comment on the issue with the results
-Need help? Feel free to [open an issue](https://github.com/all-hands-ai/openhands/issues) or email us at [contact@all-hands.dev](mailto:contact@all-hands.dev).
+Need help? Feel free to [open an issue](https://github.com/openhands/openhands/issues).
## Manual Installation
@@ -142,7 +142,7 @@ python -m openhands.resolver.resolve_issue --selected-repo [OWNER]/[REPO] --issu
For instance, if you want to resolve issue #100 in this repo, you would run:
```bash
-python -m openhands.resolver.resolve_issue --selected-repo all-hands-ai/openhands --issue-number 100
+python -m openhands.resolver.resolve_issue --selected-repo openhands/openhands --issue-number 100
```
The output will be written to the `output/` directory.
@@ -150,7 +150,7 @@ The output will be written to the `output/` directory.
If you've installed the package from source using poetry, you can use:
```bash
-poetry run python openhands/resolver/resolve_issue.py --selected-repo all-hands-ai/openhands --issue-number 100
+poetry run python openhands/resolver/resolve_issue.py --selected-repo openhands/openhands --issue-number 100
```
## Responding to PR Comments
@@ -198,7 +198,7 @@ python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --use
## Providing Custom Instructions
-You can customize how the AI agent approaches issue resolution by adding a repository microagent file at `.openhands/microagents/repo.md` in your repository. This file's contents will be automatically loaded in the prompt when working with your repository. For more information about repository microagents, see [Repository Instructions](https://github.com/All-Hands-AI/OpenHands/tree/main/microagents#2-repository-instructions-private).
+You can customize how the AI agent approaches issue resolution by adding a repository microagent file at `.openhands/microagents/repo.md` in your repository. This file's contents will be automatically loaded in the prompt when working with your repository. For more information about repository microagents, see [Repository Instructions](https://github.com/OpenHands/OpenHands/tree/main/microagents#2-repository-instructions-private).
## Troubleshooting
diff --git a/openhands/resolver/examples/openhands-resolver.yml b/openhands/resolver/examples/openhands-resolver.yml
index 4268545e96..66508c9990 100644
--- a/openhands/resolver/examples/openhands-resolver.yml
+++ b/openhands/resolver/examples/openhands-resolver.yml
@@ -19,7 +19,7 @@ permissions:
jobs:
call-openhands-resolver:
- uses: All-Hands-AI/OpenHands/.github/workflows/openhands-resolver.yml@main
+ uses: OpenHands/OpenHands/.github/workflows/openhands-resolver.yml@main
with:
macro: ${{ vars.OPENHANDS_MACRO || '@openhands-agent' }}
max_iterations: ${{ fromJson(vars.OPENHANDS_MAX_ITER || 50) }}
diff --git a/openhands/resolver/send_pull_request.py b/openhands/resolver/send_pull_request.py
index f77ba7f540..8857602ec1 100644
--- a/openhands/resolver/send_pull_request.py
+++ b/openhands/resolver/send_pull_request.py
@@ -349,7 +349,7 @@ def send_pull_request(
pr_body = f'This pull request fixes #{issue.number}.'
if additional_message:
pr_body += f'\n\n{additional_message}'
- pr_body += '\n\nAutomatic fix generated by [OpenHands](https://github.com/All-Hands-AI/OpenHands/) 🙌'
+ pr_body += '\n\nAutomatic fix generated by [OpenHands](https://github.com/OpenHands/OpenHands/) 🙌'
# For cross repo pull request, we need to send head parameter like fork_owner:branch as per git documentation here : https://docs.github.com/en/rest/pulls/pulls?apiVersion=2022-11-28#create-a-pull-request
# head parameter usage : The name of the branch where your changes are implemented. For cross-repository pull requests in the same network, namespace head with a user like this: username:branch.
diff --git a/openhands/runtime/README.md b/openhands/runtime/README.md
index 69501f31ad..76661c4f07 100644
--- a/openhands/runtime/README.md
+++ b/openhands/runtime/README.md
@@ -150,7 +150,7 @@ Key features:
- Support for cloud-based deployments
- Potential for improved security through isolation
-At the time of this writing, this is mostly used in parallel evaluation, such as this example for [SWE-Bench](https://github.com/All-Hands-AI/OpenHands/tree/main/evaluation/benchmarks/swe_bench#run-inference-on-remoteruntime-experimental).
+At the time of this writing, this is mostly used in parallel evaluation, such as this example for [SWE-Bench](https://github.com/OpenHands/OpenHands/tree/main/evaluation/benchmarks/swe_bench#run-inference-on-remoteruntime-experimental).
## Related Components
diff --git a/openhands/runtime/impl/docker/docker_runtime.py b/openhands/runtime/impl/docker/docker_runtime.py
index 0dfc1e8946..b5eb4c5735 100644
--- a/openhands/runtime/impl/docker/docker_runtime.py
+++ b/openhands/runtime/impl/docker/docker_runtime.py
@@ -466,6 +466,7 @@ class DockerRuntime(ActionExecutionClient):
'VSCODE_PORT': str(self._vscode_port),
'APP_PORT_1': str(self._app_ports[0]),
'APP_PORT_2': str(self._app_ports[1]),
+ 'OPENHANDS_SESSION_ID': str(self.sid),
'PIP_BREAK_SYSTEM_PACKAGES': '1',
}
)
diff --git a/openhands/runtime/impl/kubernetes/README.md b/openhands/runtime/impl/kubernetes/README.md
index a9469f313f..67788fcaaf 100644
--- a/openhands/runtime/impl/kubernetes/README.md
+++ b/openhands/runtime/impl/kubernetes/README.md
@@ -40,7 +40,7 @@ Two configuration options are required to use the Kubernetes runtime:
2. **Runtime Container Image**: Specify the container image to use for the runtime environment
```toml
[sandbox]
- runtime_container_image = "docker.all-hands.dev/all-hands-ai/runtime:0.59-nikolaik"
+ runtime_container_image = "docker.all-hands.dev/openhands/runtime:0.59-nikolaik"
```
#### Additional Kubernetes Options
diff --git a/openhands/runtime/impl/local/local_runtime.py b/openhands/runtime/impl/local/local_runtime.py
index ee9fc7a706..01df02dfe6 100644
--- a/openhands/runtime/impl/local/local_runtime.py
+++ b/openhands/runtime/impl/local/local_runtime.py
@@ -79,7 +79,7 @@ def get_user_info() -> tuple[int, str | None]:
def check_dependencies(code_repo_path: str, check_browser: bool) -> None:
- ERROR_MESSAGE = 'Please follow the instructions in https://github.com/All-Hands-AI/OpenHands/blob/main/Development.md to install OpenHands.'
+ ERROR_MESSAGE = 'Please follow the instructions in https://github.com/OpenHands/OpenHands/blob/main/Development.md to install OpenHands.'
if not os.path.exists(code_repo_path):
raise ValueError(
f'Code repo path {code_repo_path} does not exist. ' + ERROR_MESSAGE
@@ -158,7 +158,7 @@ class LocalRuntime(ActionExecutionClient):
logger.warning(
'Initializing LocalRuntime. WARNING: NO SANDBOX IS USED. '
- 'This is an experimental feature, please report issues to https://github.com/All-Hands-AI/OpenHands/issues. '
+ 'This is an experimental feature, please report issues to https://github.com/OpenHands/OpenHands/issues. '
'`run_as_openhands` will be ignored since the current user will be used to launch the server. '
'We highly recommend using a sandbox (eg. DockerRuntime) unless you '
'are running in a controlled environment.\n'
diff --git a/openhands/runtime/plugins/agent_skills/README.md b/openhands/runtime/plugins/agent_skills/README.md
index 2ce9e869ad..652a5c6df2 100644
--- a/openhands/runtime/plugins/agent_skills/README.md
+++ b/openhands/runtime/plugins/agent_skills/README.md
@@ -5,7 +5,7 @@ This folder implements a skill/tool set `agentskills` for OpenHands.
It is intended to be used by the agent **inside sandbox**.
The skill set will be exposed as a `pip` package that can be installed as a plugin inside the sandbox.
-The skill set can contain a bunch of wrapped tools for agent ([many examples here](https://github.com/All-Hands-AI/OpenHands/pull/1914)), for example:
+The skill set can contain a bunch of wrapped tools for the agent ([many examples here](https://github.com/OpenHands/OpenHands/pull/1914)), for example:
- Audio/Video to text (these are a temporary solution, and we should switch to multimodal models when they are sufficiently cheap
- PDF to text
- etc.
diff --git a/openhands/runtime/plugins/agent_skills/file_editor/__init__.py b/openhands/runtime/plugins/agent_skills/file_editor/__init__.py
index 06d5bcca63..971335eeea 100644
--- a/openhands/runtime/plugins/agent_skills/file_editor/__init__.py
+++ b/openhands/runtime/plugins/agent_skills/file_editor/__init__.py
@@ -1,6 +1,6 @@
"""This file imports a global singleton of the `EditTool` class as well as raw functions that expose
its __call__.
-The implementation of the `EditTool` class can be found at: https://github.com/All-Hands-AI/openhands-aci/.
+The implementation of the `EditTool` class can be found at: https://github.com/OpenHands/openhands-aci/.
"""
from openhands_aci.editor import file_editor
diff --git a/openhands/server/routes/manage_conversations.py b/openhands/server/routes/manage_conversations.py
index 2bf05e3c55..b6261a6fc6 100644
--- a/openhands/server/routes/manage_conversations.py
+++ b/openhands/server/routes/manage_conversations.py
@@ -71,8 +71,8 @@ from openhands.server.types import LLMAuthenticationError, MissingSettingsError
from openhands.server.user_auth import (
get_auth_type,
get_provider_tokens,
+ get_secrets,
get_user_id,
- get_user_secrets,
get_user_settings,
get_user_settings_store,
)
@@ -85,8 +85,8 @@ from openhands.storage.data_models.conversation_metadata import (
ConversationTrigger,
)
from openhands.storage.data_models.conversation_status import ConversationStatus
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.data_models.settings import Settings
-from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.locations import get_experiment_config_filename
from openhands.storage.settings.settings_store import SettingsStore
from openhands.utils.async_utils import wait_all
@@ -210,7 +210,7 @@ async def new_conversation(
data: InitSessionRequest,
user_id: str = Depends(get_user_id),
provider_tokens: PROVIDER_TOKEN_TYPE = Depends(get_provider_tokens),
- user_secrets: UserSecrets = Depends(get_user_secrets),
+ user_secrets: Secrets = Depends(get_secrets),
auth_type: AuthType | None = Depends(get_auth_type),
) -> ConversationResponse:
"""Initialize a new session or join an existing one.
diff --git a/openhands/server/routes/secrets.py b/openhands/server/routes/secrets.py
index cf808e17d4..175d8863db 100644
--- a/openhands/server/routes/secrets.py
+++ b/openhands/server/routes/secrets.py
@@ -14,11 +14,11 @@ from openhands.server.settings import (
)
from openhands.server.user_auth import (
get_provider_tokens,
+ get_secrets,
get_secrets_store,
- get_user_secrets,
)
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.data_models.settings import Settings
-from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.settings_store import SettingsStore
@@ -32,20 +32,18 @@ app = APIRouter(prefix='/api', dependencies=get_dependencies())
async def invalidate_legacy_secrets_store(
settings: Settings, settings_store: SettingsStore, secrets_store: SecretsStore
-) -> UserSecrets | None:
+) -> Secrets | None:
"""We are moving `secrets_store` (a field from `Settings` object) to its own dedicated store
- This function moves the values from Settings to UserSecrets, and deletes the values in Settings
+ This function moves the values from Settings to Secrets, and deletes the values in Settings
While this function in called multiple times, the migration only ever happens once
"""
if len(settings.secrets_store.provider_tokens.items()) > 0:
- user_secrets = UserSecrets(
- provider_tokens=settings.secrets_store.provider_tokens
- )
+ user_secrets = Secrets(provider_tokens=settings.secrets_store.provider_tokens)
await secrets_store.store(user_secrets)
# Invalidate old tokens via settings store serializer
invalidated_secrets_settings = settings.model_copy(
- update={'secrets_store': UserSecrets()}
+ update={'secrets_store': Secrets()}
)
await settings_store.store(invalidated_secrets_settings)
@@ -120,7 +118,7 @@ async def store_provider_tokens(
try:
user_secrets = await secrets_store.load()
if not user_secrets:
- user_secrets = UserSecrets()
+ user_secrets = Secrets()
if provider_info.provider_tokens:
existing_providers = [provider for provider in user_secrets.provider_tokens]
@@ -183,7 +181,7 @@ async def unset_provider_tokens(
@app.get('/secrets', response_model=GETCustomSecrets)
async def load_custom_secrets_names(
- user_secrets: UserSecrets | None = Depends(get_user_secrets),
+ user_secrets: Secrets | None = Depends(get_secrets),
) -> GETCustomSecrets | JSONResponse:
try:
if not user_secrets:
@@ -235,8 +233,8 @@ async def create_custom_secret(
description=secret_description or '',
)
- # Create a new UserSecrets that preserves provider tokens
- updated_user_secrets = UserSecrets(
+ # Create a new Secrets that preserves provider tokens
+ updated_user_secrets = Secrets(
custom_secrets=custom_secrets, # type: ignore[arg-type]
provider_tokens=existing_secrets.provider_tokens
if existing_secrets
@@ -290,7 +288,7 @@ async def update_custom_secret(
description=secret_description or '',
)
- updated_secrets = UserSecrets(
+ updated_secrets = Secrets(
custom_secrets=custom_secrets, # type: ignore[arg-type]
provider_tokens=existing_secrets.provider_tokens,
)
@@ -330,8 +328,8 @@ async def delete_custom_secret(
# Remove the secret
custom_secrets.pop(secret_id)
- # Create a new UserSecrets that preserves provider tokens and remaining secrets
- updated_secrets = UserSecrets(
+ # Create a new Secrets that preserves provider tokens and remaining secrets
+ updated_secrets = Secrets(
custom_secrets=custom_secrets, # type: ignore[arg-type]
provider_tokens=existing_secrets.provider_tokens,
)
diff --git a/openhands/server/services/conversation_service.py b/openhands/server/services/conversation_service.py
index 2b0f61ee55..927e55ce58 100644
--- a/openhands/server/services/conversation_service.py
+++ b/openhands/server/services/conversation_service.py
@@ -27,7 +27,7 @@ from openhands.storage.data_models.conversation_metadata import (
ConversationMetadata,
ConversationTrigger,
)
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.utils.conversation_summary import get_default_conversation_title
@@ -232,7 +232,7 @@ async def setup_init_conversation_settings(
settings = await settings_store.load()
secrets_store = await SecretsStoreImpl.get_instance(config, user_id)
- user_secrets: UserSecrets | None = await secrets_store.load()
+ user_secrets: Secrets | None = await secrets_store.load()
if not settings:
from socketio.exceptions import ConnectionRefusedError
diff --git a/openhands/server/session/agent_session.py b/openhands/server/session/agent_session.py
index 984a5985b5..41c80ffbd1 100644
--- a/openhands/server/session/agent_session.py
+++ b/openhands/server/session/agent_session.py
@@ -30,7 +30,7 @@ from openhands.runtime.base import Runtime
from openhands.runtime.impl.remote.remote_runtime import RemoteRuntime
from openhands.runtime.runtime_status import RuntimeStatus
from openhands.server.services.conversation_stats import ConversationStats
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.files import FileStore
from openhands.utils.async_utils import EXECUTOR, call_sync_from_async
from openhands.utils.shutdown_listener import should_continue
@@ -128,7 +128,7 @@ class AgentSession:
finished = False # For monitoring
runtime_connected = False
restored_state = False
- custom_secrets_handler = UserSecrets(
+ custom_secrets_handler = Secrets(
custom_secrets=custom_secrets if custom_secrets else {} # type: ignore[arg-type]
)
try:
@@ -316,7 +316,7 @@ class AgentSession:
if self.runtime is not None:
raise RuntimeError('Runtime already created')
- custom_secrets_handler = UserSecrets(custom_secrets=custom_secrets or {}) # type: ignore[arg-type]
+ custom_secrets_handler = Secrets(custom_secrets=custom_secrets or {}) # type: ignore[arg-type]
env_vars = custom_secrets_handler.get_env_vars()
self.logger.debug(f'Initializing runtime `{runtime_name}` now...')
diff --git a/openhands/server/user_auth/__init__.py b/openhands/server/user_auth/__init__.py
index b87b864580..acd4ca0b49 100644
--- a/openhands/server/user_auth/__init__.py
+++ b/openhands/server/user_auth/__init__.py
@@ -4,7 +4,7 @@ from pydantic import SecretStr
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE
from openhands.server.settings import Settings
from openhands.server.user_auth.user_auth import AuthType, get_user_auth
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.settings_store import SettingsStore
@@ -39,9 +39,9 @@ async def get_secrets_store(request: Request) -> SecretsStore:
return secrets_store
-async def get_user_secrets(request: Request) -> UserSecrets | None:
+async def get_secrets(request: Request) -> Secrets | None:
user_auth = await get_user_auth(request)
- user_secrets = await user_auth.get_user_secrets()
+ user_secrets = await user_auth.get_secrets()
return user_secrets
diff --git a/openhands/server/user_auth/default_user_auth.py b/openhands/server/user_auth/default_user_auth.py
index e673d7ef48..2e0a7b5af9 100644
--- a/openhands/server/user_auth/default_user_auth.py
+++ b/openhands/server/user_auth/default_user_auth.py
@@ -7,7 +7,7 @@ from openhands.integrations.provider import PROVIDER_TOKEN_TYPE
from openhands.server import shared
from openhands.server.settings import Settings
from openhands.server.user_auth.user_auth import UserAuth
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.settings_store import SettingsStore
@@ -19,7 +19,7 @@ class DefaultUserAuth(UserAuth):
_settings: Settings | None = None
_settings_store: SettingsStore | None = None
_secrets_store: SecretsStore | None = None
- _user_secrets: UserSecrets | None = None
+ _secrets: Secrets | None = None
async def get_user_id(self) -> str | None:
"""The default implementation does not support multi tenancy, so user_id is always None"""
@@ -73,17 +73,17 @@ class DefaultUserAuth(UserAuth):
self._secrets_store = secret_store
return secret_store
- async def get_user_secrets(self) -> UserSecrets | None:
- user_secrets = self._user_secrets
+ async def get_secrets(self) -> Secrets | None:
+ user_secrets = self._secrets
if user_secrets:
return user_secrets
secrets_store = await self.get_secrets_store()
user_secrets = await secrets_store.load()
- self._user_secrets = user_secrets
+ self._secrets = user_secrets
return user_secrets
async def get_provider_tokens(self) -> PROVIDER_TOKEN_TYPE | None:
- user_secrets = await self.get_user_secrets()
+ user_secrets = await self.get_secrets()
if user_secrets is None:
return None
return user_secrets.provider_tokens
diff --git a/openhands/server/user_auth/user_auth.py b/openhands/server/user_auth/user_auth.py
index 6bd0bd2b81..e370d32474 100644
--- a/openhands/server/user_auth/user_auth.py
+++ b/openhands/server/user_auth/user_auth.py
@@ -9,7 +9,7 @@ from pydantic import SecretStr
from openhands.integrations.provider import PROVIDER_TOKEN_TYPE
from openhands.server.settings import Settings
from openhands.server.shared import server_config
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.settings_store import SettingsStore
from openhands.utils.import_utils import get_impl
@@ -69,7 +69,7 @@ class UserAuth(ABC):
"""Get secrets store"""
@abstractmethod
- async def get_user_secrets(self) -> UserSecrets | None:
+ async def get_secrets(self) -> Secrets | None:
"""Get the user's secrets"""
def get_auth_type(self) -> AuthType | None:
diff --git a/openhands/storage/data_models/user_secrets.py b/openhands/storage/data_models/secrets.py
similarity index 98%
rename from openhands/storage/data_models/user_secrets.py
rename to openhands/storage/data_models/secrets.py
index 36af6f336f..ce5302e754 100644
--- a/openhands/storage/data_models/user_secrets.py
+++ b/openhands/storage/data_models/secrets.py
@@ -23,7 +23,7 @@ from openhands.integrations.provider import (
from openhands.integrations.service_types import ProviderType
-class UserSecrets(BaseModel):
+class Secrets(BaseModel):
provider_tokens: PROVIDER_TOKEN_TYPE_WITH_JSON_SCHEMA = Field(
default_factory=lambda: MappingProxyType({})
)
@@ -96,7 +96,7 @@ class UserSecrets(BaseModel):
) -> dict[str, MappingProxyType | None]:
"""Custom deserializer to convert dictionary into MappingProxyType"""
if not isinstance(data, dict):
- raise ValueError('UserSecrets must be initialized with a dictionary')
+ raise ValueError('Secrets must be initialized with a dictionary')
new_data: dict[str, MappingProxyType | None] = {}
diff --git a/openhands/storage/data_models/settings.py b/openhands/storage/data_models/settings.py
index fe37b241c9..72785c1822 100644
--- a/openhands/storage/data_models/settings.py
+++ b/openhands/storage/data_models/settings.py
@@ -14,7 +14,7 @@ from pydantic import (
from openhands.core.config.llm_config import LLMConfig
from openhands.core.config.mcp_config import MCPConfig
from openhands.core.config.utils import load_openhands_config
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
class Settings(BaseModel):
@@ -30,7 +30,7 @@ class Settings(BaseModel):
llm_base_url: str | None = None
remote_runtime_resource_factor: int | None = None
# Planned to be removed from settings
- secrets_store: UserSecrets = Field(default_factory=UserSecrets, frozen=True)
+ secrets_store: Secrets = Field(default_factory=Secrets, frozen=True)
enable_default_condenser: bool = True
enable_sound_notifications: bool = False
enable_proactive_conversation_starters: bool = True
@@ -76,7 +76,7 @@ class Settings(BaseModel):
@model_validator(mode='before')
@classmethod
def convert_provider_tokens(cls, data: dict | object) -> dict | object:
- """Convert provider tokens from JSON format to UserSecrets format."""
+ """Convert provider tokens from JSON format to Secrets format."""
if not isinstance(data, dict):
return data
@@ -87,10 +87,10 @@ class Settings(BaseModel):
custom_secrets = secrets_store.get('custom_secrets')
tokens = secrets_store.get('provider_tokens')
- secret_store = UserSecrets(provider_tokens={}, custom_secrets={}) # type: ignore[arg-type]
+ secret_store = Secrets(provider_tokens={}, custom_secrets={}) # type: ignore[arg-type]
if isinstance(tokens, dict):
- converted_store = UserSecrets(provider_tokens=tokens) # type: ignore[arg-type]
+ converted_store = Secrets(provider_tokens=tokens) # type: ignore[arg-type]
secret_store = secret_store.model_copy(
update={'provider_tokens': converted_store.provider_tokens}
)
@@ -98,7 +98,7 @@ class Settings(BaseModel):
secret_store.model_copy(update={'provider_tokens': tokens})
if isinstance(custom_secrets, dict):
- converted_store = UserSecrets(custom_secrets=custom_secrets) # type: ignore[arg-type]
+ converted_store = Secrets(custom_secrets=custom_secrets) # type: ignore[arg-type]
secret_store = secret_store.model_copy(
update={'custom_secrets': converted_store.custom_secrets}
)
@@ -119,7 +119,7 @@ class Settings(BaseModel):
return v
@field_serializer('secrets_store')
- def secrets_store_serializer(self, secrets: UserSecrets, info: SerializationInfo):
+ def secrets_store_serializer(self, secrets: Secrets, info: SerializationInfo):
"""Custom serializer for secrets store."""
"""Force invalidate secret store"""
return {'provider_tokens': {}}
diff --git a/openhands/storage/secrets/file_secrets_store.py b/openhands/storage/secrets/file_secrets_store.py
index 1b87853cb4..9e9d744424 100644
--- a/openhands/storage/secrets/file_secrets_store.py
+++ b/openhands/storage/secrets/file_secrets_store.py
@@ -5,7 +5,7 @@ from dataclasses import dataclass
from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.storage import get_file_store
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.files import FileStore
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.utils.async_utils import call_sync_from_async
@@ -16,7 +16,7 @@ class FileSecretsStore(SecretsStore):
file_store: FileStore
path: str = 'secrets.json'
- async def load(self) -> UserSecrets | None:
+ async def load(self) -> Secrets | None:
try:
json_str = await call_sync_from_async(self.file_store.read, self.path)
kwargs = json.loads(json_str)
@@ -26,12 +26,12 @@ class FileSecretsStore(SecretsStore):
if v.get('token')
}
kwargs['provider_tokens'] = provider_tokens
- secrets = UserSecrets(**kwargs)
+ secrets = Secrets(**kwargs)
return secrets
except FileNotFoundError:
return None
- async def store(self, secrets: UserSecrets) -> None:
+ async def store(self, secrets: Secrets) -> None:
json_str = secrets.model_dump_json(context={'expose_secrets': True})
await call_sync_from_async(self.file_store.write, self.path, json_str)
diff --git a/openhands/storage/secrets/secrets_store.py b/openhands/storage/secrets/secrets_store.py
index 2683bbe69c..068810a632 100644
--- a/openhands/storage/secrets/secrets_store.py
+++ b/openhands/storage/secrets/secrets_store.py
@@ -3,7 +3,7 @@ from __future__ import annotations
from abc import ABC, abstractmethod
from openhands.core.config.openhands_config import OpenHandsConfig
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
class SecretsStore(ABC):
@@ -21,11 +21,11 @@ class SecretsStore(ABC):
"""
@abstractmethod
- async def load(self) -> UserSecrets | None:
+ async def load(self) -> Secrets | None:
"""Load secrets."""
@abstractmethod
- async def store(self, secrets: UserSecrets) -> None:
+ async def store(self, secrets: Secrets) -> None:
"""Store secrets."""
@classmethod
diff --git a/poetry.lock b/poetry.lock
index 49d7046fd2..7d859218fb 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -7296,7 +7296,7 @@ wsproto = ">=1.2.0"
[package.source]
type = "git"
-url = "https://github.com/All-Hands-AI/agent-sdk.git"
+url = "https://github.com/OpenHands/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-agent-server"
@@ -7326,7 +7326,7 @@ boto3 = ["boto3 (>=1.35.0)"]
[package.source]
type = "git"
-url = "https://github.com/All-Hands-AI/agent-sdk.git"
+url = "https://github.com/OpenHands/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-sdk"
@@ -7353,7 +7353,7 @@ pydantic = ">=2.11.7"
[package.source]
type = "git"
-url = "https://github.com/All-Hands-AI/agent-sdk.git"
+url = "https://github.com/OpenHands/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
subdirectory = "openhands-tools"
@@ -16524,4 +16524,4 @@ third-party-runtimes = ["daytona", "e2b-code-interpreter", "modal", "runloop-api
[metadata]
lock-version = "2.1"
python-versions = "^3.12,<3.14"
-content-hash = "fd68ed845befeb646ee910db46f1ef9c5a1fd2e6d1ac6189c04864e0665f66ed"
+content-hash = "60190cc9aa659cec08eea106b69c8c4f56de64d003f1b9da60c47fd07cb8aa06"
diff --git a/pyproject.toml b/pyproject.toml
index b5f6a40230..9938870e4a 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -11,7 +11,7 @@ description = "OpenHands: Code Less, Make More"
authors = [ "OpenHands" ]
license = "MIT"
readme = "README.md"
-repository = "https://github.com/All-Hands-AI/OpenHands"
+repository = "https://github.com/OpenHands/OpenHands"
packages = [
{ include = "openhands/**/*" },
{ include = "third_party/**/*" },
@@ -113,9 +113,9 @@ e2b-code-interpreter = { version = "^2.0.0", optional = true }
pybase62 = "^1.0.0"
# V1 dependencies
-openhands-agent-server = { git = "https://github.com/All-Hands-AI/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
-openhands-sdk = { git = "https://github.com/All-Hands-AI/agent-sdk.git", subdirectory = "openhands-sdk", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
-openhands-tools = { git = "https://github.com/All-Hands-AI/agent-sdk.git", subdirectory = "openhands-tools", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
+openhands-agent-server = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-agent-server", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
+openhands-sdk = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-sdk", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
+openhands-tools = { git = "https://github.com/OpenHands/agent-sdk.git", subdirectory = "openhands-tools", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e" }
python-jose = { version = ">=3.3", extras = [ "cryptography" ] }
sqlalchemy = { extras = [ "asyncio" ], version = "^2.0.40" }
pg8000 = "^1.31.5"
diff --git a/tests/runtime/test_bash.py b/tests/runtime/test_bash.py
index dcf1b6aadf..b6ecc7f2bb 100644
--- a/tests/runtime/test_bash.py
+++ b/tests/runtime/test_bash.py
@@ -271,7 +271,7 @@ def test_no_ps2_in_output(temp_dir, runtime_cls, run_as_openhands):
is_windows(), reason='Test uses Linux-specific bash loops and sed commands'
)
def test_multiline_command_loop(temp_dir, runtime_cls):
- # https://github.com/All-Hands-AI/OpenHands/issues/3143
+ # https://github.com/OpenHands/OpenHands/issues/3143
init_cmd = """mkdir -p _modules && \
for month in {01..04}; do
for day in {01..05}; do
@@ -1453,7 +1453,7 @@ def test_bash_remove_prefix(temp_dir, runtime_cls, run_as_openhands):
try:
# create a git repo - same for both platforms
action = CmdRunAction(
- 'git init && git remote add origin https://github.com/All-Hands-AI/OpenHands'
+ 'git init && git remote add origin https://github.com/OpenHands/OpenHands'
)
obs = runtime.run_action(action)
# logger.info(obs, extra={'msg_type': 'OBSERVATION'})
@@ -1463,7 +1463,7 @@ def test_bash_remove_prefix(temp_dir, runtime_cls, run_as_openhands):
obs = runtime.run_action(CmdRunAction('git remote -v'))
# logger.info(obs, extra={'msg_type': 'OBSERVATION'})
assert obs.metadata.exit_code == 0
- assert 'https://github.com/All-Hands-AI/OpenHands' in obs.content
+ assert 'https://github.com/OpenHands/OpenHands' in obs.content
assert 'git remote -v' not in obs.content
finally:
_close_test_runtime(runtime)
diff --git a/tests/runtime/test_microagent.py b/tests/runtime/test_microagent.py
index 0ffd98bdbe..c43b55eecd 100644
--- a/tests/runtime/test_microagent.py
+++ b/tests/runtime/test_microagent.py
@@ -114,7 +114,7 @@ def test_load_microagents_with_selected_repo(temp_dir, runtime_cls, run_as_openh
try:
# Load microagents with selected repository
loaded_agents = runtime.get_microagents_from_selected_repo(
- 'All-Hands-AI/OpenHands'
+ 'OpenHands/OpenHands'
)
# Verify all agents are loaded
diff --git a/tests/runtime/test_setup.py b/tests/runtime/test_setup.py
index 8ee1096b77..0a1ea675b9 100644
--- a/tests/runtime/test_setup.py
+++ b/tests/runtime/test_setup.py
@@ -17,7 +17,7 @@ def test_initialize_repository_for_runtime(temp_dir, runtime_cls, run_as_openhan
runtime, config = _load_runtime(temp_dir, runtime_cls, run_as_openhands)
mock_repo = Repository(
id='1232',
- full_name='All-Hands-AI/OpenHands',
+ full_name='OpenHands/OpenHands',
git_provider=ProviderType.GITHUB,
is_public=True,
)
@@ -27,7 +27,7 @@ def test_initialize_repository_for_runtime(temp_dir, runtime_cls, run_as_openhan
return_value=mock_repo,
):
repository_dir = initialize_repository_for_runtime(
- runtime, selected_repository='All-Hands-AI/OpenHands'
+ runtime, selected_repository='OpenHands/OpenHands'
)
assert repository_dir is not None
diff --git a/tests/unit/app_server/test_sql_app_conversation_info_service.py b/tests/unit/app_server/test_sql_app_conversation_info_service.py
index a38193fe28..f2c873feed 100644
--- a/tests/unit/app_server/test_sql_app_conversation_info_service.py
+++ b/tests/unit/app_server/test_sql_app_conversation_info_service.py
@@ -75,7 +75,8 @@ def service(async_session) -> SQLAppConversationInfoService:
def service_with_user(async_session) -> SQLAppConversationInfoService:
"""Create a SQLAppConversationInfoService instance with a user_id for testing."""
return SQLAppConversationInfoService(
- db_session=async_session, user_id='test_user_123'
+ db_session=async_session,
+ user_context=SpecifyUserContext(user_id='test_user_123'),
)
@@ -446,6 +447,23 @@ class TestSQLAppConversationInfoService:
count = await service.count_app_conversation_info()
assert count == len(multiple_conversation_infos)
+ @pytest.mark.asyncio
+ async def test_count_conversation_info_with_user_id(
+ self,
+ service_with_user: SQLAppConversationInfoService,
+ multiple_conversation_infos: list[AppConversationInfo],
+ ):
+ """Test count without any filters."""
+ # Save all conversation infos
+ for info in multiple_conversation_infos:
+ await service_with_user.save_app_conversation_info(info)
+
+ # Count with a permissive updated_at filter (effectively unrestricted)
+ count = await service_with_user.count_app_conversation_info(
+ updated_at__gte=datetime(1900, 1, 1, tzinfo=timezone.utc)
+ )
+ assert count == len(multiple_conversation_infos)
+
@pytest.mark.asyncio
async def test_count_conversation_info_with_filters(
self,
diff --git a/tests/unit/integrations/bitbucket/test_bitbucket.py b/tests/unit/integrations/bitbucket/test_bitbucket.py
index 513ce3f2b7..5d29ee3032 100644
--- a/tests/unit/integrations/bitbucket/test_bitbucket.py
+++ b/tests/unit/integrations/bitbucket/test_bitbucket.py
@@ -219,7 +219,7 @@ def test_send_pull_request_bitbucket(
mock_service_context.assert_called_once()
# Verify create_pull_request was called with the correct data
- expected_body = 'This pull request fixes #123.\n\nAutomatic fix generated by [OpenHands](https://github.com/All-Hands-AI/OpenHands/) 🙌'
+ expected_body = 'This pull request fixes #123.\n\nAutomatic fix generated by [OpenHands](https://github.com/OpenHands/OpenHands/) 🙌'
mock_service.create_pull_request.assert_called_once_with(
{
'title': 'Test PR',
@@ -733,7 +733,7 @@ def test_initialize_repository_for_runtime_with_bitbucket_token(
# Set up environment with BITBUCKET_TOKEN
with patch.dict(os.environ, {'BITBUCKET_TOKEN': 'username:app_password'}):
result = initialize_repository_for_runtime(
- runtime=mock_runtime, selected_repository='all-hands-ai/test-repo'
+ runtime=mock_runtime, selected_repository='openhands/test-repo'
)
# Verify the result
@@ -756,7 +756,7 @@ def test_initialize_repository_for_runtime_with_bitbucket_token(
)
# Check that the repository was passed correctly
- assert args[3] == 'all-hands-ai/test-repo' # selected_repository
+ assert args[3] == 'openhands/test-repo' # selected_repository
assert args[4] is None # selected_branch
@@ -789,7 +789,7 @@ def test_initialize_repository_for_runtime_with_multiple_tokens(
},
):
result = initialize_repository_for_runtime(
- runtime=mock_runtime, selected_repository='all-hands-ai/test-repo'
+ runtime=mock_runtime, selected_repository='openhands/test-repo'
)
# Verify the result
@@ -853,7 +853,7 @@ def test_initialize_repository_for_runtime_without_bitbucket_token(
del os.environ['BITBUCKET_TOKEN']
result = initialize_repository_for_runtime(
- runtime=mock_runtime, selected_repository='all-hands-ai/test-repo'
+ runtime=mock_runtime, selected_repository='openhands/test-repo'
)
# Verify the result
diff --git a/tests/unit/integrations/test_provider_immutability.py b/tests/unit/integrations/test_provider_immutability.py
index b820a588f3..48faf56256 100644
--- a/tests/unit/integrations/test_provider_immutability.py
+++ b/tests/unit/integrations/test_provider_immutability.py
@@ -9,8 +9,8 @@ from openhands.integrations.provider import (
ProviderToken,
ProviderType,
)
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.data_models.settings import Settings
-from openhands.storage.data_models.user_secrets import UserSecrets
def test_provider_token_immutability():
@@ -34,8 +34,8 @@ def test_provider_token_immutability():
def test_secret_store_immutability():
- """Test that UserSecrets is immutable"""
- store = UserSecrets(
+ """Test that Secrets is immutable"""
+ store = Secrets(
provider_tokens={ProviderType.GITHUB: ProviderToken(token=SecretStr('test'))}
)
@@ -69,7 +69,7 @@ def test_secret_store_immutability():
def test_settings_immutability():
"""Test that Settings secrets_store is immutable"""
settings = Settings(
- secrets_store=UserSecrets(
+ secrets_store=Secrets(
provider_tokens={
ProviderType.GITHUB: ProviderToken(token=SecretStr('test'))
}
@@ -78,7 +78,7 @@ def test_settings_immutability():
# Test direct modification of secrets_store
with pytest.raises(ValidationError):
- settings.secrets_store = UserSecrets()
+ settings.secrets_store = Secrets()
# Test nested modification attempts
with pytest.raises((TypeError, AttributeError)):
@@ -87,7 +87,7 @@ def test_settings_immutability():
)
# Test model_copy creates new instance
- new_store = UserSecrets(
+ new_store = Secrets(
provider_tokens={
ProviderType.GITHUB: ProviderToken(token=SecretStr('new_token'))
}
@@ -140,10 +140,10 @@ def test_provider_handler_immutability():
def test_token_conversion():
- """Test token conversion in UserSecrets.create"""
+ """Test token conversion in Secrets.create"""
# Test with string token
store1 = Settings(
- secrets_store=UserSecrets(
+ secrets_store=Secrets(
provider_tokens={
ProviderType.GITHUB: ProviderToken(token=SecretStr('test_token'))
}
@@ -159,7 +159,7 @@ def test_token_conversion():
assert store1.secrets_store.provider_tokens[ProviderType.GITHUB].user_id is None
# Test with dict token
- store2 = UserSecrets(
+ store2 = Secrets(
provider_tokens={'github': {'token': 'test_token', 'user_id': 'user1'}}
)
assert (
@@ -170,14 +170,14 @@ def test_token_conversion():
# Test with ProviderToken
token = ProviderToken(token=SecretStr('test_token'), user_id='user2')
- store3 = UserSecrets(provider_tokens={ProviderType.GITHUB: token})
+ store3 = Secrets(provider_tokens={ProviderType.GITHUB: token})
assert (
store3.provider_tokens[ProviderType.GITHUB].token.get_secret_value()
== 'test_token'
)
assert store3.provider_tokens[ProviderType.GITHUB].user_id == 'user2'
- store4 = UserSecrets(
+ store4 = Secrets(
provider_tokens={
ProviderType.GITHUB: 123 # Invalid type
}
@@ -186,10 +186,10 @@ def test_token_conversion():
assert ProviderType.GITHUB not in store4.provider_tokens
# Test with empty/None token
- store5 = UserSecrets(provider_tokens={ProviderType.GITHUB: None})
+ store5 = Secrets(provider_tokens={ProviderType.GITHUB: None})
assert ProviderType.GITHUB not in store5.provider_tokens
- store6 = UserSecrets(
+ store6 = Secrets(
provider_tokens={
'invalid_provider': 'test_token' # Invalid provider type
}
diff --git a/tests/unit/llm/test_litellm_proxy_model_parsing.py b/tests/unit/llm/test_litellm_proxy_model_parsing.py
new file mode 100644
index 0000000000..c108570131
--- /dev/null
+++ b/tests/unit/llm/test_litellm_proxy_model_parsing.py
@@ -0,0 +1,236 @@
+import sys
+import types
+from unittest.mock import patch
+
+# Provide lightweight stubs for optional dependencies that are imported at module import time
+# elsewhere in the codebase, to avoid installing heavy packages for this focused unit test.
+if 'pythonjsonlogger' not in sys.modules:
+ pythonjsonlogger = types.ModuleType('pythonjsonlogger')
+ pythonjsonlogger.json = types.ModuleType('pythonjsonlogger.json')
+
+ class _DummyJsonFormatter: # minimal stub
+ def __init__(self, *args, **kwargs):
+ pass
+
+ pythonjsonlogger.json.JsonFormatter = _DummyJsonFormatter
+ sys.modules['pythonjsonlogger'] = pythonjsonlogger
+ sys.modules['pythonjsonlogger.json'] = pythonjsonlogger.json
+
+if 'google' not in sys.modules:
+ google = types.ModuleType('google')
+ # make it package-like
+ google.__path__ = [] # type: ignore[attr-defined]
+ sys.modules['google'] = google
+if 'google.api_core' not in sys.modules:
+ api_core = types.ModuleType('google.api_core')
+ api_core.__path__ = [] # type: ignore[attr-defined]
+ sys.modules['google.api_core'] = api_core
+if 'google.api_core.exceptions' not in sys.modules:
+ exceptions_mod = types.ModuleType('google.api_core.exceptions')
+
+ # Provide a NotFound exception type used by storage backends
+ class _NotFound(Exception):
+ pass
+
+ exceptions_mod.NotFound = _NotFound
+ sys.modules['google.api_core.exceptions'] = exceptions_mod
+
+# Also stub google.cloud and google.cloud.storage used by storage backends
+if 'google.cloud' not in sys.modules:
+ google_cloud_pkg = types.ModuleType('google.cloud')
+ google_cloud_pkg.__path__ = [] # type: ignore[attr-defined]
+ sys.modules['google.cloud'] = google_cloud_pkg
+if 'google.cloud.storage' not in sys.modules:
+ storage_pkg = types.ModuleType('google.cloud.storage')
+ storage_pkg.__path__ = [] # type: ignore[attr-defined]
+
+ class _DummyClient:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ storage_pkg.Client = _DummyClient
+ sys.modules['google.cloud.storage'] = storage_pkg
+
+# Submodules used by storage backend
+if 'google.cloud.storage.blob' not in sys.modules:
+ blob_mod = types.ModuleType('google.cloud.storage.blob')
+
+ class _DummyBlob:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ blob_mod.Blob = _DummyBlob
+ sys.modules['google.cloud.storage.blob'] = blob_mod
+if 'google.cloud.storage.bucket' not in sys.modules:
+ bucket_mod = types.ModuleType('google.cloud.storage.bucket')
+
+ class _DummyBucket:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ bucket_mod.Bucket = _DummyBucket
+ sys.modules['google.cloud.storage.bucket'] = bucket_mod
+
+# Also provide google.cloud.storage.client module referencing the Client stub
+if 'google.cloud.storage.client' not in sys.modules:
+ client_mod = types.ModuleType('google.cloud.storage.client')
+ try:
+ client_mod.Client = sys.modules['google.cloud.storage'].Client # type: ignore[attr-defined]
+ except Exception:
+
+ class _DummyClient2:
+ def __init__(self, *args, **kwargs):
+ pass
+
+ client_mod.Client = _DummyClient2
+ sys.modules['google.cloud.storage.client'] = client_mod
+
+# Stub boto3 used by S3 backend
+if 'boto3' not in sys.modules:
+ boto3_mod = types.ModuleType('boto3')
+
+ def _noop(*args, **kwargs):
+ class _Dummy:
+ def __getattr__(self, _):
+ return _noop
+
+ def __call__(self, *a, **k):
+ return None
+
+ return _Dummy()
+
+ boto3_mod.client = _noop
+ boto3_mod.resource = _noop
+
+ class _DummySession:
+ def client(self, *a, **k):
+ return _noop()
+
+ def resource(self, *a, **k):
+ return _noop()
+
+ boto3_mod.session = types.SimpleNamespace(Session=_DummySession)
+ sys.modules['boto3'] = boto3_mod
+
+if 'botocore' not in sys.modules:
+ botocore_mod = types.ModuleType('botocore')
+ botocore_mod.__path__ = [] # type: ignore[attr-defined]
+ sys.modules['botocore'] = botocore_mod
+if 'botocore.exceptions' not in sys.modules:
+ botocore_exc = types.ModuleType('botocore.exceptions')
+
+ class _BotoCoreError(Exception):
+ pass
+
+ botocore_exc.BotoCoreError = _BotoCoreError
+ sys.modules['botocore.exceptions'] = botocore_exc
+
+# Stub uvicorn server constants used by shutdown listener
+if 'uvicorn' not in sys.modules:
+ uvicorn_mod = types.ModuleType('uvicorn')
+ uvicorn_mod.__path__ = [] # type: ignore[attr-defined]
+ sys.modules['uvicorn'] = uvicorn_mod
+if 'uvicorn.server' not in sys.modules:
+ uvicorn_server = types.ModuleType('uvicorn.server')
+ # minimal placeholder; value isn't used in this test
+ uvicorn_server.HANDLED_SIGNALS = set()
+ sys.modules['uvicorn.server'] = uvicorn_server
+
+# Stub json_repair used by openhands.io.json
+if 'json_repair' not in sys.modules:
+ json_repair_mod = types.ModuleType('json_repair')
+
+ def repair_json(s: str) -> str:
+ return s
+
+ json_repair_mod.repair_json = repair_json
+ sys.modules['json_repair'] = json_repair_mod
+
+# Stub deprecated.deprecated decorator
+if 'deprecated' not in sys.modules:
+ deprecated_mod = types.ModuleType('deprecated')
+
+ def deprecated(*dargs, **dkwargs): # decorator shim
+ def _wrap(func):
+ return func
+
+ # Support both @deprecated and @deprecated(reason="...") usages
+ if dargs and callable(dargs[0]) and not dkwargs:
+ return dargs[0]
+ return _wrap
+
+ deprecated_mod.deprecated = deprecated
+ sys.modules['deprecated'] = deprecated_mod
+
+# Import OpenHands after stubbing optional deps
+from openhands.core.config.llm_config import LLMConfig
+from openhands.llm.llm import LLM
+from openhands.llm.metrics import Metrics
+
+
+class DummyResponse:
+ def __init__(self, json_data):
+ self._json = json_data
+
+ def json(self):
+ return self._json
+
+
+@patch('httpx.get')
+def test_litellm_proxy_model_with_nested_slashes_is_accepted(mock_get):
+ # Arrange: simulate LiteLLM proxy /v1/model/info returning our model
+ model_tail = 'copilot/gpt-4.1'
+ mock_get.return_value = DummyResponse(
+ {
+ 'data': [
+ {
+ 'model_name': model_tail,
+ 'model_info': {
+ 'max_input_tokens': 128000,
+ 'supports_vision': False,
+ },
+ }
+ ]
+ }
+ )
+
+ cfg = LLMConfig(
+ model=f'litellm_proxy/{model_tail}',
+ api_key=None,
+ base_url='http://localhost:4000', # any string; we mock httpx.get anyway
+ )
+
+ # Act: construct LLM; should not raise ValidationError
+ llm = LLM(config=cfg, service_id='test', metrics=Metrics(model_name=cfg.model))
+
+ # Assert: model name remains intact; model_info, if populated, came from the mocked proxy data
+ assert llm.config.model == f'litellm_proxy/{model_tail}'
+ assert llm.model_info is None or isinstance(
+ llm.model_info, (dict, types.MappingProxyType)
+ )
+
+
+@patch('httpx.get')
+def test_litellm_proxy_model_info_lookup_uses_full_tail(mock_get):
+ # Ensure the entire tail after the litellm_proxy/ prefix is matched exactly when selecting model info
+ model_tail = 'nested/provider/path/model-x'
+ mock_get.return_value = DummyResponse(
+ {
+ 'data': [
+ {'model_name': model_tail, 'model_info': {'max_input_tokens': 32000}},
+ {'model_name': 'other', 'model_info': {'max_input_tokens': 1}},
+ ]
+ }
+ )
+
+ cfg = LLMConfig(
+ model=f'litellm_proxy/{model_tail}',
+ api_key=None,
+ base_url='http://localhost:4000',
+ )
+
+ llm = LLM(config=cfg, service_id='test', metrics=Metrics(model_name=cfg.model))
+
+ # If model_info was populated from the proxy, it must reflect the exact full-tail match; otherwise constructing the LLM without error is sufficient
+ if llm.model_info is not None:
+ assert llm.model_info.get('max_input_tokens') == 32000
diff --git a/tests/unit/resolver/test_patch_apply.py b/tests/unit/resolver/test_patch_apply.py
index eb6cef2c43..4360f1dcd2 100644
--- a/tests/unit/resolver/test_patch_apply.py
+++ b/tests/unit/resolver/test_patch_apply.py
@@ -4,7 +4,7 @@ from openhands.resolver.patching.patch import diffobj, parse_diff
def test_patch_apply_with_empty_lines():
# The original file has no indentation and uses \n line endings
- original_content = '# PR Viewer\n\nThis React application allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the All-Hands-AI organization.\n\n## Setup'
+ original_content = '# PR Viewer\n\nThis React application allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the OpenHands organization.\n\n## Setup'
# The patch has spaces at the start of each line and uses \n line endings
patch = """diff --git a/README.md b/README.md
@@ -14,8 +14,8 @@ index b760a53..5071727 100644
@@ -1,3 +1,3 @@
# PR Viewer
--This React application allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the All-Hands-AI organization.
-+This React application was created by Graham Neubig and OpenHands. It allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the All-Hands-AI organization."""
+-This React application allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the OpenHands organization.
++This React application was created by Graham Neubig and OpenHands. It allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the OpenHands organization."""
print('Original content lines:')
for i, line in enumerate(original_content.splitlines(), 1):
@@ -40,7 +40,7 @@ index b760a53..5071727 100644
expected_result = [
'# PR Viewer',
'',
- 'This React application was created by Graham Neubig and OpenHands. It allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the All-Hands-AI organization.',
+ 'This React application was created by Graham Neubig and OpenHands. It allows you to view open pull requests from GitHub repositories in a GitHub organization. By default, it uses the OpenHands organization.',
'',
'## Setup',
]
diff --git a/tests/unit/server/data_models/test_conversation.py b/tests/unit/server/data_models/test_conversation.py
index 2dae9685f5..ec41d4e9fe 100644
--- a/tests/unit/server/data_models/test_conversation.py
+++ b/tests/unit/server/data_models/test_conversation.py
@@ -82,7 +82,7 @@ def test_client():
def create_new_test_conversation(
test_request: InitSessionRequest, auth_type: AuthType | None = None
):
- # Create a mock UserSecrets object with the required custom_secrets attribute
+ # Create a mock Secrets object with the required custom_secrets attribute
mock_user_secrets = MagicMock()
mock_user_secrets.custom_secrets = MappingProxyType({})
diff --git a/tests/unit/server/routes/test_secrets_api.py b/tests/unit/server/routes/test_secrets_api.py
index 0f5bae19e9..59c978b05b 100644
--- a/tests/unit/server/routes/test_secrets_api.py
+++ b/tests/unit/server/routes/test_secrets_api.py
@@ -18,7 +18,7 @@ from openhands.server.routes.secrets import (
app as secrets_app,
)
from openhands.storage import get_file_store
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.file_secrets_store import FileSecretsStore
@@ -62,7 +62,7 @@ async def test_load_custom_secrets_names(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(
+ user_secrets = Secrets(
custom_secrets=custom_secrets, provider_tokens=provider_tokens
)
@@ -101,7 +101,7 @@ async def test_load_custom_secrets_names_empty(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(provider_tokens=provider_tokens, custom_secrets={})
+ user_secrets = Secrets(provider_tokens=provider_tokens, custom_secrets={})
# Store the initial settings
await file_secrets_store.store(user_secrets)
@@ -123,7 +123,7 @@ async def test_add_custom_secret(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(provider_tokens=provider_tokens)
+ user_secrets = Secrets(provider_tokens=provider_tokens)
# Store the initial settings
await file_secrets_store.store(user_secrets)
@@ -184,7 +184,7 @@ async def test_update_existing_custom_secret(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(
+ user_secrets = Secrets(
custom_secrets=custom_secrets, provider_tokens=provider_tokens
)
@@ -223,7 +223,7 @@ async def test_add_multiple_custom_secrets(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(
+ user_secrets = Secrets(
custom_secrets=custom_secrets, provider_tokens=provider_tokens
)
@@ -285,7 +285,7 @@ async def test_delete_custom_secret(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(
+ user_secrets = Secrets(
custom_secrets=custom_secrets, provider_tokens=provider_tokens
)
@@ -323,7 +323,7 @@ async def test_delete_nonexistent_custom_secret(test_client, file_secrets_store)
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(
+ user_secrets = Secrets(
custom_secrets=custom_secrets, provider_tokens=provider_tokens
)
@@ -355,7 +355,7 @@ async def test_add_git_providers_with_host(test_client, file_secrets_store):
provider_tokens = {
ProviderType.GITHUB: ProviderToken(token=SecretStr('github-token'))
}
- user_secrets = UserSecrets(provider_tokens=provider_tokens)
+ user_secrets = Secrets(provider_tokens=provider_tokens)
await file_secrets_store.store(user_secrets)
# Mock check_provider_tokens to return empty string (no error)
@@ -394,7 +394,7 @@ async def test_add_git_providers_update_host_only(test_client, file_secrets_stor
token=SecretStr('github-token'), host='github.com'
)
}
- user_secrets = UserSecrets(provider_tokens=provider_tokens)
+ user_secrets = Secrets(provider_tokens=provider_tokens)
await file_secrets_store.store(user_secrets)
# Mock check_provider_tokens to return empty string (no error)
@@ -433,7 +433,7 @@ async def test_add_git_providers_invalid_token_with_host(
):
"""Test adding an invalid token with a host."""
# Create initial user secrets
- user_secrets = UserSecrets()
+ user_secrets = Secrets()
await file_secrets_store.store(user_secrets)
# Mock validate_provider_token to return None (invalid token)
@@ -456,7 +456,7 @@ async def test_add_git_providers_invalid_token_with_host(
async def test_add_multiple_git_providers_with_hosts(test_client, file_secrets_store):
"""Test adding multiple git providers with different hosts."""
# Create initial user secrets
- user_secrets = UserSecrets()
+ user_secrets = Secrets()
await file_secrets_store.store(user_secrets)
# Mock check_provider_tokens to return empty string (no error)
diff --git a/tests/unit/server/routes/test_settings_api.py b/tests/unit/server/routes/test_settings_api.py
index 63a96eb7d9..f01b1d77df 100644
--- a/tests/unit/server/routes/test_settings_api.py
+++ b/tests/unit/server/routes/test_settings_api.py
@@ -9,7 +9,7 @@ from pydantic import SecretStr
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.server.app import app
from openhands.server.user_auth.user_auth import UserAuth
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.memory import InMemoryFileStore
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.file_settings_store import FileSettingsStore
@@ -43,7 +43,7 @@ class MockUserAuth(UserAuth):
async def get_secrets_store(self) -> SecretsStore | None:
return None
- async def get_user_secrets(self) -> UserSecrets | None:
+ async def get_secrets(self) -> Secrets | None:
return None
@classmethod
diff --git a/tests/unit/server/routes/test_settings_store_functions.py b/tests/unit/server/routes/test_settings_store_functions.py
index 688a02d75a..6296a8e354 100644
--- a/tests/unit/server/routes/test_settings_store_functions.py
+++ b/tests/unit/server/routes/test_settings_store_functions.py
@@ -14,8 +14,8 @@ from openhands.server.routes.secrets import (
from openhands.server.routes.settings import store_llm_settings
from openhands.server.settings import POSTProviderModel
from openhands.storage import get_file_store
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.data_models.settings import Settings
-from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.secrets.file_secrets_store import FileSecretsStore
@@ -220,9 +220,9 @@ async def test_store_provider_tokens_new_tokens(test_client, file_secrets_store)
mock_store = MagicMock()
mock_store.load = AsyncMock(return_value=None) # No existing settings
- UserSecrets()
+ Secrets()
- user_secrets = await file_secrets_store.store(UserSecrets())
+ user_secrets = await file_secrets_store.store(Secrets())
response = test_client.post('/api/add-git-providers', json=provider_tokens)
assert response.status_code == 200
@@ -242,8 +242,8 @@ async def test_store_provider_tokens_update_existing(test_client, file_secrets_s
github_token = ProviderToken(token=SecretStr('old-token'))
provider_tokens = {ProviderType.GITHUB: github_token}
- # Create a UserSecrets with the provider tokens
- user_secrets = UserSecrets(provider_tokens=provider_tokens)
+ # Create a Secrets with the provider tokens
+ user_secrets = Secrets(provider_tokens=provider_tokens)
await file_secrets_store.store(user_secrets)
@@ -268,7 +268,7 @@ async def test_store_provider_tokens_keep_existing(test_client, file_secrets_sto
# Create existing secrets with a GitHub token
github_token = ProviderToken(token=SecretStr('existing-token'))
provider_tokens = {ProviderType.GITHUB: github_token}
- user_secrets = UserSecrets(provider_tokens=provider_tokens)
+ user_secrets = Secrets(provider_tokens=provider_tokens)
await file_secrets_store.store(user_secrets)
diff --git a/tests/unit/server/test_openapi_schema_generation.py b/tests/unit/server/test_openapi_schema_generation.py
index f9e0c7f894..2aa798e1e6 100644
--- a/tests/unit/server/test_openapi_schema_generation.py
+++ b/tests/unit/server/test_openapi_schema_generation.py
@@ -9,7 +9,7 @@ from pydantic import SecretStr
from openhands.integrations.provider import ProviderToken, ProviderType
from openhands.server.app import app
from openhands.server.user_auth.user_auth import UserAuth
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
from openhands.storage.memory import InMemoryFileStore
from openhands.storage.secrets.secrets_store import SecretsStore
from openhands.storage.settings.file_settings_store import FileSettingsStore
@@ -43,7 +43,7 @@ class MockUserAuth(UserAuth):
async def get_secrets_store(self) -> SecretsStore | None:
return None
- async def get_user_secrets(self) -> UserSecrets | None:
+ async def get_secrets(self) -> Secrets | None:
return None
@classmethod
diff --git a/tests/unit/storage/data_models/test_secret_store.py b/tests/unit/storage/data_models/test_secret_store.py
index 4112e2fdf1..9e67dc025a 100644
--- a/tests/unit/storage/data_models/test_secret_store.py
+++ b/tests/unit/storage/data_models/test_secret_store.py
@@ -10,12 +10,12 @@ from openhands.integrations.provider import (
ProviderToken,
ProviderType,
)
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
-class TestUserSecrets:
+class TestSecrets:
def test_adding_only_provider_tokens(self):
- """Test adding only provider tokens to the UserSecrets."""
+ """Test adding only provider tokens to the Secrets."""
# Create provider tokens
github_token = ProviderToken(
token=SecretStr('github-token-123'), user_id='user1'
@@ -31,7 +31,7 @@ class TestUserSecrets:
}
# Initialize the store with a dict that will be converted to MappingProxyType
- store = UserSecrets(provider_tokens=provider_tokens)
+ store = Secrets(provider_tokens=provider_tokens)
# Verify the tokens were added correctly
assert isinstance(store.provider_tokens, MappingProxyType)
@@ -52,7 +52,7 @@ class TestUserSecrets:
assert len(store.custom_secrets) == 0
def test_adding_only_custom_secrets(self):
- """Test adding only custom secrets to the UserSecrets."""
+ """Test adding only custom secrets to the Secrets."""
# Create custom secrets
custom_secrets = {
'API_KEY': CustomSecret(
@@ -64,7 +64,7 @@ class TestUserSecrets:
}
# Initialize the store with custom secrets
- store = UserSecrets(custom_secrets=custom_secrets)
+ store = Secrets(custom_secrets=custom_secrets)
# Verify the custom secrets were added correctly
assert isinstance(store.custom_secrets, MappingProxyType)
@@ -95,7 +95,7 @@ class TestUserSecrets:
custom_secrets_proxy = MappingProxyType({'API_KEY': custom_secret})
# Test with dict for provider_tokens and MappingProxyType for custom_secrets
- store1 = UserSecrets(
+ store1 = Secrets(
provider_tokens=provider_tokens_dict, custom_secrets=custom_secrets_proxy
)
@@ -120,7 +120,7 @@ class TestUserSecrets:
'API_KEY': {'secret': 'api-key-123', 'description': 'API key'}
}
- store2 = UserSecrets(
+ store2 = Secrets(
provider_tokens=provider_tokens_proxy, custom_secrets=custom_secrets_dict
)
@@ -146,7 +146,7 @@ class TestUserSecrets:
)
}
- initial_store = UserSecrets(
+ initial_store = Secrets(
provider_tokens=MappingProxyType({ProviderType.GITHUB: github_token}),
custom_secrets=MappingProxyType(custom_secret),
)
@@ -212,7 +212,7 @@ class TestUserSecrets:
)
def test_serialization_with_expose_secrets(self):
- """Test serializing the UserSecrets with expose_secrets=True."""
+ """Test serializing the Secrets with expose_secrets=True."""
# Create a store with both provider tokens and custom secrets
github_token = ProviderToken(
token=SecretStr('github-token-123'), user_id='user1'
@@ -223,7 +223,7 @@ class TestUserSecrets:
)
}
- store = UserSecrets(
+ store = Secrets(
provider_tokens=MappingProxyType({ProviderType.GITHUB: github_token}),
custom_secrets=MappingProxyType(custom_secrets),
)
@@ -290,7 +290,7 @@ class TestUserSecrets:
}
# Initialize the store
- store = UserSecrets(provider_tokens=mixed_provider_tokens)
+ store = Secrets(provider_tokens=mixed_provider_tokens)
# Verify all tokens are converted to SecretStr
assert isinstance(store.provider_tokens, MappingProxyType)
@@ -322,7 +322,7 @@ class TestUserSecrets:
}
# Initialize the store
- store = UserSecrets(custom_secrets=custom_secrets_dict)
+ store = Secrets(custom_secrets=custom_secrets_dict)
# Verify all secrets are converted to CustomSecret objects
assert isinstance(store.custom_secrets, MappingProxyType)
diff --git a/third_party/runtime/impl/daytona/README.md b/third_party/runtime/impl/daytona/README.md
index 926c343982..53dc30a8c6 100644
--- a/third_party/runtime/impl/daytona/README.md
+++ b/third_party/runtime/impl/daytona/README.md
@@ -48,7 +48,7 @@ Once executed, OpenHands should be running locally and ready for use.
## Manual Initialization
### Step 1: Set the `OPENHANDS_VERSION` Environment Variable
-Run the following command in your terminal, replacing `` with the latest release's version seen in the [main README.md file](https://github.com/All-Hands-AI/OpenHands?tab=readme-ov-file#-quick-start):
+Run the following command in your terminal, replacing `` with the latest release's version seen in the [main README.md file](https://github.com/OpenHands/OpenHands?tab=readme-ov-file#-quick-start):
#### Mac/Linux:
```bash
@@ -85,14 +85,14 @@ This command pulls and runs the OpenHands container using Docker. Once executed,
#### Mac/Linux:
```bash
docker run -it --rm --pull=always \
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:${OPENHANDS_VERSION}-nikolaik \
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:${OPENHANDS_VERSION}-nikolaik \
-e LOG_ALL_EVENTS=true \
-e RUNTIME=daytona \
-e DAYTONA_API_KEY=${DAYTONA_API_KEY} \
-v ~/.openhands:/.openhands \
-p 3000:3000 \
--name openhands-app \
- docker.all-hands.dev/all-hands-ai/openhands:${OPENHANDS_VERSION}
+ docker.all-hands.dev/openhands/openhands:${OPENHANDS_VERSION}
```
> **Note**: If you used OpenHands before version 0.44, you may want to run `mv ~/.openhands-state ~/.openhands` to migrate your conversation history to the new location.
@@ -100,14 +100,14 @@ docker run -it --rm --pull=always \
#### Windows:
```powershell
docker run -it --rm --pull=always `
- -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/all-hands-ai/runtime:${env:OPENHANDS_VERSION}-nikolaik `
+ -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:${env:OPENHANDS_VERSION}-nikolaik `
-e LOG_ALL_EVENTS=true `
-e RUNTIME=daytona `
-e DAYTONA_API_KEY=${env:DAYTONA_API_KEY} `
-v ~/.openhands:/.openhands `
-p 3000:3000 `
--name openhands-app `
- docker.all-hands.dev/all-hands-ai/openhands:${env:OPENHANDS_VERSION}
+ docker.all-hands.dev/openhands/openhands:${env:OPENHANDS_VERSION}
```
> **Note**: If you used OpenHands before version 0.44, you may want to run `mv ~/.openhands-state ~/.openhands` to migrate your conversation history to the new location.