mirror of https://github.com/OpenHands/OpenHands.git (synced 2025-12-26 05:48:36 +08:00)

Merge branch 'main' into openhands/configure-conda-channel-alias

commit e8a0c99902

4 changes: .github/scripts/update_pr_description.sh (vendored)
@@ -13,9 +13,9 @@ DOCKER_RUN_COMMAND="docker run -it --rm \
     -p 3000:3000 \
     -v /var/run/docker.sock:/var/run/docker.sock \
     --add-host host.docker.internal:host-gateway \
-    -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:${SHORT_SHA}-nikolaik \
+    -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:${SHORT_SHA}-nikolaik \
     --name openhands-app-${SHORT_SHA} \
-    docker.all-hands.dev/openhands/openhands:${SHORT_SHA}"
+    docker.openhands.dev/openhands/openhands:${SHORT_SHA}"

 # Define the uvx command
 UVX_RUN_COMMAND="uvx --python 3.12 --from git+https://github.com/OpenHands/OpenHands@${BRANCH_NAME}#subdirectory=openhands-cli openhands"
@@ -71,6 +71,14 @@ jobs:

           echo "✅ Build & test finished without ❌ markers"

+      - name: Verify binary files exist
+        run: |
+          if ! ls openhands-cli/dist/openhands* 1> /dev/null 2>&1; then
+            echo "❌ No binaries found to upload!"
+            exit 1
+          fi
+          echo "✅ Found binaries to upload."
+
       - name: Upload binary artifact
         uses: actions/upload-artifact@v4
         with:
2 changes: .github/workflows/ghcr-build.yml (vendored)

@@ -37,7 +37,6 @@ jobs:
        shell: bash
        id: define-base-images
        run: |
          # Only build nikolaik on PRs, otherwise build both nikolaik and ubuntu.
          if [[ "$GITHUB_EVENT_NAME" == "pull_request" ]]; then
            json=$(jq -n -c '[
              { image: "nikolaik/python-nodejs:python3.12-nodejs22", tag: "nikolaik" },
@@ -46,7 +45,6 @@
          else
            json=$(jq -n -c '[
              { image: "nikolaik/python-nodejs:python3.12-nodejs22", tag: "nikolaik" },
-             { image: "ghcr.io/openhands/python-nodejs:python3.13-nodejs22-trixie", tag: "trixie" },
              { image: "ubuntu:24.04", tag: "ubuntu" }
            ]')
          fi
@@ -83,6 +83,116 @@ VSCode Extension:
- Use `vscode.window.createOutputChannel()` for debug logging instead of `showErrorMessage()` popups
- Pre-commit process runs both frontend and backend checks when committing extension changes

## Enterprise Directory

The `enterprise/` directory contains additional functionality that extends the open-source OpenHands codebase. This includes:
- Authentication and user management (Keycloak integration)
- Database migrations (Alembic)
- Integration services (GitHub, GitLab, Jira, Linear, Slack)
- Billing and subscription management (Stripe)
- Telemetry and analytics (PostHog, custom metrics framework)

### Enterprise Development Setup

**Prerequisites:**
- Python 3.12
- Poetry (for dependency management)
- Node.js 22.x (for the frontend)
- Docker (optional)

**Setup Steps:**
1. First, build the main OpenHands project: `make build`
2. Then install enterprise dependencies: `cd enterprise && poetry install --with dev,test` (this can take a very long time; be patient)
3. Set up enterprise pre-commit hooks: `poetry run pre-commit install --config ./dev_config/python/.pre-commit-config.yaml`

**Running Enterprise Tests:**
```bash
# Enterprise unit tests (full suite)
PYTHONPATH=".:$PYTHONPATH" poetry run --project=enterprise pytest --forked -n auto -s -p no:ddtrace -p no:ddtrace.pytest_bdd -p no:ddtrace.pytest_benchmark ./enterprise/tests/unit --cov=enterprise --cov-branch

# Test specific modules (faster for development)
cd enterprise
PYTHONPATH=".:$PYTHONPATH" poetry run pytest tests/unit/telemetry/ --confcutdir=tests/unit/telemetry

# Enterprise linting (IMPORTANT: use --show-diff-on-failure to match GitHub CI)
poetry run pre-commit run --all-files --show-diff-on-failure --config ./dev_config/python/.pre-commit-config.yaml
```

**Running Enterprise Server:**
```bash
cd enterprise
make start-backend  # Development mode with hot reload
# or
make run            # Full application (backend + frontend)
```

**Key Configuration Files:**
- `enterprise/pyproject.toml` - Enterprise-specific dependencies
- `enterprise/Makefile` - Enterprise build and run commands
- `enterprise/dev_config/python/` - Linting and type checking configuration
- `enterprise/migrations/` - Database migration files

**Database Migrations:**
Enterprise uses Alembic for database migrations. When making schema changes:
1. Create migration files in `enterprise/migrations/versions/`
2. Test migrations thoroughly
3. The CI will check for migration conflicts on PRs
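A minimal sketch of that workflow, assuming the standard Alembic CLI run from the `enterprise/` directory (the revision message is a placeholder):

```bash
cd enterprise
# Generate a migration skeleton in migrations/versions/
poetry run alembic revision -m "add example table"
# Apply all pending migrations, then roll back one step to verify both paths
poetry run alembic upgrade head
poetry run alembic downgrade -1
```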

**Integration Development:**
The enterprise codebase includes integrations for:
- **GitHub** - PR management, webhooks, app installations
- **GitLab** - Similar to GitHub but for GitLab instances
- **Jira** - Issue tracking and project management
- **Linear** - Modern issue tracking
- **Slack** - Team communication and notifications

Each integration follows a consistent pattern with service classes, storage models, and API endpoints.
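An illustrative outline of that pattern (all names below are hypothetical; consult an existing integration for the real layout):

```python
# Service class: handles webhook payloads and starts agent conversations
class ExampleIntegrationManager:
    async def receive_message(self, message) -> None:
        ...  # validate the event, resolve the user, create a conversation


# Storage model: persists per-installation state (tokens, workspace IDs)
class ExampleIntegrationWorkspace:
    ...


# API endpoints: a FastAPI router exposing the webhook callback,
# e.g. POST /integration/example/events -> manager.receive_message(...)
```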

**Important Notes:**
- Enterprise code is licensed under the Polyform Free Trial License (30-day limit)
- The enterprise server extends the OSS server through dynamic imports
- Database changes require careful migration planning in `enterprise/migrations/`
- Always test changes in both OSS and enterprise contexts
- Use the enterprise-specific Makefile commands for development

**Enterprise Testing Best Practices:**

**Database Testing:**
- Use SQLite in-memory databases (`sqlite:///:memory:`) for unit tests instead of real PostgreSQL
- Create module-specific `conftest.py` files with database fixtures
- Mock external database connections in unit tests to avoid dependency on running services
- Use real database connections only for integration tests
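A minimal sketch of such a fixture, assuming the enterprise models share a SQLAlchemy declarative `Base` (the `storage.base` import path is illustrative):

```python
# tests/unit/<module>/conftest.py - in-memory database fixture (illustrative)
import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

from storage.base import Base  # assumed declarative Base; real path may differ


@pytest.fixture
def session_maker():
    # SQLite in memory: fast, isolated, and no running PostgreSQL required
    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)
    yield sessionmaker(bind=engine)
    engine.dispose()
```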

**Import Patterns:**
- Use relative imports without the `enterprise.` prefix in enterprise code
- Example: `from storage.database import session_maker`, not `from enterprise.storage.database import session_maker`
- This ensures code works in both OSS and enterprise contexts

**Test Structure:**
- Place tests in `enterprise/tests/unit/` following the same structure as the source code
- Use `--confcutdir=tests/unit/[module]` when testing specific modules
- Create comprehensive fixtures for complex objects (databases, external services)
- Write platform-agnostic tests (avoid hardcoded OS-specific assertions)

**Mocking Strategy:**
- Use `AsyncMock` for async operations and `MagicMock` for complex objects
- Mock all external dependencies (databases, APIs, file systems) in unit tests
- Use `patch` with correct import paths (e.g., `telemetry.registry.logger`, not `enterprise.telemetry.registry.logger`)
- Test both success and failure scenarios with proper error handling
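For example, a small sketch exercising both paths with `AsyncMock` (assumes pytest-asyncio; the mocked object is a stand-in, not a real enterprise class):

```python
from unittest.mock import AsyncMock

import pytest


@pytest.mark.asyncio
async def test_send_metric_success_and_failure():
    instance = AsyncMock()  # stand-in for an external async client

    # Success path: the call is awaited with the expected arguments
    await instance.send_metric('total_users', 42)
    instance.send_metric.assert_awaited_once_with('total_users', 42)

    # Failure path: the same dependency now raises, as a network error would
    instance.send_metric.side_effect = RuntimeError('network down')
    with pytest.raises(RuntimeError):
        await instance.send_metric('total_users', 42)
```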

**Coverage Goals:**
- Aim for 90%+ test coverage on new enterprise modules
- Focus on critical business logic and error handling paths
- Use `--cov-report=term-missing` to identify uncovered lines
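For instance, combining the module-scoped invocation above with gap reporting (the `--cov` target is a placeholder; point it at the package under test):

```bash
cd enterprise
PYTHONPATH=".:$PYTHONPATH" poetry run pytest tests/unit/telemetry/ \
  --confcutdir=tests/unit/telemetry --cov=server --cov-report=term-missing
```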

**Troubleshooting:**
- If tests fail, ensure all dependencies are installed: `poetry install --with dev,test`
- For database issues, check migration status and run migrations if needed
- For frontend issues, ensure the main OpenHands frontend is built: `make build`
- Check logs in the `logs/` directory for runtime issues
- If tests fail with import errors, verify `PYTHONPATH=".:$PYTHONPATH"` is set
- **If GitHub CI fails but local linting passes**: Always use the `--show-diff-on-failure` flag to match CI behavior exactly

## Template for GitHub Pull Request

If you are starting a pull request (PR), please follow the template in `.github/pull_request_template.md`.

@@ -159,7 +159,7 @@ poetry run pytest ./tests/unit/test_*.py
 To reduce build time (e.g., if no changes were made to the client-runtime component), you can use an existing Docker
 container image by setting the SANDBOX_RUNTIME_CONTAINER_IMAGE environment variable to the desired Docker image.

-Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/openhands/runtime:0.59-nikolaik`
+Example: `export SANDBOX_RUNTIME_CONTAINER_IMAGE=ghcr.io/openhands/runtime:0.60-nikolaik`

 ## Develop inside Docker container

@@ -193,7 +193,7 @@ Here's a guide to the important documentation files in the repository:
 - [/README.md](./README.md): Main project overview, features, and basic setup instructions
 - [/Development.md](./Development.md) (this file): Comprehensive guide for developers working on OpenHands
 - [/CONTRIBUTING.md](./CONTRIBUTING.md): Guidelines for contributing to the project, including code style and PR process
-- [/docs/DOC_STYLE_GUIDE.md](./docs/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
+- [DOC_STYLE_GUIDE.md](https://github.com/All-Hands-AI/docs/blob/main/openhands/DOC_STYLE_GUIDE.md): Standards for writing and maintaining project documentation
 - [/openhands/README.md](./openhands/README.md): Details about the backend Python implementation
 - [/frontend/README.md](./frontend/README.md): Frontend React application setup and development guide
 - [/containers/README.md](./containers/README.md): Information about Docker containers and deployment
10 changes: README.md

@@ -66,10 +66,10 @@ See the [uv installation guide](https://docs.astral.sh/uv/getting-started/instal
 **Launch OpenHands**:
 ```bash
 # Launch the GUI server
-uvx --python 3.12 --from openhands-ai openhands serve
+uvx --python 3.12 openhands serve

 # Or launch the CLI
-uvx --python 3.12 --from openhands-ai openhands
+uvx --python 3.12 openhands
 ```

 You'll find OpenHands running at [http://localhost:3000](http://localhost:3000) (for GUI mode)!

@@ -82,17 +82,17 @@ You'll find OpenHands running at [http://localhost:3000](http://localhost:3000)
 You can also run OpenHands directly with Docker:

 ```bash
-docker pull docker.all-hands.dev/openhands/runtime:0.59-nikolaik
+docker pull docker.openhands.dev/openhands/runtime:0.60-nikolaik

 docker run -it --rm --pull=always \
-    -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.all-hands.dev/openhands/runtime:0.59-nikolaik \
+    -e SANDBOX_RUNTIME_CONTAINER_IMAGE=docker.openhands.dev/openhands/runtime:0.60-nikolaik \
     -e LOG_ALL_EVENTS=true \
     -v /var/run/docker.sock:/var/run/docker.sock \
     -v ~/.openhands:/.openhands \
     -p 3000:3000 \
     --add-host host.docker.internal:host-gateway \
     --name openhands-app \
-    docker.all-hands.dev/openhands/openhands:0.59
+    docker.openhands.dev/openhands/openhands:0.60
 ```

 </details>

@@ -104,6 +104,9 @@ RUN apt-get update && apt-get install -y \
     && apt-get clean \
     && apt-get autoremove -y

+# mark /app as safe git directory to avoid pre-commit errors
+RUN git config --system --add safe.directory /app
+
 WORKDIR /app

 # cache build dependencies

@@ -12,7 +12,7 @@ services:
       - SANDBOX_API_HOSTNAME=host.docker.internal
       - DOCKER_HOST_ADDR=host.docker.internal
       #
-      - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/openhands/runtime:0.59-nikolaik}
+      - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-ghcr.io/openhands/runtime:0.60-nikolaik}
       - SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234}
       - WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
     ports:

@@ -7,7 +7,7 @@ services:
     image: openhands:latest
     container_name: openhands-app-${DATE:-}
     environment:
-      - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.all-hands.dev/openhands/runtime:0.59-nikolaik}
+      - SANDBOX_RUNTIME_CONTAINER_IMAGE=${SANDBOX_RUNTIME_CONTAINER_IMAGE:-docker.openhands.dev/openhands/runtime:0.60-nikolaik}
       #- SANDBOX_USER_ID=${SANDBOX_USER_ID:-1234} # enable this only if you want a specific non-root sandbox user but you will have to manually adjust permissions of ~/.openhands for this user
       - WORKSPACE_MOUNT_PATH=${WORKSPACE_BASE:-$PWD/workspace}
     ports:

@@ -0,0 +1,856 @@

# OpenHands Enterprise Usage Telemetry Service

## Table of Contents

1. [Introduction](#1-introduction)
   - 1.1 [Problem Statement](#11-problem-statement)
   - 1.2 [Proposed Solution](#12-proposed-solution)
2. [User Interface](#2-user-interface)
   - 2.1 [License Warning Banner](#21-license-warning-banner)
   - 2.2 [Administrator Experience](#22-administrator-experience)
3. [Other Context](#3-other-context)
   - 3.1 [Replicated Platform Integration](#31-replicated-platform-integration)
   - 3.2 [Administrator Email Detection Strategy](#32-administrator-email-detection-strategy)
   - 3.3 [Metrics Collection Framework](#33-metrics-collection-framework)
4. [Technical Design](#4-technical-design)
   - 4.1 [Database Schema](#41-database-schema)
     - 4.1.1 [Telemetry Metrics Table](#411-telemetry-metrics-table)
     - 4.1.2 [Telemetry Identity Table](#412-telemetry-identity-table)
   - 4.2 [Metrics Collection Framework](#42-metrics-collection-framework)
     - 4.2.1 [Base Collector Interface](#421-base-collector-interface)
     - 4.2.2 [Collector Registry](#422-collector-registry)
     - 4.2.3 [Example Collector Implementation](#423-example-collector-implementation)
   - 4.3 [Collection and Upload System](#43-collection-and-upload-system)
     - 4.3.1 [Metrics Collection Processor](#431-metrics-collection-processor)
     - 4.3.2 [Replicated Upload Processor](#432-replicated-upload-processor)
   - 4.4 [License Warning System](#44-license-warning-system)
     - 4.4.1 [License Status Endpoint](#441-license-status-endpoint)
     - 4.4.2 [UI Integration](#442-ui-integration)
   - 4.5 [Cronjob Configuration](#45-cronjob-configuration)
     - 4.5.1 [Collection Cronjob](#451-collection-cronjob)
     - 4.5.2 [Upload Cronjob](#452-upload-cronjob)
5. [Implementation Plan](#5-implementation-plan)
   - 5.1 [Database Schema and Models (M1)](#51-database-schema-and-models-m1)
     - 5.1.1 [OpenHands - Database Migration](#511-openhands---database-migration)
     - 5.1.2 [OpenHands - Model Tests](#512-openhands---model-tests)
   - 5.2 [Metrics Collection Framework (M2)](#52-metrics-collection-framework-m2)
     - 5.2.1 [OpenHands - Core Collection Framework](#521-openhands---core-collection-framework)
     - 5.2.2 [OpenHands - Example Collectors](#522-openhands---example-collectors)
     - 5.2.3 [OpenHands - Framework Tests](#523-openhands---framework-tests)
   - 5.3 [Collection and Upload Processors (M3)](#53-collection-and-upload-processors-m3)
     - 5.3.1 [OpenHands - Collection Processor](#531-openhands---collection-processor)
     - 5.3.2 [OpenHands - Upload Processor](#532-openhands---upload-processor)
     - 5.3.3 [OpenHands - Integration Tests](#533-openhands---integration-tests)
   - 5.4 [License Warning API (M4)](#54-license-warning-api-m4)
     - 5.4.1 [OpenHands - License Status API](#541-openhands---license-status-api)
     - 5.4.2 [OpenHands - API Integration](#542-openhands---api-integration)
   - 5.5 [UI Warning Banner (M5)](#55-ui-warning-banner-m5)
     - 5.5.1 [OpenHands - UI Warning Banner](#551-openhands---ui-warning-banner)
     - 5.5.2 [OpenHands - UI Integration](#552-openhands---ui-integration)
   - 5.6 [Helm Chart Deployment Configuration (M6)](#56-helm-chart-deployment-configuration-m6)
     - 5.6.1 [OpenHands-Cloud - Cronjob Manifests](#561-openhands-cloud---cronjob-manifests)
     - 5.6.2 [OpenHands-Cloud - Configuration Management](#562-openhands-cloud---configuration-management)
   - 5.7 [Documentation and Enhanced Collectors (M7)](#57-documentation-and-enhanced-collectors-m7)
     - 5.7.1 [OpenHands - Advanced Collectors](#571-openhands---advanced-collectors)
     - 5.7.2 [OpenHands - Monitoring and Testing](#572-openhands---monitoring-and-testing)
     - 5.7.3 [OpenHands - Technical Documentation](#573-openhands---technical-documentation)

## 1. Introduction

### 1.1 Problem Statement

OpenHands Enterprise (OHE) helm charts are publicly available but not open source, creating a visibility gap for the sales team. Unknown users can install and use OHE without the vendor's knowledge, preventing proper customer engagement and sales pipeline management. Without usage telemetry, the vendor cannot identify potential customers, track installation health, or proactively support users who may need assistance.

### 1.2 Proposed Solution

We propose implementing a comprehensive telemetry service that leverages the Replicated metrics platform and Python SDK to track OHE installations and usage. The solution provides automatic customer discovery, instance monitoring, and usage metrics collection while maintaining a clear license compliance pathway.

The system consists of three main components: (1) a pluggable metrics collection framework that allows developers to easily define and register custom metrics collectors, (2) automated cronjobs that periodically collect metrics and upload them to Replicated's vendor portal, and (3) a license compliance warning system that displays UI notifications when telemetry uploads fail, indicating potential license expiration.

The design ensures that telemetry cannot be easily disabled without breaking core OHE functionality by tying the warning system to environment variables that are essential for OHE operation. This approach balances user transparency with business requirements for customer visibility.

## 2. User Interface

### 2.1 License Warning Banner

When telemetry uploads fail for more than 4 days, users will see a prominent warning banner in the OpenHands Enterprise UI:

```
⚠️ Your OpenHands Enterprise license will expire in 30 days. Please contact support if this issue persists.
```

The banner appears at the top of all pages and cannot be permanently dismissed while the condition persists. Users can temporarily dismiss it, but it will reappear on page refresh until telemetry uploads resume successfully.

### 2.2 Administrator Experience

System administrators will not need to configure the telemetry system manually. The service automatically:

1. **Detects OHE installations** using existing required environment variables (`GITHUB_APP_CLIENT_ID`, `KEYCLOAK_SERVER_URL`, etc.)

2. **Generates unique customer identifiers** using administrator contact information:
   - Customer email: determined by the following priority order:
     1. `OPENHANDS_ADMIN_EMAIL` environment variable (if set in helm values)
     2. Email of the first user who accepted the Terms of Service (earliest `accepted_tos` timestamp)
   - Instance ID: automatically generated by the Replicated SDK using machine fingerprinting (IOPlatformUUID on macOS, D-Bus machine ID on Linux, Machine GUID on Windows)
   - **No Fallback**: if neither email source is available, telemetry collection is skipped until at least one user exists

3. **Collects and uploads metrics transparently** in the background via weekly collection and daily upload cronjobs

4. **Displays warnings only when necessary** for license compliance - no notifications appear during normal operation

## 3. Other Context

### 3.1 Replicated Platform Integration

The Replicated platform provides vendor-hosted infrastructure for collecting customer and instance telemetry. The Python SDK handles authentication, state management, and reliable metric delivery. Key concepts:

- **Customer**: Represents a unique OHE installation, identified by email or installation fingerprint
- **Instance**: Represents a specific deployment of OHE for a customer
- **Metrics**: Custom key-value data points collected from the installation
- **Status**: Instance health indicators (running, degraded, updating, etc.)

The SDK automatically handles machine fingerprinting, local state caching, and retry logic for failed uploads.

### 3.2 Administrator Email Detection Strategy

To identify the appropriate administrator contact for sales outreach, the system uses a two-tier approach that avoids performance penalties on user authentication:

**Tier 1: Explicit Configuration** - The `OPENHANDS_ADMIN_EMAIL` environment variable allows administrators to explicitly specify the contact email during deployment.

**Tier 2: First Active User Detection** - If no explicit email is configured, the system identifies the first user who accepted the Terms of Service (earliest `accepted_tos` timestamp with a valid email). This represents the first person to actively engage with the system and is very likely the administrator or installer.

**No Fallback Needed** - If neither email source is available, telemetry collection is skipped entirely. This ensures we only report meaningful usage data when there are actual active users.

**Performance Optimization**: The admin email determination is performed only during telemetry upload attempts, ensuring zero performance impact on user login flows.

### 3.3 Metrics Collection Framework

The proposed collector framework allows developers to define metrics in a single file change:

```python
@register_collector("user_activity")
class UserActivityCollector(MetricsCollector):
    def collect(self) -> Dict[str, Any]:
        # Query database and return metrics
        return {"active_users_7d": count, "conversations_created": total}
```

Collectors are automatically discovered and executed by the collection cronjob, making the system extensible without modifying core collection logic.

## 4. Technical Design

### 4.1 Database Schema

#### 4.1.1 Telemetry Metrics Table

Stores collected metrics with transmission status tracking:

```sql
CREATE TABLE telemetry_metrics (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    collected_at TIMESTAMP WITH TIME ZONE NOT NULL DEFAULT CURRENT_TIMESTAMP,
    metrics_data JSONB NOT NULL,
    uploaded_at TIMESTAMP WITH TIME ZONE NULL,
    upload_attempts INTEGER DEFAULT 0,
    last_upload_error TEXT NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX idx_telemetry_metrics_collected_at ON telemetry_metrics(collected_at);
CREATE INDEX idx_telemetry_metrics_uploaded_at ON telemetry_metrics(uploaded_at);
```

#### 4.1.2 Telemetry Identity Table

Stores persistent identity information that must survive container restarts:

```sql
CREATE TABLE telemetry_identity (
    id INTEGER PRIMARY KEY DEFAULT 1,
    customer_id VARCHAR(255) NULL,
    instance_id VARCHAR(255) NULL,
    created_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP WITH TIME ZONE DEFAULT CURRENT_TIMESTAMP,
    CONSTRAINT single_identity_row CHECK (id = 1)
);
```

**Design Rationale:**
- **Separation of Concerns**: Identity data (customer_id, instance_id) is separated from operational data
- **Persistent vs Computed**: Only data that cannot be reliably recomputed is persisted
- **Upload Tracking**: Upload timestamps are tied directly to the metrics they represent
- **Simplified Queries**: System state can be derived from the metrics table (e.g., `MAX(uploaded_at)` for the last successful upload)
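For illustration, the derived values mentioned above can be read straight off the metrics table with standard SQL against the schema in 4.1.1:

```sql
-- Last successful upload (drives the license warning check in 4.4.1)
SELECT MAX(uploaded_at) FROM telemetry_metrics;

-- Last collection time (drives the weekly collection interval in 4.3.1)
SELECT MAX(collected_at) FROM telemetry_metrics;

-- Pending batches, oldest first (the upload order used in 4.3.2)
SELECT id, collected_at, upload_attempts
FROM telemetry_metrics
WHERE uploaded_at IS NULL
ORDER BY collected_at;
```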

### 4.2 Metrics Collection Framework

#### 4.2.1 Base Collector Interface

```python
from abc import ABC, abstractmethod
from typing import Dict, Any, List
from dataclasses import dataclass


@dataclass
class MetricResult:
    key: str
    value: Any


class MetricsCollector(ABC):
    """Base class for metrics collectors."""

    @abstractmethod
    def collect(self) -> List[MetricResult]:
        """Collect metrics and return results."""
        pass

    @property
    @abstractmethod
    def collector_name(self) -> str:
        """Unique name for this collector."""
        pass

    def should_collect(self) -> bool:
        """Override to add collection conditions."""
        return True
```

#### 4.2.2 Collector Registry

```python
from typing import Dict, Type, List
import importlib
import pkgutil


class CollectorRegistry:
    """Registry for metrics collectors."""

    def __init__(self):
        self._collectors: Dict[str, Type[MetricsCollector]] = {}

    def register(self, collector_class: Type[MetricsCollector]) -> None:
        """Register a collector class."""
        collector = collector_class()  # instantiate once to read its name
        self._collectors[collector.collector_name] = collector_class

    def get_all_collectors(self) -> List[MetricsCollector]:
        """Get instances of all registered collectors."""
        return [cls() for cls in self._collectors.values()]

    def discover_collectors(self, package_path: str) -> None:
        """Auto-discover collectors in a package.

        Importing each module runs its @register_collector decorators,
        which register the collectors as a side effect.
        """
        package = importlib.import_module(package_path)
        for module_info in pkgutil.walk_packages(package.__path__, prefix=f'{package_path}.'):
            importlib.import_module(module_info.name)


# Global registry instance
collector_registry = CollectorRegistry()


def register_collector(name: str):
    """Decorator to register a collector.

    `name` documents the collector at the call site; the registry itself
    keys on the class's `collector_name` property.
    """
    def decorator(cls: Type[MetricsCollector]) -> Type[MetricsCollector]:
        collector_registry.register(cls)
        return cls
    return decorator
```


#### 4.2.3 Example Collector Implementation

```python
from datetime import datetime, timedelta, timezone

# session_maker, UserSettings and StoredConversationMetadata are the
# enterprise storage helpers/models assumed by this design.


@register_collector("system_metrics")
class SystemMetricsCollector(MetricsCollector):
    """Collects basic system and usage metrics."""

    @property
    def collector_name(self) -> str:
        return "system_metrics"

    def collect(self) -> List[MetricResult]:
        results = []

        with session_maker() as session:
            # Collect user count
            user_count = session.query(UserSettings).count()
            results.append(MetricResult(
                key="total_users",
                value=user_count
            ))

            # Collect conversation count (last 30 days)
            thirty_days_ago = datetime.now(timezone.utc) - timedelta(days=30)
            conversation_count = session.query(StoredConversationMetadata)\
                .filter(StoredConversationMetadata.created_at >= thirty_days_ago)\
                .count()

            results.append(MetricResult(
                key="conversations_30d",
                value=conversation_count
            ))

        return results
```


### 4.3 Collection and Upload System

#### 4.3.1 Metrics Collection Processor

```python
class TelemetryCollectionProcessor(MaintenanceTaskProcessor):
    """Maintenance task processor for collecting metrics."""

    collection_interval_days: int = 7

    async def __call__(self, task: MaintenanceTask) -> dict:
        """Collect metrics from all registered collectors."""

        # Check if collection is needed
        if not self._should_collect():
            return {"status": "skipped", "reason": "too_recent"}

        # Collect metrics from all registered collectors
        all_metrics = {}
        collector_results = {}

        for collector in collector_registry.get_all_collectors():
            try:
                if collector.should_collect():
                    results = collector.collect()
                    for result in results:
                        all_metrics[result.key] = result.value
                    collector_results[collector.collector_name] = len(results)
            except Exception as e:
                logger.error(f"Collector {collector.collector_name} failed: {e}")
                collector_results[collector.collector_name] = f"error: {e}"

        # Store metrics in database
        with session_maker() as session:
            telemetry_record = TelemetryMetrics(
                metrics_data=all_metrics,
                collected_at=datetime.now(timezone.utc)
            )
            session.add(telemetry_record)
            session.commit()

        # Note: no need to track last_collection_at separately;
        # it can be derived from MAX(collected_at) in telemetry_metrics.

        return {
            "status": "completed",
            "metrics_collected": len(all_metrics),
            "collectors_run": collector_results
        }

    def _should_collect(self) -> bool:
        """Check if collection is needed based on interval."""
        with session_maker() as session:
            # Get last collection time from metrics table
            last_collected = session.query(func.max(TelemetryMetrics.collected_at)).scalar()
            if not last_collected:
                return True

        time_since_last = datetime.now(timezone.utc) - last_collected
        return time_since_last.days >= self.collection_interval_days
```

#### 4.3.2 Replicated Upload Processor

```python
import os
from datetime import datetime, timezone

from replicated import AsyncReplicatedClient, InstanceStatus

# session_maker, TelemetryMetrics, TelemetryIdentity, UserSettings and
# logger are the enterprise storage/logging helpers assumed by this design.


class TelemetryUploadProcessor(MaintenanceTaskProcessor):
    """Maintenance task processor for uploading metrics to Replicated."""

    replicated_publishable_key: str
    replicated_app_slug: str

    async def __call__(self, task: MaintenanceTask) -> dict:
        """Upload pending metrics to Replicated."""

        # Get pending metrics
        with session_maker() as session:
            pending_metrics = session.query(TelemetryMetrics)\
                .filter(TelemetryMetrics.uploaded_at.is_(None))\
                .order_by(TelemetryMetrics.collected_at)\
                .all()

        if not pending_metrics:
            return {"status": "no_pending_metrics"}

        # Get admin email - skip if not available
        admin_email = self._get_admin_email()
        if not admin_email:
            logger.info("Skipping telemetry upload - no admin email available")
            return {
                "status": "skipped",
                "reason": "no_admin_email",
                "total_processed": 0
            }

        uploaded_count = 0
        failed_count = 0

        async with AsyncReplicatedClient(
            publishable_key=self.replicated_publishable_key,
            app_slug=self.replicated_app_slug
        ) as client:

            # Get or create customer and instance
            customer = await client.customer.get_or_create(
                email_address=admin_email
            )
            instance = await customer.get_or_create_instance()

            # Store customer/instance IDs for future use
            await self._update_telemetry_identity(customer.customer_id, instance.instance_id)

            # Upload each metric batch
            for metric_record in pending_metrics:
                try:
                    # Send individual metrics
                    for key, value in metric_record.metrics_data.items():
                        await instance.send_metric(key, value)

                    # Update instance status
                    await instance.set_status(InstanceStatus.RUNNING)

                    # Mark as uploaded
                    with session_maker() as session:
                        record = session.query(TelemetryMetrics)\
                            .filter(TelemetryMetrics.id == metric_record.id)\
                            .first()
                        if record:
                            record.uploaded_at = datetime.now(timezone.utc)
                            session.commit()

                    uploaded_count += 1

                except Exception as e:
                    logger.error(f"Failed to upload metrics {metric_record.id}: {e}")

                    # Update error info
                    with session_maker() as session:
                        record = session.query(TelemetryMetrics)\
                            .filter(TelemetryMetrics.id == metric_record.id)\
                            .first()
                        if record:
                            record.upload_attempts += 1
                            record.last_upload_error = str(e)
                            session.commit()

                    failed_count += 1

        # Note: no need to track last_successful_upload_at separately;
        # it can be derived from MAX(uploaded_at) in telemetry_metrics.

        return {
            "status": "completed",
            "uploaded": uploaded_count,
            "failed": failed_count,
            "total_processed": len(pending_metrics)
        }

    def _get_admin_email(self) -> str | None:
        """Get administrator email for customer identification."""
        # 1. Check environment variable first
        env_admin_email = os.getenv('OPENHANDS_ADMIN_EMAIL')
        if env_admin_email:
            logger.info("Using admin email from environment variable")
            return env_admin_email

        # 2. Use first active user's email (earliest accepted_tos)
        with session_maker() as session:
            first_user = session.query(UserSettings)\
                .filter(UserSettings.email.isnot(None))\
                .filter(UserSettings.accepted_tos.isnot(None))\
                .order_by(UserSettings.accepted_tos.asc())\
                .first()

            if first_user and first_user.email:
                logger.info(f"Using first active user email: {first_user.email}")
                return first_user.email

        # No admin email available - skip telemetry
        logger.info("No admin email available - skipping telemetry collection")
        return None

    async def _update_telemetry_identity(self, customer_id: str, instance_id: str) -> None:
        """Update or create telemetry identity record."""
        with session_maker() as session:
            identity = session.query(TelemetryIdentity).first()
            if not identity:
                identity = TelemetryIdentity()
                session.add(identity)

            identity.customer_id = customer_id
            identity.instance_id = instance_id
            session.commit()
```

### 4.4 License Warning System

#### 4.4.1 License Status Endpoint

```python
import os
from datetime import datetime, timezone, timedelta

from fastapi import APIRouter
from sqlalchemy import func

# session_maker and TelemetryMetrics come from the enterprise storage layer.

license_router = APIRouter()


@license_router.get("/license-status")
async def get_license_status():
    """Get license warning status for UI display."""

    # Only show warnings for OHE installations
    if not _is_openhands_enterprise():
        return {"warn": False, "message": ""}

    with session_maker() as session:
        # Get last successful upload time from metrics table
        last_upload = session.query(func.max(TelemetryMetrics.uploaded_at))\
            .filter(TelemetryMetrics.uploaded_at.isnot(None))\
            .scalar()

        if not last_upload:
            # No successful uploads yet - show a verification-pending warning
            return {
                "warn": True,
                "message": "OpenHands Enterprise license verification pending. Please ensure network connectivity."
            }

        # Check if last successful upload was more than 4 days ago
        days_since_upload = (datetime.now(timezone.utc) - last_upload).days

        if days_since_upload > 4:
            # Find oldest unsent batch
            oldest_unsent = session.query(TelemetryMetrics)\
                .filter(TelemetryMetrics.uploaded_at.is_(None))\
                .order_by(TelemetryMetrics.collected_at)\
                .first()

            if oldest_unsent:
                # Calculate expiration date (oldest unsent + 34 days)
                expiration_date = oldest_unsent.collected_at + timedelta(days=34)
                days_until_expiration = (expiration_date - datetime.now(timezone.utc)).days

                if days_until_expiration <= 0:
                    message = "Your OpenHands Enterprise license has expired. Please contact support immediately."
                else:
                    message = f"Your OpenHands Enterprise license will expire in {days_until_expiration} days. Please contact support if this issue persists."

                return {"warn": True, "message": message}

    return {"warn": False, "message": ""}


def _is_openhands_enterprise() -> bool:
    """Detect if this is an OHE installation."""
    # Check for required OHE environment variables
    required_vars = [
        'GITHUB_APP_CLIENT_ID',
        'KEYCLOAK_SERVER_URL',
        'KEYCLOAK_REALM_NAME'
    ]

    return all(os.getenv(var) for var in required_vars)
```

#### 4.4.2 UI Integration

The frontend will poll the license status endpoint and display warnings using the existing banner component pattern:

```typescript
// New component: LicenseWarningBanner.tsx
import { useEffect, useState } from "react";
import { FaExclamationTriangle } from "react-icons/fa";

interface LicenseStatus {
  warn: boolean;
  message: string;
}

export function LicenseWarningBanner() {
  const [licenseStatus, setLicenseStatus] = useState<LicenseStatus>({ warn: false, message: "" });

  useEffect(() => {
    const checkLicenseStatus = async () => {
      try {
        const response = await fetch('/api/license-status');
        const status = await response.json();
        setLicenseStatus(status);
      } catch (error) {
        console.error('Failed to check license status:', error);
      }
    };

    // Check immediately and then every hour
    checkLicenseStatus();
    const interval = setInterval(checkLicenseStatus, 60 * 60 * 1000);

    return () => clearInterval(interval);
  }, []);

  if (!licenseStatus.warn) {
    return null;
  }

  return (
    <div className="bg-red-600 text-white p-4 rounded flex items-center justify-between">
      <div className="flex items-center">
        <FaExclamationTriangle className="mr-3" />
        <span>{licenseStatus.message}</span>
      </div>
    </div>
  );
}
```
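Mounting it is then a one-line addition in the shared layout (placement is illustrative; the actual layout component will differ):

```typescript
// Hypothetical root layout: render the banner above all routed content
export function RootLayout({ children }: { children: React.ReactNode }) {
  return (
    <>
      <LicenseWarningBanner />
      {children}
    </>
  );
}
```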

### 4.5 Cronjob Configuration

The cronjob configurations will be deployed via the OpenHands-Cloud helm charts.

#### 4.5.1 Collection Cronjob

The collection cronjob runs weekly to gather metrics:

```yaml
# charts/openhands/templates/telemetry-collection-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "openhands.fullname" . }}-telemetry-collection
  labels:
    {{- include "openhands.labels" . | nindent 4 }}
spec:
  schedule: "0 2 * * 0"  # Weekly on Sunday at 2 AM
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: telemetry-collector
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
              env:
                {{- include "openhands.env" . | nindent 12 }}
              command:
                - python
                - -c
                - |
                  from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
                  from enterprise.storage.database import session_maker
                  from enterprise.server.telemetry.collection_processor import TelemetryCollectionProcessor

                  # Create collection task
                  processor = TelemetryCollectionProcessor()
                  task = MaintenanceTask()
                  task.set_processor(processor)
                  task.status = MaintenanceTaskStatus.PENDING

                  with session_maker() as session:
                      session.add(task)
                      session.commit()
          restartPolicy: OnFailure
```

#### 4.5.2 Upload Cronjob

The upload cronjob runs daily to send metrics to Replicated:

```yaml
# charts/openhands/templates/telemetry-upload-cronjob.yaml
apiVersion: batch/v1
kind: CronJob
metadata:
  name: {{ include "openhands.fullname" . }}-telemetry-upload
  labels:
    {{- include "openhands.labels" . | nindent 4 }}
spec:
  schedule: "0 3 * * *"  # Daily at 3 AM
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: telemetry-uploader
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
              env:
                {{- include "openhands.env" . | nindent 12 }}
                - name: REPLICATED_PUBLISHABLE_KEY
                  valueFrom:
                    secretKeyRef:
                      name: {{ include "openhands.fullname" . }}-replicated-config
                      key: publishable-key
                - name: REPLICATED_APP_SLUG
                  value: {{ .Values.telemetry.replicatedAppSlug | default "openhands-enterprise" | quote }}
              command:
                - python
                - -c
                - |
                  from enterprise.storage.maintenance_task import MaintenanceTask, MaintenanceTaskStatus
                  from enterprise.storage.database import session_maker
                  from enterprise.server.telemetry.upload_processor import TelemetryUploadProcessor
                  import os

                  # Create upload task
                  processor = TelemetryUploadProcessor(
                      replicated_publishable_key=os.getenv('REPLICATED_PUBLISHABLE_KEY'),
                      replicated_app_slug=os.getenv('REPLICATED_APP_SLUG', 'openhands-enterprise')
                  )
                  task = MaintenanceTask()
                  task.set_processor(processor)
                  task.status = MaintenanceTaskStatus.PENDING

                  with session_maker() as session:
                      session.add(task)
                      session.commit()
          restartPolicy: OnFailure
```

## 5. Implementation Plan

All implementation must pass existing lints and tests. New functionality requires comprehensive unit tests with >90% coverage. Integration tests should verify the end-to-end telemetry flow, including collection, storage, upload, and warning display.

### 5.1 Database Schema and Models (M1)

**Repository**: OpenHands
Establish the foundational database schema and SQLAlchemy models for telemetry data storage.

#### 5.1.1 OpenHands - Database Migration

- [ ] `enterprise/migrations/versions/077_create_telemetry_tables.py`
- [ ] `enterprise/storage/telemetry_metrics.py`
- [ ] `enterprise/storage/telemetry_config.py`

#### 5.1.2 OpenHands - Model Tests

- [ ] `enterprise/tests/unit/storage/test_telemetry_metrics.py`
- [ ] `enterprise/tests/unit/storage/test_telemetry_config.py`

**Demo**: Database tables created and models can store/retrieve telemetry data.

### 5.2 Metrics Collection Framework (M2)

**Repository**: OpenHands
Implement the pluggable metrics collection system with registry and base classes.

#### 5.2.1 OpenHands - Core Collection Framework

- [ ] `enterprise/server/telemetry/__init__.py`
- [ ] `enterprise/server/telemetry/collector_base.py`
- [ ] `enterprise/server/telemetry/collector_registry.py`
- [ ] `enterprise/server/telemetry/decorators.py`

#### 5.2.2 OpenHands - Example Collectors

- [ ] `enterprise/server/telemetry/collectors/__init__.py`
- [ ] `enterprise/server/telemetry/collectors/system_metrics.py`
- [ ] `enterprise/server/telemetry/collectors/user_activity.py`

#### 5.2.3 OpenHands - Framework Tests

- [ ] `enterprise/tests/unit/telemetry/test_collector_base.py`
- [ ] `enterprise/tests/unit/telemetry/test_collector_registry.py`
- [ ] `enterprise/tests/unit/telemetry/test_system_metrics.py`

**Demo**: Developers can create new collectors with a single file change using the `@register_collector` decorator.

### 5.3 Collection and Upload Processors (M3)

**Repository**: OpenHands
Implement maintenance task processors for collecting metrics and uploading to Replicated.

#### 5.3.1 OpenHands - Collection Processor

- [ ] `enterprise/server/telemetry/collection_processor.py`
- [ ] `enterprise/tests/unit/telemetry/test_collection_processor.py`

#### 5.3.2 OpenHands - Upload Processor

- [ ] `enterprise/server/telemetry/upload_processor.py`
- [ ] `enterprise/tests/unit/telemetry/test_upload_processor.py`

#### 5.3.3 OpenHands - Integration Tests

- [ ] `enterprise/tests/integration/test_telemetry_flow.py`

**Demo**: Metrics are automatically collected weekly and uploaded daily to the Replicated vendor portal.

### 5.4 License Warning API (M4)

**Repository**: OpenHands
Implement the license status endpoint for the warning system.

#### 5.4.1 OpenHands - License Status API

- [ ] `enterprise/server/routes/license.py`
- [ ] `enterprise/tests/unit/routes/test_license.py`

#### 5.4.2 OpenHands - API Integration

- [ ] Update `enterprise/saas_server.py` to include the license router

**Demo**: License status API returns warning status based on telemetry upload success.

### 5.5 UI Warning Banner (M5)

**Repository**: OpenHands
Implement the frontend warning banner component and integration.

#### 5.5.1 OpenHands - UI Warning Banner

- [ ] `frontend/src/components/features/license/license-warning-banner.tsx`
- [ ] `frontend/src/components/features/license/license-warning-banner.test.tsx`

#### 5.5.2 OpenHands - UI Integration

- [ ] Update main UI layout to include the license warning banner
- [ ] Add license status polling service

**Demo**: License warnings appear in the UI when telemetry uploads fail for >4 days, with an accurate expiration countdown.

### 5.6 Helm Chart Deployment Configuration (M6)

**Repository**: OpenHands-Cloud
Create Kubernetes cronjob configurations and deployment scripts.

#### 5.6.1 OpenHands-Cloud - Cronjob Manifests

- [ ] `charts/openhands/templates/telemetry-collection-cronjob.yaml`
- [ ] `charts/openhands/templates/telemetry-upload-cronjob.yaml`

#### 5.6.2 OpenHands-Cloud - Configuration Management

- [ ] `charts/openhands/templates/replicated-secret.yaml`
- [ ] Update `charts/openhands/values.yaml` with telemetry configuration options:

  ```yaml
  # Add to values.yaml
  telemetry:
    enabled: true
    replicatedAppSlug: "openhands-enterprise"
    adminEmail: ""  # Optional: admin email for customer identification

  # Add to deployment environment variables
  env:
    OPENHANDS_ADMIN_EMAIL: "{{ .Values.telemetry.adminEmail }}"
  ```

**Demo**: Complete telemetry system deployed via helm chart with configurable collection intervals and Replicated integration.

### 5.7 Documentation and Enhanced Collectors (M7)

**Repository**: OpenHands
Add comprehensive metrics collectors, monitoring capabilities, and documentation.

#### 5.7.1 OpenHands - Advanced Collectors

- [ ] `enterprise/server/telemetry/collectors/conversation_metrics.py`
- [ ] `enterprise/server/telemetry/collectors/integration_usage.py`
- [ ] `enterprise/server/telemetry/collectors/performance_metrics.py`

#### 5.7.2 OpenHands - Monitoring and Testing

- [ ] `enterprise/server/telemetry/monitoring.py`
- [ ] `enterprise/tests/e2e/test_telemetry_system.py`
- [ ] Performance tests for large-scale metric collection

#### 5.7.3 OpenHands - Technical Documentation

- [ ] `enterprise/server/telemetry/README.md`
- [ ] Update deployment documentation with telemetry configuration instructions
- [ ] Add a troubleshooting guide for telemetry issues

**Demo**: Rich telemetry data flowing to the vendor portal with comprehensive monitoring, alerting for system health, and complete documentation.

@@ -31,7 +31,7 @@ from server.utils.conversation_callback_utils import register_callback_processor
 from openhands.core.logger import openhands_logger as logger
 from openhands.integrations.provider import ProviderToken, ProviderType
 from openhands.server.types import LLMAuthenticationError, MissingSettingsError
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets
 from openhands.utils.async_utils import call_sync_from_async

@@ -250,7 +250,7 @@ class GithubManager(Manager):
             f'[GitHub] Creating new conversation for user {user_info.username}'
         )

-        secret_store = UserSecrets(
+        secret_store = Secrets(
             provider_tokens=MappingProxyType(
                 {
                     ProviderType.GITHUB: ProviderToken(

@@ -25,7 +25,7 @@ from openhands.core.logger import openhands_logger as logger
 from openhands.integrations.gitlab.gitlab_service import GitLabServiceImpl
 from openhands.integrations.provider import ProviderToken, ProviderType
 from openhands.server.types import LLMAuthenticationError, MissingSettingsError
-from openhands.storage.data_models.user_secrets import UserSecrets
+from openhands.storage.data_models.secrets import Secrets


 class GitlabManager(Manager):
@@ -198,7 +198,7 @@ class GitlabManager(Manager):
             f'[GitLab] Creating new conversation for user {user_info.username}'
         )

-        secret_store = UserSecrets(
+        secret_store = Secrets(
             provider_tokens=MappingProxyType(
                 {
                     ProviderType.GITLAB: ProviderToken(

@@ -32,6 +32,7 @@ from openhands.integrations.service_types import Repository
 from openhands.server.shared import server_config
 from openhands.server.types import LLMAuthenticationError, MissingSettingsError
 from openhands.server.user_auth.user_auth import UserAuth
+from openhands.utils.http_session import httpx_verify_option

 JIRA_CLOUD_API_URL = 'https://api.atlassian.com/ex/jira'

@@ -408,7 +409,7 @@ class JiraManager(Manager):
         svc_acc_api_key: str,
     ) -> Tuple[str, str]:
         url = f'{JIRA_CLOUD_API_URL}/{jira_cloud_id}/rest/api/2/issue/{job_context.issue_key}'
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
             response = await client.get(url, auth=(svc_acc_email, svc_acc_api_key))
             response.raise_for_status()
             issue_payload = response.json()
@@ -443,7 +444,7 @@ class JiraManager(Manager):
             f'{JIRA_CLOUD_API_URL}/{jira_cloud_id}/rest/api/2/issue/{issue_key}/comment'
         )
         data = {'body': message.message}
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
             response = await client.post(
                 url, auth=(svc_acc_email, svc_acc_api_key), json=data
             )

@@ -57,7 +57,7 @@ class JiraNewConversationView(JiraViewInterface):
             raise StartingConvoException('No repository selected for this conversation')

         provider_tokens = await self.saas_user_auth.get_provider_tokens()
-        user_secrets = await self.saas_user_auth.get_user_secrets()
+        user_secrets = await self.saas_user_auth.get_secrets()
         instructions, user_msg = self._get_instructions(jinja_env)

         try:

@@ -34,6 +34,7 @@ from openhands.integrations.service_types import Repository
 from openhands.server.shared import server_config
 from openhands.server.types import LLMAuthenticationError, MissingSettingsError
 from openhands.server.user_auth.user_auth import UserAuth
+from openhands.utils.http_session import httpx_verify_option


 class JiraDcManager(Manager):
@@ -422,7 +423,7 @@ class JiraDcManager(Manager):
         """Get issue details from Jira DC API."""
         url = f'{job_context.base_api_url}/rest/api/2/issue/{job_context.issue_key}'
         headers = {'Authorization': f'Bearer {svc_acc_api_key}'}
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
             response = await client.get(url, headers=headers)
             response.raise_for_status()
             issue_payload = response.json()
@@ -452,7 +453,7 @@ class JiraDcManager(Manager):
         url = f'{base_api_url}/rest/api/2/issue/{issue_key}/comment'
         headers = {'Authorization': f'Bearer {svc_acc_api_key}'}
         data = {'body': message.message}
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
             response = await client.post(url, headers=headers, json=data)
             response.raise_for_status()
             return response.json()

@@ -60,7 +60,7 @@ class JiraDcNewConversationView(JiraDcViewInterface):
             raise StartingConvoException('No repository selected for this conversation')

         provider_tokens = await self.saas_user_auth.get_provider_tokens()
-        user_secrets = await self.saas_user_auth.get_user_secrets()
+        user_secrets = await self.saas_user_auth.get_secrets()
         instructions, user_msg = self._get_instructions(jinja_env)

         try:

@@ -31,6 +31,7 @@ from openhands.integrations.service_types import Repository
 from openhands.server.shared import server_config
 from openhands.server.types import LLMAuthenticationError, MissingSettingsError
 from openhands.server.user_auth.user_auth import UserAuth
+from openhands.utils.http_session import httpx_verify_option


 class LinearManager(Manager):
@@ -408,7 +409,7 @@ class LinearManager(Manager):
     async def _query_api(self, query: str, variables: Dict, api_key: str) -> Dict:
         """Query Linear GraphQL API."""
         headers = {'Authorization': api_key}
-        async with httpx.AsyncClient() as client:
+        async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
             response = await client.post(
                 self.api_url,
                 headers=headers,

@@ -57,7 +57,7 @@ class LinearNewConversationView(LinearViewInterface):
             raise StartingConvoException('No repository selected for this conversation')

         provider_tokens = await self.saas_user_auth.get_provider_tokens()
-        user_secrets = await self.saas_user_auth.get_user_secrets()
+        user_secrets = await self.saas_user_auth.get_secrets()
         instructions, user_msg = self._get_instructions(jinja_env)

         try:

@@ -186,7 +186,7 @@ class SlackNewConversationView(SlackViewInterface):
         self._verify_necessary_values_are_set()

         provider_tokens = await self.saas_user_auth.get_provider_tokens()
-        user_secrets = await self.saas_user_auth.get_user_secrets()
+        user_secrets = await self.saas_user_auth.get_secrets()
         user_instructions, conversation_instructions = self._get_instructions(jinja)

         # Determine git provider from repository

129
enterprise/migrations/versions/078_create_telemetry_tables.py
Normal file
129
enterprise/migrations/versions/078_create_telemetry_tables.py
Normal file
@ -0,0 +1,129 @@
"""create telemetry tables

Revision ID: 078
Revises: 077
Create Date: 2025-10-21

"""

from typing import Sequence, Union

import sqlalchemy as sa
from alembic import op

# revision identifiers, used by Alembic.
revision: str = '078'
down_revision: Union[str, None] = '077'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
"""Create telemetry tables for metrics collection and configuration."""
# Create telemetry_metrics table
op.create_table(
'telemetry_metrics',
sa.Column(
'id',
sa.String(), # UUID as string
nullable=False,
primary_key=True,
),
sa.Column(
'collected_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'metrics_data',
sa.JSON(),
nullable=False,
),
sa.Column(
'uploaded_at',
sa.DateTime(timezone=True),
nullable=True,
),
sa.Column(
'upload_attempts',
sa.Integer(),
nullable=False,
server_default='0',
),
sa.Column(
'last_upload_error',
sa.Text(),
nullable=True,
),
sa.Column(
'created_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'updated_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
)

# Create indexes for telemetry_metrics
op.create_index(
'ix_telemetry_metrics_collected_at', 'telemetry_metrics', ['collected_at']
)
op.create_index(
'ix_telemetry_metrics_uploaded_at', 'telemetry_metrics', ['uploaded_at']
)

# Create telemetry_replicated_identity table (minimal persistent identity data)
op.create_table(
'telemetry_replicated_identity',
sa.Column(
'id',
sa.Integer(),
nullable=False,
primary_key=True,
server_default='1',
),
sa.Column(
'customer_id',
sa.String(255),
nullable=True,
),
sa.Column(
'instance_id',
sa.String(255),
nullable=True,
),
sa.Column(
'created_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
sa.Column(
'updated_at',
sa.DateTime(timezone=True),
nullable=False,
server_default=sa.text('CURRENT_TIMESTAMP'),
),
)

# Add constraint to ensure single row in telemetry_replicated_identity
op.create_check_constraint(
'single_identity_row', 'telemetry_replicated_identity', 'id = 1'
)


def downgrade() -> None:
"""Drop telemetry tables."""
# Drop indexes first
op.drop_index('ix_telemetry_metrics_uploaded_at', 'telemetry_metrics')
op.drop_index('ix_telemetry_metrics_collected_at', 'telemetry_metrics')

# Drop tables
op.drop_table('telemetry_replicated_identity')
op.drop_table('telemetry_metrics')
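The two indexes created above match the uploader's expected access pattern: find rows not yet uploaded, oldest first. A minimal sketch of that query with SQLAlchemy (the service code that issues it is not part of this commit):

from storage.telemetry_metrics import TelemetryMetrics

def pending_metrics(session, limit: int = 100):
    # Un-uploaded rows (ix_telemetry_metrics_uploaded_at), oldest first
    # (ix_telemetry_metrics_collected_at).
    return (
        session.query(TelemetryMetrics)
        .filter(TelemetryMetrics.uploaded_at.is_(None))
        .order_by(TelemetryMetrics.collected_at)
        .limit(limit)
        .all()
    )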
@ -0,0 +1,39 @@
"""rename user_secrets table to custom_secrets

Revision ID: 079
Revises: 078
Create Date: 2025-10-27 00:00:00.000000

"""

from typing import Sequence, Union

from alembic import op

# revision identifiers, used by Alembic.
revision: str = '079'
down_revision: Union[str, None] = '078'
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None


def upgrade() -> None:
# Rename the table from user_secrets to custom_secrets
op.rename_table('user_secrets', 'custom_secrets')

# Rename the index to match the new table name
op.drop_index('idx_user_secrets_keycloak_user_id', 'custom_secrets')
op.create_index(
'idx_custom_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
)


def downgrade() -> None:
# Rename the index back to the original name
op.drop_index('idx_custom_secrets_keycloak_user_id', 'custom_secrets')
op.create_index(
'idx_user_secrets_keycloak_user_id', 'custom_secrets', ['keycloak_user_id']
)

# Rename the table back from custom_secrets to user_secrets
op.rename_table('custom_secrets', 'user_secrets')
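With revisions 078 and 079 chained (079 revises 078), the standard Alembic flow -- `poetry run alembic upgrade head` from the `enterprise/` directory, assuming the project's usual invocation -- applies both in order; downgrading reverses them, dropping the telemetry tables and restoring the `user_secrets` table and index names.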
32
enterprise/poetry.lock
generated
@ -5737,7 +5737,7 @@ llama = ["llama-index (>=0.12.29,<0.13.0)", "llama-index-core (>=0.12.29,<0.13.0

[[package]]
name = "openhands-agent-server"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Agent Server - REST/WebSocket interface for OpenHands AI Agent"
optional = false
python-versions = ">=3.12"
@ -5758,14 +5758,14 @@ wsproto = ">=1.2.0"

[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
resolved_reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
subdirectory = "openhands-agent-server"

[[package]]
name = "openhands-ai"
version = "0.59.0"
version = "0.0.0-post.5456+15c207c40"
description = "OpenHands: Code Less, Make More"
optional = false
python-versions = "^3.12,<3.14"
@ -5805,9 +5805,9 @@ memory-profiler = "^0.61.0"
numpy = "*"
openai = "1.99.9"
openhands-aci = "0.3.2"
openhands-agent-server = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-agent-server"}
openhands-sdk = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-sdk"}
openhands-tools = {git = "https://github.com/All-Hands-AI/agent-sdk.git", rev = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e", subdirectory = "openhands-tools"}
openhands-agent-server = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "3d8af53b2f0259dc98555a4acd4238f90e0afbce", subdirectory = "openhands-agent-server"}
openhands-sdk = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "3d8af53b2f0259dc98555a4acd4238f90e0afbce", subdirectory = "openhands-sdk"}
openhands-tools = {git = "https://github.com/OpenHands/agent-sdk.git", rev = "3d8af53b2f0259dc98555a4acd4238f90e0afbce", subdirectory = "openhands-tools"}
opentelemetry-api = "^1.33.1"
opentelemetry-exporter-otlp-proto-grpc = "^1.33.1"
pathspec = "^0.12.1"
@ -5863,7 +5863,7 @@ url = ".."

[[package]]
name = "openhands-sdk"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands SDK - Core functionality for building AI agents"
optional = false
python-versions = ">=3.12"
@ -5886,14 +5886,14 @@ boto3 = ["boto3 (>=1.35.0)"]

[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
resolved_reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
subdirectory = "openhands-sdk"

[[package]]
name = "openhands-tools"
version = "1.0.0a3"
version = "1.0.0a4"
description = "OpenHands Tools - Runtime tools for AI agents"
optional = false
python-versions = ">=3.12"
@ -5913,9 +5913,9 @@ pydantic = ">=2.11.7"

[package.source]
type = "git"
url = "https://github.com/All-Hands-AI/agent-sdk.git"
reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
resolved_reference = "8d8134ca5a87cc3e90e3ff968327a7f4c961e22e"
url = "https://github.com/OpenHands/agent-sdk.git"
reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
resolved_reference = "3d8af53b2f0259dc98555a4acd4238f90e0afbce"
subdirectory = "openhands-tools"

[[package]]
@ -31,7 +31,7 @@ from openhands.integrations.provider (
)
from openhands.server.settings import Settings
from openhands.server.user_auth.user_auth import AuthType, UserAuth
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
from openhands.storage.settings.settings_store import SettingsStore

token_manager = TokenManager()
@ -52,7 +52,7 @@ class SaasUserAuth(UserAuth):
settings_store: SaasSettingsStore | None = None
secrets_store: SaasSecretsStore | None = None
_settings: Settings | None = None
_user_secrets: UserSecrets | None = None
_secrets: Secrets | None = None
accepted_tos: bool | None = None
auth_type: AuthType = AuthType.COOKIE

@ -119,13 +119,13 @@ class SaasUserAuth(UserAuth):
self.secrets_store = secrets_store
return secrets_store

async def get_user_secrets(self):
user_secrets = self._user_secrets
async def get_secrets(self):
user_secrets = self._secrets
if user_secrets:
return user_secrets
secrets_store = await self.get_secrets_store()
user_secrets = await secrets_store.load()
self._user_secrets = user_secrets
self._secrets = user_secrets
return user_secrets

async def get_access_token(self) -> SecretStr | None:
@ -148,7 +148,7 @@ class SaasUserAuth(UserAuth):
if not access_token:
raise AuthError()

user_secrets = await self.get_user_secrets()
user_secrets = await self.get_secrets()

try:
# TODO: I think we can do this in a single request if we refactor
@ -37,6 +37,7 @@ from storage.offline_token_store import OfflineTokenStore
from tenacity import RetryCallState, retry, retry_if_exception_type, stop_after_attempt

from openhands.integrations.service_types import ProviderType
from openhands.utils.http_session import httpx_verify_option


def _before_sleep_callback(retry_state: RetryCallState) -> None:
@ -191,7 +192,7 @@ class TokenManager:
access_token: str,
idp: ProviderType,
) -> dict[str, str | int]:
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
base_url = KEYCLOAK_SERVER_URL_EXT if self.external else KEYCLOAK_SERVER_URL
url = f'{base_url}/realms/{KEYCLOAK_REALM_NAME}/broker/{idp.value}/token'
headers = {
@ -350,7 +351,7 @@ class TokenManager:
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=payload)
response.raise_for_status()
logger.info('Successfully refreshed GitHub token')
@ -376,7 +377,7 @@ class TokenManager:
'refresh_token': refresh_token,
'grant_type': 'refresh_token',
}
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=payload)
response.raise_for_status()
logger.info('Successfully refreshed GitLab token')
@ -404,7 +405,7 @@ class TokenManager:
'refresh_token': refresh_token,
}

async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, data=data, headers=headers)
response.raise_for_status()
logger.info('Successfully refreshed Bitbucket token')
@ -12,6 +12,7 @@ from storage.saas_settings_store import SaasSettingsStore
from openhands.core.logger import openhands_logger as logger
from openhands.server.user_auth import get_user_id
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option


# Helper functions for BYOR API key management
@ -68,9 +69,10 @@ async def generate_byor_key(user_id: str) -> str | None:

try:
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
response = await client.post(
f'{LITE_LLM_API_URL}/key/generate',
@ -120,9 +122,10 @@ async def delete_byor_key_from_litellm(user_id: str, byor_key: str) -> bool:

try:
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
# Delete the key directly using the key value
delete_url = f'{LITE_LLM_API_URL}/key/delete'
@ -27,6 +27,7 @@ from storage.saas_settings_store import SaasSettingsStore
from storage.subscription_access import SubscriptionAccess

from openhands.server.user_auth import get_user_id
from openhands.utils.http_session import httpx_verify_option

stripe.api_key = STRIPE_API_KEY
billing_router = APIRouter(prefix='/api/billing')
@ -110,7 +111,7 @@ def calculate_credits(user_info: LiteLlmUserInfo) -> float:
async def get_credits(user_id: str = Depends(get_user_id)) -> GetCreditsResponse:
if not stripe_service.STRIPE_API_KEY:
return GetCreditsResponse()
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
user_json = await _get_litellm_user(client, user_id)
credits = calculate_credits(user_json['user_info'])
return GetCreditsResponse(credits=Decimal('{:.2f}'.format(credits)))
@ -430,7 +431,7 @@ async def success_callback(session_id: str, request: Request):
)
raise HTTPException(status.HTTP_400_BAD_REQUEST)

async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
# Update max budget in litellm
user_json = await _get_litellm_user(client, billing_session.user_id)
amount_subtotal = stripe_session.amount_subtotal or 0
@ -11,6 +11,7 @@ from fastapi.responses import RedirectResponse
from server.logger import logger

from openhands.server.shared import config
from openhands.utils.http_session import httpx_verify_option

GITHUB_PROXY_ENDPOINTS = bool(os.environ.get('GITHUB_PROXY_ENDPOINTS'))

@ -87,7 +88,7 @@ def add_github_proxy_routes(app: FastAPI):
]
body = urlencode(query_params, doseq=True)
url = 'https://github.com/login/oauth/access_token'
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, content=body)
return Response(
response.content,
@ -101,7 +102,7 @@ def add_github_proxy_routes(app: FastAPI):
logger.info(f'github_proxy_post:1:{path}')
body = await request.body()
url = f'https://github.com/{path}'
async with httpx.AsyncClient() as client:
async with httpx.AsyncClient(verify=httpx_verify_option()) as client:
response = await client.post(url, content=body, headers=request.headers)
return Response(
response.content,
@ -52,6 +52,7 @@ from openhands.storage.locations (
get_conversation_events_dir,
)
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option
from openhands.utils.import_utils import get_impl
from openhands.utils.shutdown_listener import should_continue
from openhands.utils.utils import create_registry_and_conversation_stats
@ -266,9 +267,10 @@ class SaasNestedConversationManager(ConversationManager):
):
logger.info('starting_nested_conversation', extra={'sid': sid})
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': session_api_key,
}
},
) as client:
await self._setup_nested_settings(client, api_url, settings)
await self._setup_provider_tokens(client, api_url, settings)
@ -484,9 +486,10 @@ class SaasNestedConversationManager(ConversationManager):
raise ValueError(f'no_such_conversation:{sid}')
nested_url = self._get_nested_url_for_runtime(runtime['runtime_id'], sid)
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': runtime['session_api_key'],
}
},
) as client:
response = await client.post(f'{nested_url}/events', json=data)
response.raise_for_status()
@ -551,9 +554,10 @@ class SaasNestedConversationManager(ConversationManager):
return None

async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'X-Session-API-Key': session_api_key,
}
},
) as client:
# Query the nested runtime for conversation info
response = await client.get(nested_url)
@ -828,6 +832,7 @@ class SaasNestedConversationManager(ConversationManager):
@contextlib.asynccontextmanager
async def _httpx_client(self):
async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={'X-API-Key': self.config.sandbox.api_key or ''},
timeout=_HTTP_TIMEOUT,
) as client:
@ -7,11 +7,11 @@ from dataclasses import dataclass
from cryptography.fernet import Fernet
from sqlalchemy.orm import sessionmaker
from storage.database import session_maker
from storage.stored_user_secrets import StoredUserSecrets
from storage.stored_custom_secrets import StoredCustomSecrets

from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.core.logger import openhands_logger as logger
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets
from openhands.storage.secrets.secrets_store import SecretsStore


@ -21,20 +21,20 @@ class SaasSecretsStore(SecretsStore):
session_maker: sessionmaker
config: OpenHandsConfig

async def load(self) -> UserSecrets | None:
async def load(self) -> Secrets | None:
if not self.user_id:
return None

with self.session_maker() as session:
# Fetch all secrets for the given user ID
settings = (
session.query(StoredUserSecrets)
.filter(StoredUserSecrets.keycloak_user_id == self.user_id)
session.query(StoredCustomSecrets)
.filter(StoredCustomSecrets.keycloak_user_id == self.user_id)
.all()
)

if not settings:
return UserSecrets()
return Secrets()

kwargs = {}
for secret in settings:
@ -45,14 +45,14 @@ class SaasSecretsStore(SecretsStore):

self._decrypt_kwargs(kwargs)

return UserSecrets(custom_secrets=kwargs) # type: ignore[arg-type]
return Secrets(custom_secrets=kwargs) # type: ignore[arg-type]

async def store(self, item: UserSecrets):
async def store(self, item: Secrets):
with self.session_maker() as session:
# Incoming secrets are always the most updated ones
# Delete all existing records and override with incoming ones
session.query(StoredUserSecrets).filter(
StoredUserSecrets.keycloak_user_id == self.user_id
session.query(StoredCustomSecrets).filter(
StoredCustomSecrets.keycloak_user_id == self.user_id
).delete()

# Prepare the new secrets data
@ -74,7 +74,7 @@ class SaasSecretsStore(SecretsStore):

# Add the new secrets
for secret_name, secret_value, description in secret_tuples:
new_secret = StoredUserSecrets(
new_secret = StoredCustomSecrets(
keycloak_user_id=self.user_id,
secret_name=secret_name,
secret_value=secret_value,
@ -31,6 +31,7 @@ from openhands.server.settings import Settings
from openhands.storage import get_file_store
from openhands.storage.settings.settings_store import SettingsStore
from openhands.utils.async_utils import call_sync_from_async
from openhands.utils.http_session import httpx_verify_option


@dataclass
@ -215,9 +216,10 @@ class SaasSettingsStore(SettingsStore):
)

async with httpx.AsyncClient(
verify=httpx_verify_option(),
headers={
'x-goog-api-key': LITE_LLM_API_KEY,
}
},
) as client:
# Get the previous max budget to prevent accidental loss
# In Litellm a get always succeeds, regardless of whether the user actually exists
@ -2,8 +2,8 @@ from sqlalchemy import Column, Identity, Integer, String
from storage.base import Base


class StoredUserSecrets(Base): # type: ignore
__tablename__ = 'user_secrets'
class StoredCustomSecrets(Base): # type: ignore
__tablename__ = 'custom_secrets'
id = Column(Integer, Identity(), primary_key=True)
keycloak_user_id = Column(String, nullable=True, index=True)
secret_name = Column(String, nullable=False)
98
enterprise/storage/telemetry_identity.py
Normal file
@ -0,0 +1,98 @@
"""SQLAlchemy model for telemetry identity.

This model stores persistent identity information that must survive container restarts
for the OpenHands Enterprise Telemetry Service.
"""

from datetime import UTC, datetime
from typing import Optional

from sqlalchemy import CheckConstraint, Column, DateTime, Integer, String
from storage.base import Base


class TelemetryIdentity(Base): # type: ignore
"""Stores persistent identity information for telemetry.

This table is designed to contain exactly one row (enforced by database constraint)
that maintains only the identity data that cannot be reliably recomputed:
- customer_id: Established relationship with Replicated
- instance_id: Generated once, must remain stable

Operational data like timestamps are derived from the telemetry_metrics table.
"""

__tablename__ = 'telemetry_replicated_identity'
__table_args__ = (CheckConstraint('id = 1', name='single_identity_row'),)

id = Column(Integer, primary_key=True, default=1)
customer_id = Column(String(255), nullable=True)
instance_id = Column(String(255), nullable=True)
created_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
nullable=False,
)
updated_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
nullable=False,
)

def __init__(
self,
customer_id: Optional[str] = None,
instance_id: Optional[str] = None,
**kwargs,
):
"""Initialize telemetry identity.

Args:
customer_id: Unique identifier for the customer
instance_id: Unique identifier for this OpenHands instance
**kwargs: Additional keyword arguments for SQLAlchemy
"""
super().__init__(**kwargs)

# Set defaults for fields that would normally be set by SQLAlchemy
now = datetime.now(UTC)
if not hasattr(self, 'created_at') or self.created_at is None:
self.created_at = now
if not hasattr(self, 'updated_at') or self.updated_at is None:
self.updated_at = now

# Force id to be 1 to maintain single-row constraint
self.id = 1
self.customer_id = customer_id
self.instance_id = instance_id

def set_customer_info(
self,
customer_id: Optional[str] = None,
instance_id: Optional[str] = None,
) -> None:
"""Update customer and instance identification information.

Args:
customer_id: Unique identifier for the customer
instance_id: Unique identifier for this OpenHands instance
"""
if customer_id is not None:
self.customer_id = customer_id
if instance_id is not None:
self.instance_id = instance_id

@property
def has_customer_info(self) -> bool:
"""Check if customer identification information is configured."""
return bool(self.customer_id and self.instance_id)

def __repr__(self) -> str:
return (
f"<TelemetryIdentity(customer_id='{self.customer_id}', "
f"instance_id='{self.instance_id}')>"
)

class Config:
from_attributes = True
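Because the check constraint (and the constructor forcing `id = 1`) pin this table to a single row, consumers would typically load-or-create the singleton rather than insert freely. A minimal sketch, assuming a standard SQLAlchemy session (the calling code is not part of this commit):

from storage.telemetry_identity import TelemetryIdentity

def get_or_create_identity(session) -> TelemetryIdentity:
    identity = session.get(TelemetryIdentity, 1)  # primary key is always 1
    if identity is None:
        identity = TelemetryIdentity()  # __init__ forces id == 1
        session.add(identity)
        session.commit()
    return identity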
112
enterprise/storage/telemetry_metrics.py
Normal file
@ -0,0 +1,112 @@
"""SQLAlchemy model for telemetry metrics data.

This model stores individual metric collection records with upload tracking
and retry logic for the OpenHands Enterprise Telemetry Service.
"""

import uuid
from datetime import UTC, datetime
from typing import Any, Dict, Optional

from sqlalchemy import JSON, Column, DateTime, Integer, String, Text
from storage.base import Base


class TelemetryMetrics(Base): # type: ignore
"""Stores collected telemetry metrics with upload tracking.

Each record represents a single metrics collection event with associated
metadata for upload status and retry logic.
"""

__tablename__ = 'telemetry_metrics'

id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
collected_at = Column(
DateTime(timezone=True),
nullable=False,
default=lambda: datetime.now(UTC),
index=True,
)
metrics_data = Column(JSON, nullable=False)
uploaded_at = Column(DateTime(timezone=True), nullable=True, index=True)
upload_attempts = Column(Integer, nullable=False, default=0)
last_upload_error = Column(Text, nullable=True)
created_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
nullable=False,
)
updated_at = Column(
DateTime(timezone=True),
default=lambda: datetime.now(UTC),
onupdate=lambda: datetime.now(UTC),
nullable=False,
)

def __init__(
self,
metrics_data: Dict[str, Any],
collected_at: Optional[datetime] = None,
**kwargs,
):
"""Initialize a new telemetry metrics record.

Args:
metrics_data: Dictionary containing the collected metrics
collected_at: Timestamp when metrics were collected (defaults to now)
**kwargs: Additional keyword arguments for SQLAlchemy
"""
super().__init__(**kwargs)

# Set defaults for fields that would normally be set by SQLAlchemy
now = datetime.now(UTC)
if not hasattr(self, 'id') or self.id is None:
self.id = str(uuid.uuid4())
if not hasattr(self, 'upload_attempts') or self.upload_attempts is None:
self.upload_attempts = 0
if not hasattr(self, 'created_at') or self.created_at is None:
self.created_at = now
if not hasattr(self, 'updated_at') or self.updated_at is None:
self.updated_at = now

self.metrics_data = metrics_data
if collected_at:
self.collected_at = collected_at
elif not hasattr(self, 'collected_at') or self.collected_at is None:
self.collected_at = now

def mark_uploaded(self) -> None:
"""Mark this metrics record as successfully uploaded."""
self.uploaded_at = datetime.now(UTC)
self.last_upload_error = None

def mark_upload_failed(self, error_message: str) -> None:
"""Mark this metrics record as having failed upload.

Args:
error_message: Description of the upload failure
"""
self.upload_attempts += 1
self.last_upload_error = error_message
self.uploaded_at = None

@property
def is_uploaded(self) -> bool:
"""Check if this metrics record has been successfully uploaded."""
return self.uploaded_at is not None

@property
def needs_retry(self) -> bool:
"""Check if this metrics record needs upload retry (failed but not too many attempts)."""
return not self.is_uploaded and self.upload_attempts < 3

def __repr__(self) -> str:
return (
f"<TelemetryMetrics(id='{self.id}', "
f"collected_at='{self.collected_at}', "
f'uploaded={self.is_uploaded})>'
)

class Config:
from_attributes = True
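Together, `mark_uploaded`, `mark_upload_failed`, and `needs_retry` form a small upload state machine capped at three attempts. A sketch of how a caller might drive it, where `upload_to_backend` is a stand-in name for whatever transport the telemetry service actually uses:

def try_upload(session, record, upload_to_backend) -> None:
    # Skip records that are done or have exhausted their retry budget (3).
    if record.is_uploaded or not record.needs_retry:
        return
    try:
        upload_to_backend(record.metrics_data)
        record.mark_uploaded()
    except Exception as exc:  # record the failure; a later pass may retry
        record.mark_upload_failed(str(exc))
    session.commit()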
@ -309,7 +309,7 @@ class TestJiraViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()


@ -309,7 +309,7 @@ class TestJiraDcViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()


@ -309,7 +309,7 @@ class TestLinearViewEdgeCases:
mock_agent_loop_info,
):
"""Test conversation creation when user has no secrets"""
new_conversation_view.saas_user_auth.get_user_secrets.return_value = None
new_conversation_view.saas_user_auth.get_secrets.return_value = None
mock_create_conversation.return_value = mock_agent_loop_info
mock_store.create_conversation = AsyncMock()
1
enterprise/tests/unit/storage/__init__.py
Normal file
@ -0,0 +1 @@
# Storage unit tests
129
enterprise/tests/unit/storage/test_telemetry_identity.py
Normal file
@ -0,0 +1,129 @@
"""Unit tests for TelemetryIdentity model.

Tests the persistent identity storage for the OpenHands Enterprise Telemetry Service.
"""

from datetime import datetime

from storage.telemetry_identity import TelemetryIdentity


class TestTelemetryIdentity:
"""Test cases for TelemetryIdentity model."""

def test_create_identity_with_defaults(self):
"""Test creating identity with default values."""
identity = TelemetryIdentity()

assert identity.id == 1
assert identity.customer_id is None
assert identity.instance_id is None
assert isinstance(identity.created_at, datetime)
assert isinstance(identity.updated_at, datetime)

def test_create_identity_with_values(self):
"""Test creating identity with specific values."""
customer_id = 'cust_123'
instance_id = 'inst_456'

identity = TelemetryIdentity(customer_id=customer_id, instance_id=instance_id)

assert identity.id == 1
assert identity.customer_id == customer_id
assert identity.instance_id == instance_id

def test_set_customer_info(self):
"""Test updating customer information."""
identity = TelemetryIdentity()

# Update customer info
identity.set_customer_info(
customer_id='new_customer', instance_id='new_instance'
)

assert identity.customer_id == 'new_customer'
assert identity.instance_id == 'new_instance'

def test_set_customer_info_partial(self):
"""Test partial updates of customer information."""
identity = TelemetryIdentity(
customer_id='original_customer', instance_id='original_instance'
)

# Update only customer_id
identity.set_customer_info(customer_id='updated_customer')
assert identity.customer_id == 'updated_customer'
assert identity.instance_id == 'original_instance'

# Update only instance_id
identity.set_customer_info(instance_id='updated_instance')
assert identity.customer_id == 'updated_customer'
assert identity.instance_id == 'updated_instance'

def test_set_customer_info_with_none(self):
"""Test that None values don't overwrite existing data."""
identity = TelemetryIdentity(
customer_id='existing_customer', instance_id='existing_instance'
)

# Call with None values - should not change existing data
identity.set_customer_info(customer_id=None, instance_id=None)
assert identity.customer_id == 'existing_customer'
assert identity.instance_id == 'existing_instance'

def test_has_customer_info_property(self):
"""Test has_customer_info property logic."""
identity = TelemetryIdentity()

# Initially false (both None)
assert not identity.has_customer_info

# Still false with only customer_id
identity.customer_id = 'customer_123'
assert not identity.has_customer_info

# Still false with only instance_id
identity.customer_id = None
identity.instance_id = 'instance_456'
assert not identity.has_customer_info

# True when both are set
identity.customer_id = 'customer_123'
identity.instance_id = 'instance_456'
assert identity.has_customer_info

def test_has_customer_info_with_empty_strings(self):
"""Test has_customer_info with empty strings."""
identity = TelemetryIdentity(customer_id='', instance_id='')

# Empty strings should be falsy
assert not identity.has_customer_info

def test_repr_method(self):
"""Test string representation of identity."""
identity = TelemetryIdentity(
customer_id='test_customer', instance_id='test_instance'
)

repr_str = repr(identity)
assert 'TelemetryIdentity' in repr_str
assert 'test_customer' in repr_str
assert 'test_instance' in repr_str

def test_id_forced_to_one(self):
"""Test that ID is always forced to 1."""
identity = TelemetryIdentity()
assert identity.id == 1

# Even if we try to set a different ID in constructor
identity2 = TelemetryIdentity(customer_id='test')
assert identity2.id == 1

def test_timestamps_are_set(self):
"""Test that timestamps are properly set."""
identity = TelemetryIdentity()

assert identity.created_at is not None
assert identity.updated_at is not None
assert isinstance(identity.created_at, datetime)
assert isinstance(identity.updated_at, datetime)
190
enterprise/tests/unit/storage/test_telemetry_metrics.py
Normal file
@ -0,0 +1,190 @@
"""Unit tests for TelemetryMetrics model."""

import uuid
from datetime import UTC, datetime

from storage.telemetry_metrics import TelemetryMetrics


class TestTelemetryMetrics:
"""Test cases for TelemetryMetrics model."""

def test_init_with_metrics_data(self):
"""Test initialization with metrics data."""
metrics_data = {
'cpu_usage': 75.5,
'memory_usage': 1024,
'active_sessions': 5,
}

metrics = TelemetryMetrics(metrics_data=metrics_data)

assert metrics.metrics_data == metrics_data
assert metrics.upload_attempts == 0
assert metrics.uploaded_at is None
assert metrics.last_upload_error is None
assert metrics.collected_at is not None
assert metrics.created_at is not None
assert metrics.updated_at is not None

def test_init_with_custom_collected_at(self):
"""Test initialization with custom collected_at timestamp."""
metrics_data = {'test': 'value'}
custom_time = datetime(2023, 1, 1, 12, 0, 0, tzinfo=UTC)

metrics = TelemetryMetrics(metrics_data=metrics_data, collected_at=custom_time)

assert metrics.collected_at == custom_time

def test_mark_uploaded(self):
"""Test marking metrics as uploaded."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

# Initially not uploaded
assert not metrics.is_uploaded
assert metrics.uploaded_at is None

# Mark as uploaded
metrics.mark_uploaded()

assert metrics.is_uploaded

def test_mark_upload_failed(self):
"""Test marking upload as failed."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
error_message = 'Network timeout'

# Initially no failures
assert metrics.upload_attempts == 0
assert metrics.last_upload_error is None

# Mark as failed
metrics.mark_upload_failed(error_message)

assert metrics.upload_attempts == 1
assert metrics.last_upload_error == error_message
assert metrics.uploaded_at is None
assert not metrics.is_uploaded

def test_multiple_upload_failures(self):
"""Test multiple upload failures increment attempts."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

metrics.mark_upload_failed('Error 1')
assert metrics.upload_attempts == 1

metrics.mark_upload_failed('Error 2')
assert metrics.upload_attempts == 2
assert metrics.last_upload_error == 'Error 2'

def test_is_uploaded_property(self):
"""Test is_uploaded property."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

# Initially not uploaded
assert not metrics.is_uploaded

# After marking uploaded
metrics.mark_uploaded()
assert metrics.is_uploaded

def test_needs_retry_property(self):
"""Test needs_retry property logic."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

# Initially needs retry (0 attempts, not uploaded)
assert metrics.needs_retry

# After 1 failure, still needs retry
metrics.mark_upload_failed('Error 1')
assert metrics.needs_retry

# After 2 failures, still needs retry
metrics.mark_upload_failed('Error 2')
assert metrics.needs_retry

# After 3 failures, no more retries
metrics.mark_upload_failed('Error 3')
assert not metrics.needs_retry

# Reset and test successful upload
metrics2 = TelemetryMetrics(metrics_data={'test': 'data'}) # type: ignore[unreachable]
metrics2.mark_uploaded()
# After upload, needs_retry should be False since is_uploaded is True

def test_upload_failure_clears_uploaded_at(self):
"""Test that upload failure clears uploaded_at timestamp."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

# Mark as uploaded first
metrics.mark_uploaded()
assert metrics.uploaded_at is not None

# Mark as failed - should clear uploaded_at
metrics.mark_upload_failed('Network error')
assert metrics.uploaded_at is None

def test_successful_upload_clears_error(self):
"""Test that successful upload clears error message."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})

# Mark as failed first
metrics.mark_upload_failed('Network error')
assert metrics.last_upload_error == 'Network error'

# Mark as uploaded - should clear error
metrics.mark_uploaded()
assert metrics.last_upload_error is None

def test_uuid_generation(self):
"""Test that each instance gets a unique UUID."""
metrics1 = TelemetryMetrics(metrics_data={'test': 'data1'})
metrics2 = TelemetryMetrics(metrics_data={'test': 'data2'})

assert metrics1.id != metrics2.id
assert isinstance(uuid.UUID(metrics1.id), uuid.UUID)
assert isinstance(uuid.UUID(metrics2.id), uuid.UUID)

def test_repr(self):
"""Test string representation."""
metrics = TelemetryMetrics(metrics_data={'test': 'data'})
repr_str = repr(metrics)

assert 'TelemetryMetrics' in repr_str
assert metrics.id in repr_str
assert str(metrics.collected_at) in repr_str
assert 'uploaded=False' in repr_str

# Test after upload
metrics.mark_uploaded()
repr_str = repr(metrics)
assert 'uploaded=True' in repr_str

def test_complex_metrics_data(self):
"""Test with complex nested metrics data."""
complex_data = {
'system': {
'cpu': {'usage': 75.5, 'cores': 8},
'memory': {'total': 16384, 'used': 8192},
},
'sessions': [
{'id': 'session1', 'duration': 3600},
{'id': 'session2', 'duration': 1800},
],
'timestamp': '2023-01-01T12:00:00Z',
}

metrics = TelemetryMetrics(metrics_data=complex_data)

assert metrics.metrics_data == complex_data

def test_empty_metrics_data(self):
"""Test with empty metrics data."""
metrics = TelemetryMetrics(metrics_data={})

assert metrics.metrics_data == {}

def test_config_class(self):
"""Test that Config class is properly set."""
assert hasattr(TelemetryMetrics, 'Config')
assert TelemetryMetrics.Config.from_attributes is True
@ -5,11 +5,11 @@ from unittest.mock import MagicMock
import pytest
from pydantic import SecretStr
from storage.saas_secrets_store import SaasSecretsStore
from storage.stored_user_secrets import StoredUserSecrets
from storage.stored_custom_secrets import StoredCustomSecrets

from openhands.core.config.openhands_config import OpenHandsConfig
from openhands.integrations.provider import CustomSecret
from openhands.storage.data_models.user_secrets import UserSecrets
from openhands.storage.data_models.secrets import Secrets


@pytest.fixture
@ -27,8 +27,8 @@ def secrets_store(session_maker, mock_config):
class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_store_and_load(self, secrets_store):
# Create a UserSecrets object with some test data
user_secrets = UserSecrets(
# Create a Secrets object with some test data
user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -60,8 +60,8 @@ class TestSaasSecretsStore:

@pytest.mark.asyncio
async def test_encryption_decryption(self, secrets_store):
# Create a UserSecrets object with sensitive data
user_secrets = UserSecrets(
# Create a Secrets object with sensitive data
user_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -87,8 +87,8 @@ class TestSaasSecretsStore:
# Verify the data is encrypted in the database
with secrets_store.session_maker() as session:
stored = (
session.query(StoredUserSecrets)
.filter(StoredUserSecrets.keycloak_user_id == 'user-id')
session.query(StoredCustomSecrets)
.filter(StoredCustomSecrets.keycloak_user_id == 'user-id')
.first()
)

@ -154,7 +154,7 @@ class TestSaasSecretsStore:
@pytest.mark.asyncio
async def test_update_existing_secrets(self, secrets_store):
# Create and store initial secrets
initial_secrets = UserSecrets(
initial_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
@ -169,7 +169,7 @@ class TestSaasSecretsStore:
await secrets_store.store(initial_secrets)

# Create and store updated secrets
updated_secrets = UserSecrets(
updated_secrets = Secrets(
custom_secrets=MappingProxyType(
{
'api_token': CustomSecret.from_value(
79
evaluation/benchmarks/multi_swe_bench/compute_skip_ids.py
Normal file
@ -0,0 +1,79 @@
import argparse
import fnmatch
import json
from collections import Counter
from pathlib import Path


def find_final_reports(base_dir, pattern=None):
base_path = Path(base_dir)
if not base_path.exists():
raise FileNotFoundError(f'Base directory does not exist: {base_dir}')

# Find all final_report.json files
all_reports = list(base_path.rglob('final_report.json'))

if pattern is None:
return all_reports

# Filter by pattern
filtered_reports = []
for report in all_reports:
# Get relative path from base_dir for matching
rel_path = report.relative_to(base_path)
if fnmatch.fnmatch(str(rel_path), pattern):
filtered_reports.append(report)

return filtered_reports


def collect_resolved_ids(report_files):
id_counter = Counter()

for report_file in report_files:
with open(report_file, 'r') as f:
data = json.load(f)
if 'resolved_ids' not in data:
raise KeyError(f"'resolved_ids' key not found in {report_file}")
resolved_ids = data['resolved_ids']
id_counter.update(resolved_ids)

return id_counter


def get_skip_ids(id_counter, threshold):
return [id_str for id_str, count in id_counter.items() if count >= threshold]


def main():
parser = argparse.ArgumentParser(
description='Compute SKIP_IDS from resolved IDs in final_report.json files'
)
parser.add_argument(
'threshold',
type=int,
help='Minimum number of times an ID must be resolved to be skipped',
)
parser.add_argument(
'--base-dir',
default='evaluation/evaluation_outputs/outputs',
help='Base directory to search for final_report.json files (default: evaluation/evaluation_outputs/outputs)',
)
parser.add_argument(
'--pattern',
default=None,
help='Glob pattern to filter paths (e.g., "*Multi-SWE-RL*/**/*gpt*")',
)

args = parser.parse_args()
report_files = find_final_reports(args.base_dir, args.pattern)
id_counter = collect_resolved_ids(report_files)

skip_ids = get_skip_ids(id_counter, args.threshold)
skip_ids = [s.replace('/', '__').replace(':pr-', '-') for s in skip_ids]
skip_ids = ','.join(sorted(skip_ids))
print(skip_ids)


if __name__ == '__main__':
main()
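The script prints a single comma-separated line suitable for the SKIP_IDS environment variable, normalizing IDs ('/' becomes '__', ':pr-' becomes '-') to match instance_id formatting. For example, to skip anything resolved in at least 2 of the matched runs:

poetry run python evaluation/benchmarks/multi_swe_bench/compute_skip_ids.py 2 --pattern "*Multi-SWE-RL*/**/*gpt*"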
@ -747,10 +747,14 @@ def filter_dataset(dataset: pd.DataFrame, filter_column: str) -> pd.DataFrame:
subset = dataset[dataset[filter_column].isin(selected_ids)]
logger.info(f'Retained {subset.shape[0]} tasks after filtering')
return subset
skip_ids = os.environ.get('SKIP_IDS', '').split(',')
skip_ids = [id for id in os.environ.get('SKIP_IDS', '').split(',') if id]
if len(skip_ids) > 0:
logger.info(f'Dataset size before filtering: {dataset.shape[0]} tasks')
logger.info(f'Filtering {len(skip_ids)} tasks from "SKIP_IDS"...')
return dataset[~dataset[filter_column].isin(skip_ids)]
logger.info(f'SKIP_IDS:\n{skip_ids}')
filtered_dataset = dataset[~dataset[filter_column].isin(skip_ids)]
logger.info(f'Dataset size after filtering: {filtered_dataset.shape[0]} tasks')
return filtered_dataset
return dataset


@ -768,6 +772,11 @@ if __name__ == '__main__':
default='test',
help='split to evaluate on',
)
parser.add_argument(
'--filter_dataset_after_sampling',
action='store_true',
help='if provided, filter dataset after sampling instead of before',
)
args, _ = parser.parse_known_args()

# NOTE: It is preferable to load datasets from huggingface datasets and perform post-processing
@ -777,10 +786,24 @@ if __name__ == '__main__':
logger.info(f'Loading dataset {args.dataset} with split {args.split} ')
dataset = load_dataset('json', data_files=args.dataset)
dataset = dataset[args.split]
swe_bench_tests = filter_dataset(dataset.to_pandas(), 'instance_id')
logger.info(
f'Loaded dataset {args.dataset} with split {args.split}: {len(swe_bench_tests)} tasks'
)
swe_bench_tests = dataset.to_pandas()

# Determine filter strategy based on flag
filter_func = None
if args.filter_dataset_after_sampling:
# Pass filter as callback to apply after sampling
def filter_func(df):
return filter_dataset(df, 'instance_id')

logger.info(
f'Loaded dataset {args.dataset} with split {args.split}: {len(swe_bench_tests)} tasks (filtering will occur after sampling)'
)
else:
# Apply filter before sampling
swe_bench_tests = filter_dataset(swe_bench_tests, 'instance_id')
logger.info(
f'Loaded dataset {args.dataset} with split {args.split}: {len(swe_bench_tests)} tasks'
)

llm_config = None
if args.llm_config:
@ -810,7 +833,9 @@ if __name__ == '__main__':

output_file = os.path.join(metadata.eval_output_dir, 'output.jsonl')
print(f'### OUTPUT FILE: {output_file} ###')
instances = prepare_dataset(swe_bench_tests, output_file, args.eval_n_limit)
instances = prepare_dataset(
swe_bench_tests, output_file, args.eval_n_limit, filter_func=filter_func
)

if len(instances) > 0 and not isinstance(
instances['FAIL_TO_PASS'][instances['FAIL_TO_PASS'].index[0]], str
@ -8,8 +8,14 @@
MODEL=$1 # eg your llm config name in config.toml (eg: "llm.claude-3-5-sonnet-20241022-t05")
EXP_NAME=$2 # "train-t05"
EVAL_DATASET=$3 # path to original dataset (jsonl file)
N_WORKERS=${4:-64}
N_RUNS=${5:-1}
MAX_ITER=$4
N_WORKERS=${5:-64}
N_RUNS=${6:-1}
EVAL_LIMIT=${7:-}
SKIP_IDS_THRESHOLD=$8
SKIP_IDS_PATTERN=$9
INPUT_SKIP_IDS=${10}
FILTER_DATASET_AFTER_SAMPLING=${11:-}

export EXP_NAME=$EXP_NAME
# use 2x resources for rollout since some codebases are pretty resource-intensive
@ -17,6 +23,7 @@ export DEFAULT_RUNTIME_RESOURCE_FACTOR=2
echo "MODEL: $MODEL"
echo "EXP_NAME: $EXP_NAME"
echo "EVAL_DATASET: $EVAL_DATASET"
echo "INPUT_SKIP_IDS: $INPUT_SKIP_IDS"
# Generate DATASET path by adding _with_runtime_ before .jsonl extension
DATASET="${EVAL_DATASET%.jsonl}_with_runtime_.jsonl" # path to converted dataset

@ -35,9 +42,6 @@ else
export SANDBOX_REMOTE_RUNTIME_API_URL="https://runtime.eval.all-hands.dev"
fi

#EVAL_LIMIT=3000
MAX_ITER=100


# ===== Run inference =====
source "evaluation/utils/version_control.sh"
@ -69,17 +73,52 @@ function run_eval() {
--dataset $DATASET \
--split $SPLIT"

# Conditionally add filter flag
if [ "$FILTER_DATASET_AFTER_SAMPLING" = "true" ]; then
COMMAND="$COMMAND --filter_dataset_after_sampling"
fi

echo "Running command: $COMMAND"
if [ -n "$EVAL_LIMIT" ]; then
echo "EVAL_LIMIT: $EVAL_LIMIT"
COMMAND="$COMMAND --eval-n-limit $EVAL_LIMIT"
fi

# Run the command
eval $COMMAND
}

for run_idx in $(seq 1 $N_RUNS); do
if [ -n "$SKIP_IDS_THRESHOLD" ]; then
echo "Computing SKIP_IDS for run $run_idx..."
SKIP_CMD="poetry run python evaluation/benchmarks/multi_swe_bench/compute_skip_ids.py $SKIP_IDS_THRESHOLD"
if [ -n "$SKIP_IDS_PATTERN" ]; then
SKIP_CMD="$SKIP_CMD --pattern \"$SKIP_IDS_PATTERN\""
fi
COMPUTED_SKIP_IDS=$(eval $SKIP_CMD)
SKIP_STATUS=$?
if [ $SKIP_STATUS -ne 0 ]; then
echo "ERROR: Skip IDs computation failed with exit code $SKIP_STATUS"
exit $SKIP_STATUS
fi
echo "COMPUTED_SKIP_IDS: $COMPUTED_SKIP_IDS"
else
echo "SKIP_IDS_THRESHOLD not provided, skipping SKIP_IDS computation"
COMPUTED_SKIP_IDS=""
fi

# Concatenate COMPUTED_SKIP_IDS and INPUT_SKIP_IDS
if [ -n "$COMPUTED_SKIP_IDS" ] && [ -n "$INPUT_SKIP_IDS" ]; then
export SKIP_IDS="${COMPUTED_SKIP_IDS},${INPUT_SKIP_IDS}"
elif [ -n "$COMPUTED_SKIP_IDS" ]; then
export SKIP_IDS="$COMPUTED_SKIP_IDS"
elif [ -n "$INPUT_SKIP_IDS" ]; then
export SKIP_IDS="$INPUT_SKIP_IDS"
else
unset SKIP_IDS
fi

echo "FINAL SKIP_IDS: $SKIP_IDS"
echo ""

while true; do
echo "### Running inference... ###"
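Note the interface change above: MAX_ITER is now the required fourth positional argument (it was previously hardcoded to 100 further down in the script), shifting N_WORKERS and N_RUNS and adding five optional trailing arguments. An illustrative invocation under the new signature (the script path and dataset path are placeholders, since the diff does not show them):

bash <rollout_script>.sh llm.claude-3-5-sonnet-20241022-t05 train-t05 data.jsonl 100 64 1 '' 2 '' '' true

Here the trailing values map to EVAL_LIMIT (empty means no limit), SKIP_IDS_THRESHOLD, SKIP_IDS_PATTERN, INPUT_SKIP_IDS, and FILTER_DATASET_AFTER_SAMPLING.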
@ -259,6 +259,9 @@ def get_config(
condenser=metadata.condenser_config,
enable_prompt_extensions=False,
model_routing=model_routing_config,
system_prompt_filename=metadata.agent_config.system_prompt_filename
if metadata.agent_config
else 'system_prompt.j2',
)
config.set_agent_config(agent_config)
@ -9,7 +9,7 @@ import time
import traceback
from contextlib import contextmanager
from inspect import signature
from typing import Any, Awaitable, Callable, TextIO
from typing import Any, Awaitable, Callable, Optional, TextIO

import pandas as pd
from pydantic import BaseModel
@ -222,6 +222,7 @@ def prepare_dataset(
eval_n_limit: int,
eval_ids: list[str] | None = None,
skip_num: int | None = None,
filter_func: Optional[Callable[[pd.DataFrame], pd.DataFrame]] = None,
):
assert 'instance_id' in dataset.columns, (
"Expected 'instance_id' column in the dataset. You should define your own unique identifier for each instance and use it as the 'instance_id' column."
@ -265,6 +266,12 @@ def prepare_dataset(
f'Randomly sampling {eval_n_limit} unique instances with random seed 42.'
)

if filter_func is not None:
dataset = filter_func(dataset)
logger.info(
f'Applied filter after sampling: {len(dataset)} instances remaining'
)

def make_serializable(instance_dict: dict) -> dict:
import numpy as np
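The new `filter_func` hook runs only after the seed-42 sampling step, which is what `--filter_dataset_after_sampling` relies on: the sampled pool stays stable across runs while already-resolved instances are dropped afterwards. A minimal usage sketch (the skip set and ID are illustrative):

import pandas as pd

skip = {'repo__pkg-123'}  # illustrative instance_id

def drop_resolved(df: pd.DataFrame) -> pd.DataFrame:
    return df[~df['instance_id'].isin(skip)]

# instances = prepare_dataset(dataset, output_file, eval_n_limit, filter_func=drop_resolved)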
@ -188,172 +188,4 @@ describe("PaymentForm", () => {
      expect(mockMutate).not.toHaveBeenCalled();
    });
  });

  describe("Cancel Subscription", () => {
    const getSubscriptionAccessSpy = vi.spyOn(
      BillingService,
      "getSubscriptionAccess",
    );
    const cancelSubscriptionSpy = vi.spyOn(
      BillingService,
      "cancelSubscription",
    );

    beforeEach(() => {
      // Mock active subscription
      getSubscriptionAccessSpy.mockResolvedValue({
        start_at: "2024-01-01T00:00:00Z",
        end_at: "2024-12-31T23:59:59Z",
        created_at: "2024-01-01T00:00:00Z",
      });
    });

    it("should render cancel subscription button when user has active subscription", async () => {
      renderPaymentForm();

      await waitFor(() => {
        const cancelButton = screen.getByTestId("cancel-subscription-button");
        expect(cancelButton).toBeInTheDocument();
        expect(cancelButton).toHaveTextContent("PAYMENT$CANCEL_SUBSCRIPTION");
      });
    });

    it("should not render cancel subscription button when user has no subscription", async () => {
      getSubscriptionAccessSpy.mockResolvedValue(null);
      renderPaymentForm();

      await waitFor(() => {
        const cancelButton = screen.queryByTestId("cancel-subscription-button");
        expect(cancelButton).not.toBeInTheDocument();
      });
    });

    it("should show confirmation modal when cancel subscription button is clicked", async () => {
      const user = userEvent.setup();
      renderPaymentForm();

      const cancelButton = await screen.findByTestId(
        "cancel-subscription-button",
      );
      await user.click(cancelButton);

      // Should show confirmation modal
      expect(
        screen.getByTestId("cancel-subscription-modal"),
      ).toBeInTheDocument();
      expect(
        screen.getByText("PAYMENT$CANCEL_SUBSCRIPTION_TITLE"),
      ).toBeInTheDocument();
      // The message should be rendered (either with Trans component or regular text)
      const modalContent = screen.getByTestId("cancel-subscription-modal");
      expect(modalContent).toBeInTheDocument();
      expect(screen.getByTestId("confirm-cancel-button")).toBeInTheDocument();
      expect(screen.getByTestId("modal-cancel-button")).toBeInTheDocument();
    });

    it("should close modal when cancel button in modal is clicked", async () => {
      const user = userEvent.setup();
      renderPaymentForm();

      const cancelButton = await screen.findByTestId(
        "cancel-subscription-button",
      );
      await user.click(cancelButton);

      // Modal should be visible
      expect(
        screen.getByTestId("cancel-subscription-modal"),
      ).toBeInTheDocument();

      // Click cancel in modal
      const modalCancelButton = screen.getByTestId("modal-cancel-button");
      await user.click(modalCancelButton);

      // Modal should be closed
      expect(
        screen.queryByTestId("cancel-subscription-modal"),
      ).not.toBeInTheDocument();
    });

    it("should call cancel subscription API when confirm button is clicked", async () => {
      const user = userEvent.setup();
      renderPaymentForm();

      const cancelButton = await screen.findByTestId(
        "cancel-subscription-button",
      );
      await user.click(cancelButton);

      // Click confirm in modal
      const confirmButton = screen.getByTestId("confirm-cancel-button");
      await user.click(confirmButton);

      // Should call the cancel subscription API
      expect(cancelSubscriptionSpy).toHaveBeenCalled();
    });

    it("should close modal after successful cancellation", async () => {
      const user = userEvent.setup();
      cancelSubscriptionSpy.mockResolvedValue({
        status: "success",
        message: "Subscription cancelled successfully",
      });
      renderPaymentForm();

      const cancelButton = await screen.findByTestId(
        "cancel-subscription-button",
      );
      await user.click(cancelButton);

      const confirmButton = screen.getByTestId("confirm-cancel-button");
      await user.click(confirmButton);

      // Wait for API call to complete and modal to close
      await waitFor(() => {
        expect(
          screen.queryByTestId("cancel-subscription-modal"),
        ).not.toBeInTheDocument();
      });
    });

    it("should show next billing date for active subscription", async () => {
      // Mock active subscription with end_at as next billing date
      getSubscriptionAccessSpy.mockResolvedValue({
        start_at: "2024-01-01T00:00:00Z",
        end_at: "2025-01-01T00:00:00Z",
        created_at: "2024-01-01T00:00:00Z",
        cancelled_at: null,
        stripe_subscription_id: "sub_123",
      });

      renderPaymentForm();

      await waitFor(() => {
        const nextBillingInfo = screen.getByTestId("next-billing-date");
        expect(nextBillingInfo).toBeInTheDocument();
        // Check that it contains some date-related content (translation key or actual date)
        expect(nextBillingInfo).toHaveTextContent(
          /2025|PAYMENT.*BILLING.*DATE/,
        );
      });
    });

    it("should not show next billing date when subscription is cancelled", async () => {
      // Mock cancelled subscription
      getSubscriptionAccessSpy.mockResolvedValue({
        start_at: "2024-01-01T00:00:00Z",
        end_at: "2025-01-01T00:00:00Z",
        created_at: "2024-01-01T00:00:00Z",
        cancelled_at: "2024-06-15T10:30:00Z",
        stripe_subscription_id: "sub_123",
      });

      renderPaymentForm();

      await waitFor(() => {
        const nextBillingInfo = screen.queryByTestId("next-billing-date");
        expect(nextBillingInfo).not.toBeInTheDocument();
      });
    });
  });
});
@ -1,47 +0,0 @@
import { render, screen } from "@testing-library/react";
import { JupyterEditor } from "#/components/features/jupyter/jupyter";
import { vi, describe, it, expect, beforeEach } from "vitest";
import { AgentState } from "#/types/agent-state";
import { useAgentState } from "#/hooks/use-agent-state";
import { useJupyterStore } from "#/state/jupyter-store";

// Mock the agent state hook
vi.mock("#/hooks/use-agent-state", () => ({
  useAgentState: vi.fn(),
}));

// Mock react-i18next
vi.mock("react-i18next", () => ({
  useTranslation: () => ({
    t: (key: string) => key,
  }),
}));

describe("JupyterEditor", () => {
  beforeEach(() => {
    // Reset the Zustand store before each test
    useJupyterStore.setState({
      cells: Array(20).fill({
        content: "Test cell content",
        type: "input",
        imageUrls: undefined,
      }),
    });
  });

  it("should have a scrollable container", () => {
    // Mock agent state to return RUNNING state (not in RUNTIME_INACTIVE_STATES)
    vi.mocked(useAgentState).mockReturnValue({
      curAgentState: AgentState.RUNNING,
    });

    render(
      <div style={{ height: "100vh" }}>
        <JupyterEditor maxWidth={800} />
      </div>,
    );

    const container = screen.getByTestId("jupyter-container");
    expect(container).toHaveClass("flex-1 overflow-y-auto");
  });
});
@ -11,6 +11,7 @@ const renderTerminal = (commands: Command[] = []) => {
};

describe.skip("Terminal", () => {
  // Terminal is now read-only - no user input functionality
  global.ResizeObserver = vi.fn().mockImplementation(() => ({
    observe: vi.fn(),
    disconnect: vi.fn(),
@ -21,8 +22,6 @@ describe.skip("Terminal", () => {
    write: vi.fn(),
    writeln: vi.fn(),
    dispose: vi.fn(),
    onKey: vi.fn(),
    attachCustomKeyEventHandler: vi.fn(),
    loadAddon: vi.fn(),
  };
@ -1,6 +1,7 @@
import { describe, it, expect, beforeAll, afterAll, afterEach } from "vitest";
import { screen, waitFor, render, cleanup } from "@testing-library/react";
import { QueryClient, QueryClientProvider } from "@tanstack/react-query";
import { http, HttpResponse } from "msw";
import { useOptimisticUserMessageStore } from "#/stores/optimistic-user-message-store";
import {
  createMockMessageEvent,
@ -13,8 +14,12 @@ import {
  OptimisticUserMessageStoreComponent,
  ErrorMessageStoreComponent,
} from "./helpers/websocket-test-components";
import { ConversationWebSocketProvider } from "#/contexts/conversation-websocket-context";
import {
  ConversationWebSocketProvider,
  useConversationWebSocket,
} from "#/contexts/conversation-websocket-context";
import { conversationWebSocketTestSetup } from "./helpers/msw-websocket-setup";
import { useEventStore } from "#/stores/use-event-store";

// MSW WebSocket mock setup
const { wsLink, server: mswServer } = conversationWebSocketTestSetup();
@ -417,7 +422,206 @@ describe("Conversation WebSocket Handler", () => {
    it.todo("should handle send attempts when disconnected");
  });

  // 8. Terminal I/O Tests (ExecuteBashAction and ExecuteBashObservation)
  // 8. History Loading State Tests
  describe("History Loading State", () => {
    it("should track history loading state using event count from API", async () => {
      const conversationId = "test-conversation-with-history";

      // Mock the event count API to return 3 events
      const expectedEventCount = 3;

      // Create 3 mock events to simulate history
      const mockHistoryEvents = [
        createMockUserMessageEvent({ id: "history-event-1" }),
        createMockMessageEvent({ id: "history-event-2" }),
        createMockMessageEvent({ id: "history-event-3" }),
      ];

      // Set up MSW to mock both the HTTP API and WebSocket connection
      mswServer.use(
        http.get("/api/v1/events/count", ({ request }) => {
          const url = new URL(request.url);
          const conversationIdParam = url.searchParams.get(
            "conversation_id__eq",
          );

          if (conversationIdParam === conversationId) {
            return HttpResponse.json(expectedEventCount);
          }

          return HttpResponse.json(0);
        }),
        wsLink.addEventListener("connection", ({ client, server }) => {
          server.connect();
          // Send all history events
          mockHistoryEvents.forEach((event) => {
            client.send(JSON.stringify(event));
          });
        }),
      );

      // Create a test component that displays loading state
      const HistoryLoadingComponent = () => {
        const context = useConversationWebSocket();
        const { events } = useEventStore();

        return (
          <div>
            <div data-testid="is-loading-history">
              {context?.isLoadingHistory ? "true" : "false"}
            </div>
            <div data-testid="events-received">{events.length}</div>
            <div data-testid="expected-event-count">{expectedEventCount}</div>
          </div>
        );
      };

      // Render with WebSocket context
      renderWithWebSocketContext(
        <HistoryLoadingComponent />,
        conversationId,
        `http://localhost:3000/api/conversations/${conversationId}`,
      );

      // Initially should be loading history
      expect(screen.getByTestId("is-loading-history")).toHaveTextContent("true");

      // Wait for all events to be received
      await waitFor(() => {
        expect(screen.getByTestId("events-received")).toHaveTextContent("3");
      });

      // Once all events are received, loading should be complete
      await waitFor(() => {
        expect(screen.getByTestId("is-loading-history")).toHaveTextContent(
          "false",
        );
      });
    });

    it("should handle empty conversation history", async () => {
      const conversationId = "test-conversation-empty";

      // Set up MSW to mock both the HTTP API and WebSocket connection
      mswServer.use(
        http.get("/api/v1/events/count", ({ request }) => {
          const url = new URL(request.url);
          const conversationIdParam = url.searchParams.get(
            "conversation_id__eq",
          );

          if (conversationIdParam === conversationId) {
            return HttpResponse.json(0);
          }

          return HttpResponse.json(0);
        }),
        wsLink.addEventListener("connection", ({ server }) => {
          server.connect();
          // No events sent for empty history
        }),
      );

      // Create a test component that displays loading state
      const HistoryLoadingComponent = () => {
        const context = useConversationWebSocket();

        return (
          <div>
            <div data-testid="is-loading-history">
              {context?.isLoadingHistory ? "true" : "false"}
            </div>
          </div>
        );
      };

      // Render with WebSocket context
      renderWithWebSocketContext(
        <HistoryLoadingComponent />,
        conversationId,
        `http://localhost:3000/api/conversations/${conversationId}`,
      );

      // Should quickly transition from loading to not loading when count is 0
      await waitFor(() => {
        expect(screen.getByTestId("is-loading-history")).toHaveTextContent(
          "false",
        );
      });
    });

    it("should handle history loading with large event count", async () => {
      const conversationId = "test-conversation-large-history";

      // Create 50 mock events to simulate large history
      const expectedEventCount = 50;
      const mockHistoryEvents = Array.from({ length: 50 }, (_, i) =>
        createMockMessageEvent({ id: `history-event-${i + 1}` }),
      );

      // Set up MSW to mock both the HTTP API and WebSocket connection
      mswServer.use(
        http.get("/api/v1/events/count", ({ request }) => {
          const url = new URL(request.url);
          const conversationIdParam = url.searchParams.get(
            "conversation_id__eq",
          );

          if (conversationIdParam === conversationId) {
            return HttpResponse.json(expectedEventCount);
          }

          return HttpResponse.json(0);
        }),
        wsLink.addEventListener("connection", ({ client, server }) => {
          server.connect();
          // Send all history events
          mockHistoryEvents.forEach((event) => {
            client.send(JSON.stringify(event));
          });
        }),
      );

      // Create a test component that displays loading state
      const HistoryLoadingComponent = () => {
        const context = useConversationWebSocket();
        const { events } = useEventStore();

        return (
          <div>
            <div data-testid="is-loading-history">
              {context?.isLoadingHistory ? "true" : "false"}
            </div>
            <div data-testid="events-received">{events.length}</div>
          </div>
        );
      };

      // Render with WebSocket context
      renderWithWebSocketContext(
        <HistoryLoadingComponent />,
        conversationId,
        `http://localhost:3000/api/conversations/${conversationId}`,
      );

      // Initially should be loading history
      expect(screen.getByTestId("is-loading-history")).toHaveTextContent("true");

      // Wait for all events to be received
      await waitFor(() => {
        expect(screen.getByTestId("events-received")).toHaveTextContent("50");
      });

      // Once all events are received, loading should be complete
      await waitFor(() => {
        expect(screen.getByTestId("is-loading-history")).toHaveTextContent(
          "false",
        );
      });
    });
  });

  // 9. Terminal I/O Tests (ExecuteBashAction and ExecuteBashObservation)
  describe("Terminal I/O Integration", () => {
    it("should append command to store when ExecuteBashAction event is received", async () => {
      const { createMockExecuteBashActionEvent } = await import(
@ -38,8 +38,7 @@ export const createWebSocketTestSetup = (
/**
 * Standard WebSocket test setup for conversation WebSocket handler tests
 * Updated to use the V1 WebSocket URL pattern: /sockets/events/{conversationId}
 * Uses a wildcard pattern to match any conversation ID
 */
export const conversationWebSocketTestSetup = () =>
  createWebSocketTestSetup(
    "ws://localhost:3000/sockets/events/test-conversation-default",
  );
  createWebSocketTestSetup("ws://localhost:3000/sockets/events/*");
@ -35,13 +35,12 @@ function TestTerminalComponent() {
}

describe("useTerminal", () => {
  // Terminal is read-only - no longer tests user input functionality
  const mockTerminal = vi.hoisted(() => ({
    loadAddon: vi.fn(),
    open: vi.fn(),
    write: vi.fn(),
    writeln: vi.fn(),
    onKey: vi.fn(),
    attachCustomKeyEventHandler: vi.fn(),
    dispose: vi.fn(),
  }));
@ -4,14 +4,12 @@ import { beforeEach, describe, expect, it, vi } from "vitest";
import { QueryClientProvider, QueryClient } from "@tanstack/react-query";
import LlmSettingsScreen from "#/routes/llm-settings";
import SettingsService from "#/settings-service/settings-service.api";
import OptionService from "#/api/option-service/option-service.api";
import {
  MOCK_DEFAULT_USER_SETTINGS,
  resetTestHandlersMockSettings,
} from "#/mocks/handlers";
import * as AdvancedSettingsUtlls from "#/utils/has-advanced-settings-set";
import * as ToastHandlers from "#/utils/custom-toast-handlers";
import BillingService from "#/api/billing-service/billing-service.api";

// Mock react-router hooks
const mockUseSearchParams = vi.fn();
@ -25,12 +23,6 @@ vi.mock("#/hooks/query/use-is-authed", () => ({
  useIsAuthed: () => mockUseIsAuthed(),
}));

// Mock useIsAllHandsSaaSEnvironment hook
const mockUseIsAllHandsSaaSEnvironment = vi.fn();
vi.mock("#/hooks/use-is-all-hands-saas-environment", () => ({
  useIsAllHandsSaaSEnvironment: () => mockUseIsAllHandsSaaSEnvironment(),
}));

const renderLlmSettingsScreen = () =>
  render(<LlmSettingsScreen />, {
    wrapper: ({ children }) => (
@ -54,9 +46,6 @@ beforeEach(() => {

  // Default mock for useIsAuthed - returns authenticated by default
  mockUseIsAuthed.mockReturnValue({ data: true, isLoading: false });

  // Default mock for useIsAllHandsSaaSEnvironment - returns true for SaaS environment
  mockUseIsAllHandsSaaSEnvironment.mockReturnValue(true);
});

describe("Content", () => {
@ -605,9 +594,14 @@ describe("Form submission", () => {
    renderLlmSettingsScreen();

    await screen.findByTestId("llm-settings-screen");
    // Component automatically shows advanced view when advanced settings exist
    // Switch to basic view to test clearing advanced settings
    const advancedSwitch = screen.getByTestId("advanced-settings-switch");
    await userEvent.click(advancedSwitch);

    // Now we should be in basic view
    await screen.findByTestId("llm-settings-form-basic");

    const provider = screen.getByTestId("llm-provider-input");
    const model = screen.getByTestId("llm-model-input");

@ -731,405 +725,3 @@ describe("Status toasts", () => {
    });
  });
});

describe("SaaS mode", () => {
  describe("SaaS subscription", () => {
    // Common mock configurations
    const MOCK_SAAS_CONFIG = {
      APP_MODE: "saas" as const,
      GITHUB_CLIENT_ID: "fake-github-client-id",
      POSTHOG_CLIENT_KEY: "fake-posthog-client-key",
      FEATURE_FLAGS: {
        ENABLE_BILLING: true,
        HIDE_LLM_SETTINGS: false,
        ENABLE_JIRA: false,
        ENABLE_JIRA_DC: false,
        ENABLE_LINEAR: false,
      },
    };

    const MOCK_ACTIVE_SUBSCRIPTION = {
      start_at: "2024-01-01",
      end_at: "2024-12-31",
      created_at: "2024-01-01",
    };

    it("should show upgrade banner and prevent all interactions for unsubscribed SaaS users", async () => {
      // Mock SaaS mode without subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (no subscription)
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      // Mock saveSettings to ensure it's not called
      const saveSettingsSpy = vi.spyOn(SettingsService, "saveSettings");

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Should show upgrade banner
      expect(screen.getByTestId("upgrade-banner")).toBeInTheDocument();

      // Should have a clickable upgrade button
      const upgradeButton = screen.getByRole("button", { name: /upgrade/i });
      expect(upgradeButton).toBeInTheDocument();
      expect(upgradeButton).not.toBeDisabled();

      // Form should be disabled
      const form = screen.getByTestId("llm-settings-form-basic");
      expect(form).toHaveAttribute("aria-disabled", "true");

      // All form inputs should be disabled or non-interactive
      const providerInput = screen.getByTestId("llm-provider-input");
      const modelInput = screen.getByTestId("llm-model-input");
      const apiKeyInput = screen.getByTestId("llm-api-key-input");
      const advancedSwitch = screen.getByTestId("advanced-settings-switch");
      const submitButton = screen.getByTestId("submit-button");

      // Inputs should be disabled
      expect(providerInput).toBeDisabled();
      expect(modelInput).toBeDisabled();
      expect(apiKeyInput).toBeDisabled();
      expect(advancedSwitch).toBeDisabled();
      expect(submitButton).toBeDisabled();

      // Confirmation mode switch is in advanced view, so it's not visible in basic view
      expect(
        screen.queryByTestId("enable-confirmation-mode-switch"),
      ).not.toBeInTheDocument();

      // Try to interact with inputs - they should not respond
      await userEvent.click(providerInput);
      await userEvent.type(apiKeyInput, "test-key");

      // Values should not change
      expect(apiKeyInput).toHaveValue("");

      // Try to submit form - should not call API
      await userEvent.click(submitButton);
      expect(saveSettingsSpy).not.toHaveBeenCalled();
    });

    it("should call subscription checkout API when upgrade button is clicked", async () => {
      // Mock SaaS mode without subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (no subscription)
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      // Mock the subscription checkout API call
      const createSubscriptionCheckoutSessionSpy = vi.spyOn(
        BillingService,
        "createSubscriptionCheckoutSession",
      );
      createSubscriptionCheckoutSessionSpy.mockResolvedValue({});

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Click the upgrade button
      const upgradeButton = screen.getByRole("button", { name: /upgrade/i });
      await userEvent.click(upgradeButton);

      // Should call the subscription checkout API
      expect(createSubscriptionCheckoutSessionSpy).toHaveBeenCalled();
    });

    it("should disable upgrade button for unauthenticated users in SaaS mode", async () => {
      // Mock SaaS mode without subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (no subscription)
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      // Mock subscription checkout API
      const createSubscriptionCheckoutSessionSpy = vi.spyOn(
        BillingService,
        "createSubscriptionCheckoutSession",
      );

      // Mock authentication to return false (unauthenticated) from the start
      mockUseIsAuthed.mockReturnValue({ data: false, isLoading: false });

      // Mock settings to return default settings even when unauthenticated
      // This is necessary because the useSettings hook is disabled when user is not authenticated
      const getSettingsSpy = vi.spyOn(SettingsService, "getSettings");
      getSettingsSpy.mockResolvedValue(MOCK_DEFAULT_USER_SETTINGS);

      renderLlmSettingsScreen();

      // Wait for either the settings screen or skeleton to appear
      await waitFor(() => {
        const settingsScreen = screen.queryByTestId("llm-settings-screen");
        const skeleton = screen.queryByTestId("app-settings-skeleton");
        expect(settingsScreen || skeleton).toBeInTheDocument();
      });

      // If we get the skeleton, the test scenario isn't valid - skip the rest
      if (screen.queryByTestId("app-settings-skeleton")) {
        // For unauthenticated users, the settings don't load, so no upgrade banner is shown
        // This is the expected behavior - unauthenticated users see a skeleton loading state
        expect(screen.queryByTestId("upgrade-banner")).not.toBeInTheDocument();
        return;
      }

      await screen.findByTestId("llm-settings-screen");

      // Should show upgrade banner
      expect(screen.getByTestId("upgrade-banner")).toBeInTheDocument();

      // Upgrade button should be disabled for unauthenticated users
      const upgradeButton = screen.getByRole("button", { name: /upgrade/i });
      expect(upgradeButton).toBeInTheDocument();
      expect(upgradeButton).toBeDisabled();

      // Clicking disabled button should not call the API
      await userEvent.click(upgradeButton);
      expect(createSubscriptionCheckoutSessionSpy).not.toHaveBeenCalled();
    });

    it("should not show upgrade banner and allow form interaction for subscribed SaaS users", async () => {
      // Mock SaaS mode with subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return active subscription
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(MOCK_ACTIVE_SUBSCRIPTION);

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Wait for subscription data to load
      await waitFor(() => {
        expect(getSubscriptionAccessSpy).toHaveBeenCalled();
      });

      // Should NOT show upgrade banner
      expect(screen.queryByTestId("upgrade-banner")).not.toBeInTheDocument();

      // Form should NOT be disabled
      const form = screen.getByTestId("llm-settings-form-basic");
      expect(form).not.toHaveAttribute("aria-disabled", "true");
    });

    it("should not call save settings API when making changes in disabled form for unsubscribed users", async () => {
      // Mock SaaS mode without subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (no subscription)
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      // Mock saveSettings to track calls
      const saveSettingsSpy = vi.spyOn(SettingsService, "saveSettings");

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Verify that basic form elements are disabled for unsubscribed users
      const advancedSwitch = screen.getByTestId("advanced-settings-switch");
      const submitButton = screen.getByTestId("submit-button");

      expect(advancedSwitch).toBeDisabled();
      expect(submitButton).toBeDisabled();

      // Confirmation mode switch is in advanced view, which can't be accessed when form is disabled
      expect(
        screen.queryByTestId("enable-confirmation-mode-switch"),
      ).not.toBeInTheDocument();

      // Try to submit the form - button should remain disabled
      await userEvent.click(submitButton);

      // Should NOT call save settings API for unsubscribed users
      expect(saveSettingsSpy).not.toHaveBeenCalled();
    });

    it("should show backdrop overlay for unsubscribed users", async () => {
      // Mock SaaS mode without subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (no subscription)
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Wait for subscription data to load
      await waitFor(() => {
        expect(getSubscriptionAccessSpy).toHaveBeenCalled();
      });

      // Should show upgrade banner
      expect(screen.getByTestId("upgrade-banner")).toBeInTheDocument();

      // Should show backdrop overlay
      const backdrop = screen.getByTestId("settings-backdrop");
      expect(backdrop).toBeInTheDocument();
    });

    it("should not show backdrop overlay for subscribed users", async () => {
      // Mock SaaS mode with subscription
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return active subscription
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(MOCK_ACTIVE_SUBSCRIPTION);

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Wait for subscription data to load
      await waitFor(() => {
        expect(getSubscriptionAccessSpy).toHaveBeenCalled();
      });

      // Should NOT show backdrop overlay
      expect(screen.queryByTestId("settings-backdrop")).not.toBeInTheDocument();
    });

    it("should display success toast when redirected back with ?checkout=success parameter", async () => {
      // Mock SaaS mode
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(MOCK_ACTIVE_SUBSCRIPTION);

      // Mock toast handler
      const displaySuccessToastSpy = vi.spyOn(
        ToastHandlers,
        "displaySuccessToast",
      );

      // Mock URL search params with ?checkout=success
      mockUseSearchParams.mockReturnValue([
        {
          get: (param: string) => (param === "checkout" ? "success" : null),
        },
        vi.fn(),
      ]);

      // Render component with checkout=success parameter
      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Verify success toast is displayed with correct message
      expect(displaySuccessToastSpy).toHaveBeenCalledWith(
        "SUBSCRIPTION$SUCCESS",
      );
    });

    it("should display error toast when redirected back with ?checkout=cancel parameter", async () => {
      // Mock SaaS mode
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(MOCK_ACTIVE_SUBSCRIPTION);

      // Mock toast handler
      const displayErrorToastSpy = vi.spyOn(ToastHandlers, "displayErrorToast");

      // Mock URL search params with ?checkout=cancel
      mockUseSearchParams.mockReturnValue([
        {
          get: (param: string) => (param === "checkout" ? "cancel" : null),
        },
        vi.fn(),
      ]);

      // Render component with checkout=cancel parameter
      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Verify error toast is displayed with correct message
      expect(displayErrorToastSpy).toHaveBeenCalledWith("SUBSCRIPTION$FAILURE");
    });

    it("should show upgrade banner when subscription is expired or disabled", async () => {
      // Mock SaaS mode
      const getConfigSpy = vi.spyOn(OptionService, "getConfig");
      getConfigSpy.mockResolvedValue(MOCK_SAAS_CONFIG);

      // Mock subscription access to return null (expired/disabled subscriptions return null from backend)
      // The backend only returns active subscriptions within their validity period
      const getSubscriptionAccessSpy = vi.spyOn(
        BillingService,
        "getSubscriptionAccess",
      );
      getSubscriptionAccessSpy.mockResolvedValue(null);

      renderLlmSettingsScreen();
      await screen.findByTestId("llm-settings-screen");

      // Wait for subscription data to load
      await waitFor(() => {
        expect(getSubscriptionAccessSpy).toHaveBeenCalled();
      });

      // Should show upgrade banner for expired/disabled subscriptions (when API returns null)
      expect(screen.getByTestId("upgrade-banner")).toBeInTheDocument();

      // Form should be disabled
      const form = screen.getByTestId("llm-settings-form-basic");
      expect(form).toHaveAttribute("aria-disabled", "true");

      // All form inputs should be disabled
      const providerInput = screen.getByTestId("llm-provider-input");
      const modelInput = screen.getByTestId("llm-model-input");
      const apiKeyInput = screen.getByTestId("llm-api-key-input");
      const advancedSwitch = screen.getByTestId("advanced-settings-switch");

      expect(providerInput).toBeDisabled();
      expect(modelInput).toBeDisabled();
      expect(apiKeyInput).toBeDisabled();
      expect(advancedSwitch).toBeDisabled();

      // Confirmation mode switch is in advanced view, which can't be accessed when form is disabled
      expect(
        screen.queryByTestId("enable-confirmation-mode-switch"),
      ).not.toBeInTheDocument();
    });
  });
});
@ -5,7 +5,6 @@ import { ActionMessage } from "#/types/message";
// Mock the store and actions
const mockDispatch = vi.fn();
const mockAppendInput = vi.fn();
const mockAppendJupyterInput = vi.fn();

vi.mock("#/store", () => ({
  default: {
@ -21,14 +20,6 @@ vi.mock("#/state/command-store", () => ({
  },
}));

vi.mock("#/state/jupyter-store", () => ({
  useJupyterStore: {
    getState: () => ({
      appendJupyterInput: mockAppendJupyterInput,
    }),
  },
}));

vi.mock("#/state/metrics-slice", () => ({
  setMetrics: vi.fn(),
}));
@ -63,10 +54,9 @@ describe("handleActionMessage", () => {
    // Check that appendInput was called with the command
    expect(mockAppendInput).toHaveBeenCalledWith("ls -la");
    expect(mockDispatch).not.toHaveBeenCalled();
    expect(mockAppendJupyterInput).not.toHaveBeenCalled();
  });

  it("should handle RUN_IPYTHON actions by adding input to Jupyter", async () => {
  it("should handle RUN_IPYTHON actions as no-op (Jupyter removed)", async () => {
    const { handleActionMessage } = await import("#/services/actions");

    const ipythonAction: ActionMessage = {
@ -84,10 +74,7 @@ describe("handleActionMessage", () => {
    // Handle the action
    handleActionMessage(ipythonAction);

    // Check that appendJupyterInput was called with the code
    expect(mockAppendJupyterInput).toHaveBeenCalledWith(
      "print('Hello from Jupyter!')",
    );
    // Jupyter functionality has been removed, so nothing should be called
    expect(mockAppendInput).not.toHaveBeenCalled();
  });

@ -112,6 +99,5 @@ describe("handleActionMessage", () => {
    // Check that nothing was dispatched or called
    expect(mockDispatch).not.toHaveBeenCalled();
    expect(mockAppendInput).not.toHaveBeenCalled();
    expect(mockAppendJupyterInput).not.toHaveBeenCalled();
  });
});
4
frontend/package-lock.json
generated
@ -1,12 +1,12 @@
{
  "name": "openhands-frontend",
  "version": "0.59.0",
  "version": "0.60.0",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {
    "": {
      "name": "openhands-frontend",
      "version": "0.59.0",
      "version": "0.60.0",
      "dependencies": {
        "@heroui/react": "^2.8.4",
        "@heroui/use-infinite-scroll": "^2.2.11",

@ -1,6 +1,6 @@
{
  "name": "openhands-frontend",
  "version": "0.59.0",
  "version": "0.60.0",
  "private": true,
  "type": "module",
  "engines": {
@ -30,6 +30,9 @@ export default defineConfig({

    /* Collect trace when retrying the failed test. See https://playwright.dev/docs/trace-viewer */
    trace: "on-first-retry",
    /* Ignore SSL errors for browser agent test */
    /* Solution inspired by StackOverflow post: https://stackoverflow.com/questions/67048422/ignore-ssl-errors-with-playwright-code-generation */
    ignoreHTTPSErrors: true,
  },

  /* Configure projects for major browsers */
@ -187,7 +187,7 @@ class ConversationService {
  static async getRuntimeId(
    conversationId: string,
  ): Promise<{ runtime_id: string }> {
    const url = `/api/conversations/${conversationId}/config`;
    const url = `${this.getConversationUrl(conversationId)}/config`;
    const { data } = await openHands.get<{ runtime_id: string }>(url, {
      headers: this.getConversationHeaders(),
    });
@ -3,6 +3,7 @@ import { openHands } from "../open-hands-axios";
import { ConversationTrigger, GetVSCodeUrlResponse } from "../open-hands.types";
import { Provider } from "#/types/settings";
import { buildHttpBaseUrl } from "#/utils/websocket-url";
import { buildSessionHeaders } from "#/utils/utils";
import type {
  V1SendMessageRequest,
  V1SendMessageResponse,
@ -10,24 +11,10 @@ import type {
  V1AppConversationStartTask,
  V1AppConversationStartTaskPage,
  V1AppConversation,
  V1SandboxInfo,
} from "./v1-conversation-service.types";

class V1ConversationService {
  /**
   * Build headers for V1 API requests that require session authentication
   * @param sessionApiKey Session API key for authentication
   * @returns Headers object with X-Session-API-Key if provided
   */
  private static buildSessionHeaders(
    sessionApiKey?: string | null,
  ): Record<string, string> {
    const headers: Record<string, string> = {};
    if (sessionApiKey) {
      headers["X-Session-API-Key"] = sessionApiKey;
    }
    return headers;
  }
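Note: the hunk above removes the private buildSessionHeaders method in favor of a shared helper imported from #/utils/utils. The shared helper itself is not part of this diff; based on the deleted method body, a minimal sketch of what it presumably exports (the name and location are taken from the import, everything else is reconstructed):

// Hypothetical sketch of the shared helper in #/utils/utils, reconstructed
// from the private method deleted above; the actual export may differ.
export function buildSessionHeaders(
  sessionApiKey?: string | null,
): Record<string, string> {
  // Attach the V1 session key header only when a key is present
  const headers: Record<string, string> = {};
  if (sessionApiKey) {
    headers["X-Session-API-Key"] = sessionApiKey;
  }
  return headers;
}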
  /**
   * Build the full URL for V1 runtime-specific endpoints
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
@ -160,7 +147,7 @@ class V1ConversationService {
    sessionApiKey?: string | null,
  ): Promise<GetVSCodeUrlResponse> {
    const url = this.buildRuntimeUrl(conversationUrl, "/api/vscode/url");
    const headers = this.buildSessionHeaders(sessionApiKey);
    const headers = buildSessionHeaders(sessionApiKey);

    // V1 API returns {url: '...'} instead of {vscode_url: '...'}
    // Map it to match the expected interface
@ -188,7 +175,35 @@ class V1ConversationService {
      conversationUrl,
      `/api/conversations/${conversationId}/pause`,
    );
    const headers = this.buildSessionHeaders(sessionApiKey);
    const headers = buildSessionHeaders(sessionApiKey);

    const { data } = await axios.post<{ success: boolean }>(
      url,
      {},
      { headers },
    );
    return data;
  }

  /**
   * Resume a V1 conversation
   * Uses the custom runtime URL from the conversation
   *
   * @param conversationId The conversation ID
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
   * @param sessionApiKey Session API key for authentication (required for V1)
   * @returns Success response
   */
  static async resumeConversation(
    conversationId: string,
    conversationUrl: string | null | undefined,
    sessionApiKey?: string | null,
  ): Promise<{ success: boolean }> {
    const url = this.buildRuntimeUrl(
      conversationUrl,
      `/api/conversations/${conversationId}/run`,
    );
    const headers = buildSessionHeaders(sessionApiKey);

    const { data } = await axios.post<{ success: boolean }>(
      url,
@ -254,6 +269,32 @@ class V1ConversationService {
    return data;
  }

  /**
   * Batch get V1 sandboxes by their IDs
   * Returns null for any missing sandboxes
   *
   * @param ids Array of sandbox IDs (max 100)
   * @returns Array of sandboxes or null for missing ones
   */
  static async batchGetSandboxes(
    ids: string[],
  ): Promise<(V1SandboxInfo | null)[]> {
    if (ids.length === 0) {
      return [];
    }
    if (ids.length > 100) {
      throw new Error("Cannot request more than 100 sandboxes at once");
    }

    const params = new URLSearchParams();
    ids.forEach((id) => params.append("id", id));

    const { data } = await openHands.get<(V1SandboxInfo | null)[]>(
      `/api/v1/sandboxes?${params.toString()}`,
    );
    return data;
  }

  /**
   * Upload a single file to the V1 conversation workspace
   * V1 API endpoint: POST /api/file/upload/{path}
@ -277,7 +318,7 @@ class V1ConversationService {
      conversationUrl,
      `/api/file/upload/${encodedPath}`,
    );
    const headers = this.buildSessionHeaders(sessionApiKey);
    const headers = buildSessionHeaders(sessionApiKey);

    // Create FormData with the file
    const formData = new FormData();
@ -291,6 +332,37 @@ class V1ConversationService {
      },
    });
  }

  /**
   * Get the conversation config (runtime_id) for a V1 conversation
   * @param conversationId The conversation ID
   * @returns Object containing runtime_id
   */
  static async getConversationConfig(
    conversationId: string,
  ): Promise<{ runtime_id: string }> {
    const url = `/api/conversations/${conversationId}/config`;
    const { data } = await openHands.get<{ runtime_id: string }>(url);
    return data;
  }

  /**
   * Get the count of events for a conversation
   * Uses the V1 API endpoint: GET /api/v1/events/count
   *
   * @param conversationId The conversation ID to get event count for
   * @returns The number of events in the conversation
   */
  static async getEventCount(conversationId: string): Promise<number> {
    const params = new URLSearchParams();
    params.append("conversation_id__eq", conversationId);

    const { data } = await openHands.get<number>(
      `/api/v1/events/count?${params.toString()}`,
    );

    return data;
  }
}

export default V1ConversationService;
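For context, the new getEventCount method is what the "History Loading State" tests above exercise: the expected count from the HTTP API is compared against the events that have arrived over the WebSocket. A minimal sketch of that comparison (the wrapper function below is a hypothetical illustration, not code from this commit):

// Hypothetical helper: decide whether all historical events have arrived.
// Assumes V1ConversationService.getEventCount from this commit and an
// events array such as the one held by useEventStore.
async function hasLoadedHistory(
  conversationId: string,
  receivedEvents: unknown[],
): Promise<boolean> {
  const expected = await V1ConversationService.getEventCount(conversationId);
  // An empty conversation (count 0) counts as immediately loaded
  return receivedEvents.length >= expected;
}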
@ -98,3 +98,18 @@ export interface V1AppConversation {
  conversation_url: string | null;
  session_api_key: string | null;
}

export interface V1ExposedUrl {
  name: string;
  url: string;
}

export interface V1SandboxInfo {
  id: string;
  created_by_user_id: string | null;
  sandbox_spec_id: string;
  status: V1SandboxStatus;
  session_api_key: string | null;
  exposed_urls: V1ExposedUrl[] | null;
  created_at: string;
}
41
frontend/src/api/event-service/event-service.api.ts
Normal file
@ -0,0 +1,41 @@
import axios from "axios";
import { buildHttpBaseUrl } from "#/utils/websocket-url";
import { buildSessionHeaders } from "#/utils/utils";
import type {
  ConfirmationResponseRequest,
  ConfirmationResponseResponse,
} from "./event-service.types";

class EventService {
  /**
   * Respond to a confirmation request in a V1 conversation
   * @param conversationId The conversation ID
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
   * @param request The confirmation response request
   * @param sessionApiKey Session API key for authentication (required for V1)
   * @returns The confirmation response
   */
  static async respondToConfirmation(
    conversationId: string,
    conversationUrl: string,
    request: ConfirmationResponseRequest,
    sessionApiKey?: string | null,
  ): Promise<ConfirmationResponseResponse> {
    // Build the runtime URL using the conversation URL
    const runtimeUrl = buildHttpBaseUrl(conversationUrl);

    // Build session headers for authentication
    const headers = buildSessionHeaders(sessionApiKey);

    // Make the API call to the runtime endpoint
    const { data } = await axios.post<ConfirmationResponseResponse>(
      `${runtimeUrl}/api/conversations/${conversationId}/events/respond_to_confirmation`,
      request,
      { headers },
    );

    return data;
  }
}

export default EventService;
8
frontend/src/api/event-service/event-service.types.ts
Normal file
@ -0,0 +1,8 @@
export interface ConfirmationResponseRequest {
  accept: boolean;
  reason?: string;
}

export interface ConfirmationResponseResponse {
  success: boolean;
}
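A minimal usage sketch of the new EventService (the IDs, URL, and key below are illustrative values; in the app they come from the active V1 conversation):

// Hypothetical call site: accept a pending confirmation request.
const response = await EventService.respondToConfirmation(
  "conversation-123", // illustrative conversation ID
  "http://localhost:54928/api/conversations/conversation-123",
  { accept: true },
  "session-api-key", // illustrative session key
);
if (!response.success) {
  // Error handling is left to the caller; surface the failure to the user
}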
89
frontend/src/api/git-service/v1-git-service.api.ts
Normal file
@ -0,0 +1,89 @@
import axios from "axios";
import { buildHttpBaseUrl } from "#/utils/websocket-url";
import { buildSessionHeaders } from "#/utils/utils";
import { mapV1ToV0Status } from "#/utils/git-status-mapper";
import type {
  GitChange,
  GitChangeDiff,
  V1GitChangeStatus,
} from "../open-hands.types";

interface V1GitChange {
  status: V1GitChangeStatus;
  path: string;
}

class V1GitService {
  /**
   * Build the full URL for V1 runtime-specific endpoints
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
   * @param path The API path (e.g., "/api/git/changes")
   * @returns Full URL to the runtime endpoint
   */
  private static buildRuntimeUrl(
    conversationUrl: string | null | undefined,
    path: string,
  ): string {
    const baseUrl = buildHttpBaseUrl(conversationUrl);
    return `${baseUrl}${path}`;
  }

  /**
   * Get git changes for a V1 conversation
   * Uses the agent server endpoint: GET /api/git/changes/{path}
   * Maps V1 status types (ADDED, DELETED, etc.) to V0 format (A, D, etc.)
   *
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
   * @param sessionApiKey Session API key for authentication (required for V1)
   * @param path The git repository path (e.g., /workspace/project or /workspace/project/OpenHands)
   * @returns List of git changes with V0-compatible status types
   */
  static async getGitChanges(
    conversationUrl: string | null | undefined,
    sessionApiKey: string | null | undefined,
    path: string,
  ): Promise<GitChange[]> {
    const encodedPath = encodeURIComponent(path);
    const url = this.buildRuntimeUrl(
      conversationUrl,
      `/api/git/changes/${encodedPath}`,
    );
    const headers = buildSessionHeaders(sessionApiKey);

    // V1 API returns V1GitChangeStatus types, we need to map them to V0 format
    const { data } = await axios.get<V1GitChange[]>(url, { headers });

    // Map V1 statuses to V0 format for compatibility
    return data.map((change) => ({
      status: mapV1ToV0Status(change.status),
      path: change.path,
    }));
  }

  /**
   * Get git change diff for a specific file in a V1 conversation
   * Uses the agent server endpoint: GET /api/git/diff/{path}
   *
   * @param conversationUrl The conversation URL (e.g., "http://localhost:54928/api/conversations/...")
   * @param sessionApiKey Session API key for authentication (required for V1)
   * @param path The file path to get diff for
   * @returns Git change diff
   */
  static async getGitChangeDiff(
    conversationUrl: string | null | undefined,
    sessionApiKey: string | null | undefined,
    path: string,
  ): Promise<GitChangeDiff> {
    const encodedPath = encodeURIComponent(path);
    const url = this.buildRuntimeUrl(
      conversationUrl,
      `/api/git/diff/${encodedPath}`,
    );
    const headers = buildSessionHeaders(sessionApiKey);

    const { data } = await axios.get<GitChangeDiff>(url, { headers });
    return data;
  }
}

export default V1GitService;
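V1GitService relies on mapV1ToV0Status from #/utils/git-status-mapper, which is not part of this diff. Given the two status unions declared in the open-hands.types hunk below ("MOVED" | "ADDED" | "DELETED" | "UPDATED" versus the deprecated "M" | "A" | "D" | "R" | "U"), the mapper presumably reduces to a lookup along these lines; this is a sketch under that assumption, not the actual implementation:

// Hypothetical sketch of #/utils/git-status-mapper based on the two status
// unions in open-hands.types; individual letter choices are assumptions.
export function mapV1ToV0Status(status: V1GitChangeStatus): GitChangeStatus {
  switch (status) {
    case "ADDED":
      return "A";
    case "DELETED":
      return "D";
    case "MOVED":
      return "R"; // assuming V0's "R" (renamed) covers moves
    case "UPDATED":
      return "M"; // modified
    default:
      return "U"; // unknown/unmerged fallback
  }
}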
@ -84,8 +84,13 @@ export interface ResultSet<T> {
  next_page_id: string | null;
}

/**
 * @deprecated Use V1GitChangeStatus for new code. This type is maintained for backward compatibility with V0 API.
 */
export type GitChangeStatus = "M" | "A" | "D" | "R" | "U";

export type V1GitChangeStatus = "MOVED" | "ADDED" | "DELETED" | "UPDATED";

export interface GitChange {
  status: GitChangeStatus;
  path: string;
@ -48,6 +48,7 @@ import {
} from "#/types/v1/type-guards";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useTaskPolling } from "#/hooks/query/use-task-polling";
import { useConversationWebSocket } from "#/contexts/conversation-websocket-context";

function getEntryPoint(
  hasRepository: boolean | null,
@ -64,6 +65,7 @@ export function ChatInterface() {
  const { errorMessage } = useErrorMessageStore();
  const { isLoadingMessages } = useWsClient();
  const { isTask } = useTaskPolling();
  const conversationWebSocket = useConversationWebSocket();
  const { send } = useSendMessage();
  const storeEvents = useEventStore((state) => state.events);
  const { setOptimisticUserMessage, getOptimisticUserMessage } =
@ -94,6 +96,25 @@ export function ChatInterface() {

  const isV1Conversation = conversation?.conversation_version === "V1";

  // Instantly scroll to bottom when history loading completes
  const prevLoadingHistoryRef = React.useRef(
    conversationWebSocket?.isLoadingHistory,
  );
  React.useEffect(() => {
    const wasLoading = prevLoadingHistoryRef.current;
    const isLoading = conversationWebSocket?.isLoadingHistory;

    // When history loading transitions from true to false, instantly scroll to bottom
    if (wasLoading && !isLoading && scrollRef.current) {
      scrollRef.current.scrollTo({
        top: scrollRef.current.scrollHeight,
        behavior: "instant",
      });
    }

    prevLoadingHistoryRef.current = isLoading;
  }, [conversationWebSocket?.isLoadingHistory, scrollRef]);

  // Filter V0 events
  const v0Events = storeEvents
    .filter(isV0Event)
@ -228,6 +249,14 @@ export function ChatInterface() {
        </div>
      )}

      {conversationWebSocket?.isLoadingHistory &&
        isV1Conversation &&
        !isTask && (
          <div className="flex justify-center">
            <LoadingSpinner size="small" />
          </div>
        )}

      {!isLoadingMessages && v0UserEventsExist && (
        <V0Messages
          messages={v0Events}
@ -237,13 +266,8 @@ export function ChatInterface() {
        />
      )}

      {v1UserEventsExist && (
        <V1Messages
          messages={v1Events}
          isAwaitingUserConfirmation={
            curAgentState === AgentState.AWAITING_USER_CONFIRMATION
          }
        />
      {!conversationWebSocket?.isLoadingHistory && v1UserEventsExist && (
        <V1Messages messages={v1Events} />
      )}
    </div>
@ -10,6 +10,8 @@ import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useSendMessage } from "#/hooks/use-send-message";
import { generateAgentStateChangeEvent } from "#/services/agent-state-service";
import { AgentState } from "#/types/agent-state";
import { useV1PauseConversation } from "#/hooks/mutation/use-v1-pause-conversation";
import { useV1ResumeConversation } from "#/hooks/mutation/use-v1-resume-conversation";

interface ChatInputActionsProps {
  conversationStatus: ConversationStatus | null;
@ -26,6 +28,8 @@ export function ChatInputActions({
  const pauseConversationSandboxMutation = useUnifiedPauseConversationSandbox();
  const resumeConversationSandboxMutation =
    useUnifiedResumeConversationSandbox();
  const v1PauseConversationMutation = useV1PauseConversation();
  const v1ResumeConversationMutation = useV1ResumeConversation();
  const { conversationId } = useConversationId();
  const { providers } = useUserProviders();
  const { send } = useSendMessage();
@ -38,7 +42,8 @@ export function ChatInputActions({

  const handlePauseAgent = () => {
    if (isV1Conversation) {
      // V1: Empty function for now
      // V1: Pause the conversation (agent execution)
      v1PauseConversationMutation.mutate({ conversationId });
      return;
    }

@ -46,11 +51,24 @@ export function ChatInputActions({
    send(generateAgentStateChangeEvent(AgentState.STOPPED));
  };

  const handleResumeAgentClick = () => {
    if (isV1Conversation) {
      // V1: Resume the conversation (agent execution)
      v1ResumeConversationMutation.mutate({ conversationId });
      return;
    }

    // V0: Call the original handleResumeAgent (sends "continue" message)
    handleResumeAgent();
  };

  const handleStartClick = () => {
    resumeConversationSandboxMutation.mutate({ conversationId, providers });
  };

  const isPausing = pauseConversationSandboxMutation.isPending;
  const isPausing =
    pauseConversationSandboxMutation.isPending ||
    v1PauseConversationMutation.isPending;

  return (
    <div className="w-full flex items-center justify-between">
@ -66,7 +84,7 @@ export function ChatInputActions({
      <AgentStatus
        className="ml-2 md:ml-3"
        handleStop={handlePauseAgent}
        handleResumeAgent={handleResumeAgent}
        handleResumeAgent={handleResumeAgentClick}
        disabled={disabled}
        isPausing={isPausing}
      />
@ -17,9 +17,19 @@ export const getObservationResult = (event: OpenHandsObservation) => {
case "run_ipython":
case "read":
case "edit":
case "mcp":
if (!hasContent || contentIncludesError) return "error";
return "success"; // Content is valid
return "success";

case "mcp":
try {
const parsed = JSON.parse(event.content);
if (typeof parsed?.isError === "boolean") {
return parsed.isError ? "error" : "success";
}
} catch {
return hasContent ? "success" : "error";
}
return hasContent ? "success" : "error";
default:
return "success";
}
@ -10,19 +10,22 @@ interface GitControlBarPrButtonProps {
onSuggestionsClick: (value: string) => void;
hasRepository: boolean;
currentGitProvider: Provider;
isConversationReady?: boolean;
}

export function GitControlBarPrButton({
onSuggestionsClick,
hasRepository,
currentGitProvider,
isConversationReady = true,
}: GitControlBarPrButtonProps) {
const { t } = useTranslation();

const { providers } = useUserProviders();

const providersAreSet = providers.length > 0;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;

const handlePrClick = () => {
posthog.capture("create_pr_button_clicked");

@ -8,10 +8,12 @@ import { I18nKey } from "#/i18n/declaration";

interface GitControlBarPullButtonProps {
onSuggestionsClick: (value: string) => void;
isConversationReady?: boolean;
}

export function GitControlBarPullButton({
onSuggestionsClick,
isConversationReady = true,
}: GitControlBarPullButtonProps) {
const { t } = useTranslation();

@ -20,7 +22,8 @@ export function GitControlBarPullButton({

const providersAreSet = providers.length > 0;
const hasRepository = conversation?.selected_repository;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;

const handlePullClick = () => {
posthog.capture("pull_button_clicked");

@ -10,19 +10,22 @@ interface GitControlBarPushButtonProps {
onSuggestionsClick: (value: string) => void;
hasRepository: boolean;
currentGitProvider: Provider;
isConversationReady?: boolean;
}

export function GitControlBarPushButton({
onSuggestionsClick,
hasRepository,
currentGitProvider,
isConversationReady = true,
}: GitControlBarPushButtonProps) {
const { t } = useTranslation();

const { providers } = useUserProviders();

const providersAreSet = providers.length > 0;
const isButtonEnabled = providersAreSet && hasRepository;
const isButtonEnabled =
providersAreSet && hasRepository && isConversationReady;

const handlePushClick = () => {
posthog.capture("push_button_clicked");

@ -6,6 +6,7 @@ import { GitControlBarPushButton } from "./git-control-bar-push-button";
import { GitControlBarPrButton } from "./git-control-bar-pr-button";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useTaskPolling } from "#/hooks/query/use-task-polling";
import { useUnifiedWebSocketStatus } from "#/hooks/use-unified-websocket-status";
import { Provider } from "#/types/settings";
import { I18nKey } from "#/i18n/declaration";
import { GitControlBarTooltipWrapper } from "./git-control-bar-tooltip-wrapper";
@ -19,6 +20,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {

const { data: conversation } = useActiveConversation();
const { repositoryInfo } = useTaskPolling();
const webSocketStatus = useUnifiedWebSocketStatus();

// Priority: conversation data > task data
// This ensures we show repository info immediately from task, then transition to conversation data
@ -31,6 +33,9 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {

const hasRepository = !!selectedRepository;

// Enable buttons only when conversation exists and WS is connected
const isConversationReady = !!conversation && webSocketStatus === "CONNECTED";

return (
<div className="flex flex-row items-center">
<div className="flex flex-row gap-2.5 items-center overflow-x-auto flex-wrap md:flex-nowrap relative scrollbar-hide">
@ -66,6 +71,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
>
<GitControlBarPullButton
onSuggestionsClick={onSuggestionsClick}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>

@ -78,6 +84,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
onSuggestionsClick={onSuggestionsClick}
hasRepository={hasRepository}
currentGitProvider={gitProvider}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>

@ -90,6 +97,7 @@ export function GitControlBar({ onSuggestionsClick }: GitControlBarProps) {
onSuggestionsClick={onSuggestionsClick}
hasRepository={hasRepository}
currentGitProvider={gitProvider}
isConversationReady={isConversationReady}
/>
</GitControlBarTooltipWrapper>
</>
@ -74,19 +74,24 @@ export function AgentStatus({
<div
className={cn(
"bg-[#525252] box-border content-stretch flex flex-row gap-[3px] items-center justify-center overflow-clip px-0.5 py-1 relative rounded-[100px] shrink-0 size-6 transition-all duration-200 active:scale-95",
(shouldShownAgentStop || shouldShownAgentResume) &&
!shouldShownAgentLoading &&
(shouldShownAgentStop || shouldShownAgentResume) &&
"hover:bg-[#737373] cursor-pointer",
)}
>
{shouldShownAgentLoading && <AgentLoading />}
{shouldShownAgentStop && <ChatStopButton handleStop={handleStop} />}
{shouldShownAgentResume && (
{!shouldShownAgentLoading && shouldShownAgentStop && (
<ChatStopButton handleStop={handleStop} />
)}
{!shouldShownAgentLoading && shouldShownAgentResume && (
<ChatResumeAgentButton
onAgentResumed={handleResumeAgent}
disabled={disabled}
/>
)}
{shouldShownAgentError && <CircleErrorIcon className="w-4 h-4" />}
{!shouldShownAgentLoading && shouldShownAgentError && (
<CircleErrorIcon className="w-4 h-4" />
)}
{!shouldShownAgentLoading &&
!shouldShownAgentStop &&
!shouldShownAgentResume &&
@ -8,23 +8,23 @@ import { TabContentArea } from "./tab-content-area";
import { ConversationTabTitle } from "../conversation-tab-title";
import Terminal from "#/components/features/terminal/terminal";
import { useConversationStore } from "#/state/conversation-store";
import { useConversationId } from "#/hooks/use-conversation-id";

// Lazy load all tab components
const EditorTab = lazy(() => import("#/routes/changes-tab"));
const BrowserTab = lazy(() => import("#/routes/browser-tab"));
const JupyterTab = lazy(() => import("#/routes/jupyter-tab"));
const ServedTab = lazy(() => import("#/routes/served-tab"));
const VSCodeTab = lazy(() => import("#/routes/vscode-tab"));

export function ConversationTabContent() {
const { selectedTab, shouldShownAgentLoading } = useConversationStore();
const { conversationId } = useConversationId();

const { t } = useTranslation();

// Determine which tab is active based on the current path
const isEditorActive = selectedTab === "editor";
const isBrowserActive = selectedTab === "browser";
const isJupyterActive = selectedTab === "jupyter";
const isServedActive = selectedTab === "served";
const isVSCodeActive = selectedTab === "vscode";
const isTerminalActive = selectedTab === "terminal";
@ -37,11 +37,6 @@ export function ConversationTabContent() {
component: BrowserTab,
isActive: isBrowserActive,
},
{
key: "jupyter",
component: JupyterTab,
isActive: isJupyterActive,
},
{ key: "served", component: ServedTab, isActive: isServedActive },
{ key: "vscode", component: VSCodeTab, isActive: isVSCodeActive },
{
@ -58,9 +53,6 @@ export function ConversationTabContent() {
if (isBrowserActive) {
return t(I18nKey.COMMON$BROWSER);
}
if (isJupyterActive) {
return t(I18nKey.COMMON$JUPYTER);
}
if (isServedActive) {
return t(I18nKey.COMMON$APP);
}
@ -74,7 +66,6 @@ export function ConversationTabContent() {
}, [
isEditorActive,
isBrowserActive,
isJupyterActive,
isServedActive,
isVSCodeActive,
isTerminalActive,
@ -89,7 +80,11 @@ export function ConversationTabContent() {
<ConversationTabTitle title={conversationTabTitle} />
<TabContentArea>
{tabs.map(({ key, component: Component, isActive }) => (
<TabWrapper key={key} isActive={isActive}>
<TabWrapper
// Force Terminal tab remount to reset XTerm buffer/state when conversationId changes
key={key === "terminal" ? `${key}-${conversationId}` : key}
isActive={isActive}
>
<Component />
</TabWrapper>
))}

@ -1,7 +1,6 @@
import { useEffect } from "react";
import { useTranslation } from "react-i18next";
import { useLocalStorage } from "@uidotdev/usehooks";
import JupyterIcon from "#/icons/jupyter.svg?react";
import TerminalIcon from "#/icons/terminal.svg?react";
import GlobeIcon from "#/icons/globe.svg?react";
import ServerIcon from "#/icons/server.svg?react";
@ -108,13 +107,6 @@ export function ConversationTabs() {
tooltipContent: t(I18nKey.COMMON$TERMINAL),
tooltipAriaLabel: t(I18nKey.COMMON$TERMINAL),
},
{
isActive: isTabActive("jupyter"),
icon: JupyterIcon,
onClick: () => onTabSelected("jupyter"),
tooltipContent: t(I18nKey.COMMON$JUPYTER),
tooltipAriaLabel: t(I18nKey.COMMON$JUPYTER),
},
{
isActive: isTabActive("served"),
icon: ServerIcon,
@ -7,7 +7,7 @@ import { GitChangeStatus } from "#/api/open-hands.types";
import { getLanguageFromPath } from "#/utils/get-language-from-path";
import { cn } from "#/utils/utils";
import ChevronUp from "#/icons/chveron-up.svg?react";
import { useGitDiff } from "#/hooks/query/use-get-diff";
import { useUnifiedGitDiff } from "#/hooks/query/use-unified-git-diff";

interface LoadingSpinnerProps {
className?: string;
@ -64,7 +64,7 @@ export function FileDiffViewer({ path, type }: FileDiffViewerProps) {
isLoading,
isSuccess,
isRefetching,
} = useGitDiff({
} = useUnifiedGitDiff({
filePath,
type,
enabled: !isCollapsed,
@ -1,22 +0,0 @@
import SyntaxHighlighter from "react-syntax-highlighter";
import { atomOneDark } from "react-syntax-highlighter/dist/esm/styles/hljs";

interface JupytrerCellInputProps {
code: string;
}

export function JupytrerCellInput({ code }: JupytrerCellInputProps) {
return (
<div className="rounded-lg bg-gray-800 dark:bg-gray-900 p-2 text-xs">
<div className="mb-1 text-gray-400">EXECUTE</div>
<pre
className="scrollbar-custom scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20 overflow-auto px-5"
style={{ padding: 0, marginBottom: 0, fontSize: "0.75rem" }}
>
<SyntaxHighlighter language="python" style={atomOneDark} wrapLongLines>
{code}
</SyntaxHighlighter>
</pre>
</div>
);
}
@ -1,55 +0,0 @@
import Markdown from "react-markdown";
import SyntaxHighlighter from "react-syntax-highlighter";
import { atomOneDark } from "react-syntax-highlighter/dist/esm/styles/hljs";
import { useTranslation } from "react-i18next";
import { I18nKey } from "#/i18n/declaration";
import { JupyterLine } from "#/utils/parse-cell-content";
import { paragraph } from "../markdown/paragraph";

interface JupyterCellOutputProps {
lines: JupyterLine[];
}

export function JupyterCellOutput({ lines }: JupyterCellOutputProps) {
const { t } = useTranslation();
return (
<div className="rounded-lg bg-gray-800 dark:bg-gray-900 p-2 text-xs">
<div className="mb-1 text-gray-400">
{t(I18nKey.JUPYTER$OUTPUT_LABEL)}
</div>
<pre
className="scrollbar-custom scrollbar-thumb-gray-500 hover:scrollbar-thumb-gray-400 dark:scrollbar-thumb-white/10 dark:hover:scrollbar-thumb-white/20 overflow-auto px-5 max-h-[60vh] bg-gray-800"
style={{ padding: 0, marginBottom: 0, fontSize: "0.75rem" }}
>
{/* display the lines as plaintext or image */}
{lines.map((line, index) => {
if (line.type === "image") {
// Use markdown to display the image
const imageMarkdown = line.url
? `![image](${line.url})`
: line.content;
return (
<div key={index}>
<Markdown
components={{
p: paragraph,
}}
urlTransform={(value: string) => value}
>
{imageMarkdown}
</Markdown>
</div>
);
}
return (
<div key={index}>
<SyntaxHighlighter language="plaintext" style={atomOneDark}>
{line.content}
</SyntaxHighlighter>
</div>
);
})}
</pre>
</div>
);
}
@ -1,23 +0,0 @@
import React from "react";
import { Cell } from "#/state/jupyter-store";
import { JupyterLine, parseCellContent } from "#/utils/parse-cell-content";
import { JupytrerCellInput } from "./jupyter-cell-input";
import { JupyterCellOutput } from "./jupyter-cell-output";

interface JupyterCellProps {
cell: Cell;
}

export function JupyterCell({ cell }: JupyterCellProps) {
const [lines, setLines] = React.useState<JupyterLine[]>([]);

React.useEffect(() => {
setLines(parseCellContent(cell.content, cell.imageUrls));
}, [cell.content, cell.imageUrls]);

if (cell.type === "input") {
return <JupytrerCellInput code={cell.content} />;
}

return <JupyterCellOutput lines={lines} />;
}
@ -1,63 +0,0 @@
import React from "react";
import { useTranslation } from "react-i18next";
import { useScrollToBottom } from "#/hooks/use-scroll-to-bottom";
import { JupyterCell } from "./jupyter-cell";
import { ScrollToBottomButton } from "#/components/shared/buttons/scroll-to-bottom-button";
import { RUNTIME_INACTIVE_STATES } from "#/types/agent-state";
import { I18nKey } from "#/i18n/declaration";
import JupyterLargeIcon from "#/icons/jupyter-large.svg?react";
import { WaitingForRuntimeMessage } from "../chat/waiting-for-runtime-message";
import { useAgentState } from "#/hooks/use-agent-state";
import { useJupyterStore } from "#/state/jupyter-store";

interface JupyterEditorProps {
maxWidth: number;
}

export function JupyterEditor({ maxWidth }: JupyterEditorProps) {
const { curAgentState } = useAgentState();

const cells = useJupyterStore((state) => state.cells);

const jupyterRef = React.useRef<HTMLDivElement>(null);

const { t } = useTranslation();

const isRuntimeInactive = RUNTIME_INACTIVE_STATES.includes(curAgentState);

const { hitBottom, scrollDomToBottom, onChatBodyScroll } =
useScrollToBottom(jupyterRef);

return (
<>
{isRuntimeInactive && <WaitingForRuntimeMessage />}
{!isRuntimeInactive && cells.length > 0 && (
<div className="flex-1 h-full flex flex-col" style={{ maxWidth }}>
<div
data-testid="jupyter-container"
className="flex-1 overflow-y-auto fast-smooth-scroll custom-scrollbar-always rounded-xl"
ref={jupyterRef}
onScroll={(e) => onChatBodyScroll(e.currentTarget)}
>
{cells.map((cell, index) => (
<JupyterCell key={index} cell={cell} />
))}
</div>
{!hitBottom && (
<div className="sticky bottom-2 flex items-center justify-center">
<ScrollToBottomButton onClick={scrollDomToBottom} />
</div>
)}
</div>
)}
{!isRuntimeInactive && cells.length === 0 && (
<div className="flex flex-col items-center justify-center w-full h-full p-10 gap-4">
<JupyterLargeIcon width={113} height={113} color="#A1A1A1" />
<span className="text-[#8D95A9] text-[19px] font-normal leading-5">
{t(I18nKey.COMMON$JUPYTER_EMPTY_MESSAGE)}
</span>
</div>
)}
</>
);
}
@ -1,8 +1,7 @@
import React from "react";
import { useTranslation, Trans } from "react-i18next";
import { useTranslation } from "react-i18next";
import { useCreateStripeCheckoutSession } from "#/hooks/mutation/stripe/use-create-stripe-checkout-session";
import { useBalance } from "#/hooks/query/use-balance";
import { useSubscriptionAccess } from "#/hooks/query/use-subscription-access";
import { cn } from "#/utils/utils";
import MoneyIcon from "#/icons/money.svg?react";
import { SettingsInput } from "../settings/settings-input";
@ -11,24 +10,13 @@ import { LoadingSpinner } from "#/components/shared/loading-spinner";
import { amountIsValid } from "#/utils/amount-is-valid";
import { I18nKey } from "#/i18n/declaration";
import { PoweredByStripeTag } from "./powered-by-stripe-tag";
import { CancelSubscriptionModal } from "./cancel-subscription-modal";

export function PaymentForm() {
const { t } = useTranslation();
const { data: balance, isLoading } = useBalance();
const { data: subscriptionAccess } = useSubscriptionAccess();
const { mutate: addBalance, isPending } = useCreateStripeCheckoutSession();

const [buttonIsDisabled, setButtonIsDisabled] = React.useState(true);
const [showCancelModal, setShowCancelModal] = React.useState(false);

const subscriptionExpiredDate =
subscriptionAccess?.end_at &&
new Date(subscriptionAccess.end_at).toLocaleDateString("en-US", {
year: "numeric",
month: "long",
day: "numeric",
});

const billingFormAction = async (formData: FormData) => {
const amount = formData.get("top-up-input")?.toString();
@ -94,50 +82,7 @@ export function PaymentForm() {
{isPending && <LoadingSpinner size="small" />}
<PoweredByStripeTag />
</div>

{/* Cancel Subscription Button or Cancellation Message */}
{subscriptionAccess && (
<div className="flex flex-col w-[680px] gap-2 mt-4">
{subscriptionAccess.cancelled_at ? (
<div className="text-red-500 text-sm">
<Trans
i18nKey={I18nKey.PAYMENT$SUBSCRIPTION_CANCELLED_EXPIRES}
values={{ date: subscriptionExpiredDate }}
components={{ date: <span className="underline" /> }}
/>
</div>
) : (
<div className="flex items-center gap-4">
<BrandButton
testId="cancel-subscription-button"
variant="ghost-danger"
type="button"
onClick={() => setShowCancelModal(true)}
>
{t(I18nKey.PAYMENT$CANCEL_SUBSCRIPTION)}
</BrandButton>
<div
className="text-sm text-gray-300"
data-testid="next-billing-date"
>
<Trans
i18nKey={I18nKey.PAYMENT$NEXT_BILLING_DATE}
values={{ date: subscriptionExpiredDate }}
components={{ date: <span className="underline" /> }}
/>
</div>
</div>
)}
</div>
)}
</div>

{/* Cancel Subscription Modal */}
<CancelSubscriptionModal
isOpen={showCancelModal}
onClose={() => setShowCancelModal(false)}
endDate={subscriptionExpiredDate}
/>
</form>
);
}
@ -11,13 +11,11 @@ interface NavigationItem {
interface SettingsLayoutProps {
children: React.ReactNode;
navigationItems: NavigationItem[];
isSaas: boolean;
}

export function SettingsLayout({
children,
navigationItems,
isSaas,
}: SettingsLayoutProps) {
const [isMobileMenuOpen, setIsMobileMenuOpen] = useState(false);

@ -44,7 +42,6 @@ export function SettingsLayout({
isMobileMenuOpen={isMobileMenuOpen}
onCloseMobileMenu={closeMobileMenu}
navigationItems={navigationItems}
isSaas={isSaas}
/>

{/* Main content */}

@ -5,7 +5,6 @@ import { Typography } from "#/ui/typography";
import { I18nKey } from "#/i18n/declaration";
import SettingsIcon from "#/icons/settings-gear.svg?react";
import CloseIcon from "#/icons/close.svg?react";
import { ProPill } from "./pro-pill";

interface NavigationItem {
to: string;
@ -17,14 +16,12 @@ interface SettingsNavigationProps {
isMobileMenuOpen: boolean;
onCloseMobileMenu: () => void;
navigationItems: NavigationItem[];
isSaas: boolean;
}

export function SettingsNavigation({
isMobileMenuOpen,
onCloseMobileMenu,
navigationItems,
isSaas,
}: SettingsNavigationProps) {
const { t } = useTranslation();

@ -85,7 +82,6 @@ export function SettingsNavigation({
<Typography.Text className="text-[#A3A3A3] whitespace-nowrap">
{t(text as I18nKey)}
</Typography.Text>
{isSaas && to === "/settings" && <ProPill />}
</div>
</NavLink>
))}
@ -0,0 +1,141 @@
import { useCallback, useEffect } from "react";
import { useTranslation } from "react-i18next";
import { I18nKey } from "#/i18n/declaration";
import { AgentState } from "#/types/agent-state";
import { ActionTooltip } from "../action-tooltip";
import { RiskAlert } from "#/components/shared/risk-alert";
import WarningIcon from "#/icons/u-warning.svg?react";
import { useEventMessageStore } from "#/stores/event-message-store";
import { useEventStore } from "#/stores/use-event-store";
import { isV1Event, isActionEvent } from "#/types/v1/type-guards";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useAgentState } from "#/hooks/use-agent-state";
import { useRespondToConfirmation } from "#/hooks/mutation/use-respond-to-confirmation";
import { SecurityRisk } from "#/types/v1/core/base/common";

export function V1ConfirmationButtons() {
const v1SubmittedEventIds = useEventMessageStore(
(state) => state.v1SubmittedEventIds,
);
const addV1SubmittedEventId = useEventMessageStore(
(state) => state.addV1SubmittedEventId,
);

const { t } = useTranslation();
const { data: conversation } = useActiveConversation();
const { curAgentState } = useAgentState();
const { mutate: respondToConfirmation } = useRespondToConfirmation();
const events = useEventStore((state) => state.events);

// Find the most recent V1 action awaiting confirmation
const awaitingAction = events
.filter(isV1Event)
.slice()
.reverse()
.find((ev) => {
if (ev.source !== "agent") return false;
// For V1, we check if the agent state is waiting for confirmation
return curAgentState === AgentState.AWAITING_USER_CONFIRMATION;
});

const handleConfirmation = useCallback(
(accept: boolean) => {
if (!awaitingAction || !conversation) {
return;
}

// Mark event as submitted to prevent duplicate submissions
addV1SubmittedEventId(awaitingAction.id);

// Call the V1 API endpoint
respondToConfirmation({
conversationId: conversation.conversation_id,
conversationUrl: conversation.url || "",
sessionApiKey: conversation.session_api_key,
accept,
});
},
[
awaitingAction,
conversation,
addV1SubmittedEventId,
respondToConfirmation,
],
);

// Handle keyboard shortcuts
useEffect(() => {
if (!awaitingAction) {
return undefined;
}

const handleCancelShortcut = (event: KeyboardEvent) => {
if (event.shiftKey && event.metaKey && event.key === "Backspace") {
event.preventDefault();
handleConfirmation(false);
}
};

const handleContinueShortcut = (event: KeyboardEvent) => {
if (event.metaKey && event.key === "Enter") {
event.preventDefault();
handleConfirmation(true);
}
};

const handleKeyDown = (event: KeyboardEvent) => {
// Cancel: Shift+Cmd+Backspace (⇧⌘⌫)
handleCancelShortcut(event);
// Continue: Cmd+Enter (⌘↩)
handleContinueShortcut(event);
};

document.addEventListener("keydown", handleKeyDown);

return () => document.removeEventListener("keydown", handleKeyDown);
}, [awaitingAction, handleConfirmation]);

// Only show if agent is waiting for confirmation and we haven't already submitted
if (
curAgentState !== AgentState.AWAITING_USER_CONFIRMATION ||
!awaitingAction ||
v1SubmittedEventIds.includes(awaitingAction.id)
) {
return null;
}

// Get security risk from the action (only ActionEvent has security_risk)
const risk = isActionEvent(awaitingAction)
? awaitingAction.security_risk
: SecurityRisk.UNKNOWN;

const isHighRisk = risk === SecurityRisk.HIGH;

return (
<div className="flex flex-col gap-2 pt-4">
{isHighRisk && (
<RiskAlert
content={t(I18nKey.CHAT_INTERFACE$HIGH_RISK_WARNING)}
icon={<WarningIcon width={16} height={16} color="#fff" />}
severity="high"
title={t(I18nKey.COMMON$HIGH_RISK)}
/>
)}
<div className="flex justify-between items-center">
<p className="text-sm font-normal text-white">
{t(I18nKey.CHAT_INTERFACE$USER_ASK_CONFIRMATION)}
</p>
<div className="flex items-center gap-3">
<ActionTooltip
type="reject"
onClick={() => handleConfirmation(false)}
/>
<ActionTooltip
type="confirm"
onClick={() => handleConfirmation(true)}
/>
</div>
</div>
</div>
);
}
@ -19,6 +19,10 @@ const getFileEditorObservationContent = (
): string => {
const { observation } = event;

if (observation.error) {
return `**Error:**\n${observation.error}`;
}

const successMessage = getObservationResult(event) === "success";

// For view commands or successful edits with content changes, format as code block

@ -11,9 +11,10 @@ export const getObservationResult = (
switch (observationType) {
case "ExecuteBashObservation": {
const exitCode = observation.exit_code;
const { metadata } = observation;

if (exitCode === -1) return "timeout"; // Command timed out
if (exitCode === 0) return "success"; // Command executed successfully
if (exitCode === -1 || metadata.exit_code === -1) return "timeout"; // Command timed out
if (exitCode === 0 || metadata.exit_code === 0) return "success"; // Command executed successfully
return "error"; // Command failed
}
case "FileEditorObservation":
@ -1,19 +1,18 @@
import React from "react";
import { OpenHandsEvent } from "#/types/v1/core";
import { GenericEventMessage } from "../../../features/chat/generic-event-message";
import { getEventContent } from "../event-content-helpers/get-event-content";
import { getObservationResult } from "../event-content-helpers/get-observation-result";
import { isObservationEvent } from "#/types/v1/type-guards";
import { ConfirmationButtons } from "#/components/shared/buttons/confirmation-buttons";
import { V1ConfirmationButtons } from "#/components/shared/buttons/v1-confirmation-buttons";

interface GenericEventMessageWrapperProps {
event: OpenHandsEvent;
shouldShowConfirmationButtons: boolean;
isLastMessage: boolean;
}

export function GenericEventMessageWrapper({
event,
shouldShowConfirmationButtons,
isLastMessage,
}: GenericEventMessageWrapperProps) {
const { title, details } = getEventContent(event);

@ -27,7 +26,7 @@ export function GenericEventMessageWrapper({
}
initiallyExpanded={false}
/>
{shouldShowConfirmationButtons && <ConfirmationButtons />}
{isLastMessage && <V1ConfirmationButtons />}
</div>
);
}
@ -4,7 +4,7 @@ import { ChatMessage } from "../../../features/chat/chat-message";
import { ImageCarousel } from "../../../features/images/image-carousel";
// TODO: Implement file_urls support for V1 messages
// import { FileList } from "../../../features/files/file-list";
import { ConfirmationButtons } from "#/components/shared/buttons/confirmation-buttons";
import { V1ConfirmationButtons } from "#/components/shared/buttons/v1-confirmation-buttons";
import { MicroagentStatusWrapper } from "../../../features/chat/event-message-components/microagent-status-wrapper";
// TODO: Implement V1 LikertScaleWrapper when API supports V1 event IDs
// import { LikertScaleWrapper } from "../../../features/chat/event-message-components/likert-scale-wrapper";
@ -13,7 +13,6 @@ import { MicroagentStatus } from "#/types/microagent-status";

interface UserAssistantEventMessageProps {
event: MessageEvent;
shouldShowConfirmationButtons: boolean;
microagentStatus?: MicroagentStatus | null;
microagentConversationId?: string;
microagentPRUrl?: string;
@ -22,15 +21,16 @@ interface UserAssistantEventMessageProps {
onClick: () => void;
tooltip?: string;
}>;
isLastMessage: boolean;
}

export function UserAssistantEventMessage({
event,
shouldShowConfirmationButtons,
microagentStatus,
microagentConversationId,
microagentPRUrl,
actions,
isLastMessage,
}: UserAssistantEventMessageProps) {
const message = parseMessageFromEvent(event);

@ -51,7 +51,7 @@ export function UserAssistantEventMessage({
<ImageCarousel size="small" images={imageUrls} />
)}
{/* TODO: Handle file_urls if V1 messages support them */}
{shouldShowConfirmationButtons && <ConfirmationButtons />}
{isLastMessage && <V1ConfirmationButtons />}
</ChatMessage>
<MicroagentStatusWrapper
microagentStatus={microagentStatus}
@ -21,7 +21,6 @@ import {
interface EventMessageProps {
event: OpenHandsEvent;
hasObservationPair: boolean;
isAwaitingUserConfirmation: boolean;
isLastMessage: boolean;
microagentStatus?: MicroagentStatus | null;
microagentConversationId?: string;
@ -38,7 +37,6 @@ interface EventMessageProps {
export function EventMessage({
event,
hasObservationPair,
isAwaitingUserConfirmation,
isLastMessage,
microagentStatus,
microagentConversationId,
@ -46,9 +44,6 @@ export function EventMessage({
actions,
isInLast10Actions,
}: EventMessageProps) {
const shouldShowConfirmationButtons =
isLastMessage && event.source === "agent" && isAwaitingUserConfirmation;

const { data: config } = useConfig();

// V1 events use string IDs, but useFeedbackExists expects number
@ -103,17 +98,14 @@ export function EventMessage({
return (
<UserAssistantEventMessage
event={event as MessageEvent}
shouldShowConfirmationButtons={shouldShowConfirmationButtons}
{...commonProps}
isLastMessage={isLastMessage}
/>
);
}

// Generic fallback for all other events (including observation events)
return (
<GenericEventMessageWrapper
event={event}
shouldShowConfirmationButtons={shouldShowConfirmationButtons}
/>
<GenericEventMessageWrapper event={event} isLastMessage={isLastMessage} />
);
}
@ -10,11 +10,10 @@ import { useOptimisticUserMessageStore } from "#/stores/optimistic-user-message-

interface MessagesProps {
messages: OpenHandsEvent[];
isAwaitingUserConfirmation: boolean;
}

export const Messages: React.FC<MessagesProps> = React.memo(
({ messages, isAwaitingUserConfirmation }) => {
({ messages }) => {
const { getOptimisticUserMessage } = useOptimisticUserMessageStore();

const optimisticUserMessage = getOptimisticUserMessage();
@ -43,7 +42,6 @@ export const Messages: React.FC<MessagesProps> = React.memo(
key={message.id}
event={message}
hasObservationPair={actionHasObservationPair(message)}
isAwaitingUserConfirmation={isAwaitingUserConfirmation}
isLastMessage={messages.length - 1 === index}
isInLast10Actions={messages.length - 1 - index < 10}
// Microagent props - not implemented yet for V1
@ -5,6 +5,7 @@ import React, {
useState,
useCallback,
useMemo,
useRef,
} from "react";
import { useQueryClient } from "@tanstack/react-query";
import { useWebSocket, WebSocketHookOptions } from "#/hooks/use-websocket";
@ -27,6 +28,7 @@ import {
import { handleActionEventCacheInvalidation } from "#/utils/cache-utils";
import { buildWebSocketUrl } from "#/utils/websocket-url";
import type { V1SendMessageRequest } from "#/api/conversation-service/v1-conversation-service.types";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";

// eslint-disable-next-line @typescript-eslint/naming-convention
export type V1_WebSocketConnectionState =
@ -38,6 +40,7 @@ export type V1_WebSocketConnectionState =
interface ConversationWebSocketContextType {
connectionState: V1_WebSocketConnectionState;
sendMessage: (message: V1SendMessageRequest) => Promise<void>;
isLoadingHistory: boolean;
}

const ConversationWebSocketContext = createContext<
@ -67,6 +70,13 @@ export function ConversationWebSocketProvider({
const { setAgentStatus } = useV1ConversationStateStore();
const { appendInput, appendOutput } = useCommandStore();

// History loading state
const [isLoadingHistory, setIsLoadingHistory] = useState(true);
const [expectedEventCount, setExpectedEventCount] = useState<number | null>(
null,
);
const receivedEventCountRef = useRef(0);

// Build WebSocket URL from props
// Only build URL if we have both conversationId and conversationUrl
// This prevents connection attempts during task polling phase
@ -78,16 +88,43 @@ export function ConversationWebSocketProvider({
return buildWebSocketUrl(conversationId, conversationUrl);
}, [conversationId, conversationUrl]);

// Reset hasConnected flag when conversation changes
// Reset hasConnected flag and history loading state when conversation changes
useEffect(() => {
hasConnectedRef.current = false;
setIsLoadingHistory(true);
setExpectedEventCount(null);
receivedEventCountRef.current = 0;
}, [conversationId]);

// Check if we've received all events when expectedEventCount becomes available
useEffect(() => {
if (
expectedEventCount !== null &&
receivedEventCountRef.current >= expectedEventCount &&
isLoadingHistory
) {
setIsLoadingHistory(false);
}
}, [expectedEventCount, isLoadingHistory]);

const handleMessage = useCallback(
(messageEvent: MessageEvent) => {
try {
const event = JSON.parse(messageEvent.data);

// Track received events for history loading (count ALL events from WebSocket)
// Always count when loading, even if we don't have the expected count yet
if (isLoadingHistory) {
receivedEventCountRef.current += 1;

if (
expectedEventCount !== null &&
receivedEventCountRef.current >= expectedEventCount
) {
setIsLoadingHistory(false);
}
}

// Use type guard to validate v1 event structure
if (isV1Event(event)) {
addEvent(event);
@ -141,6 +178,8 @@ export function ConversationWebSocketProvider({
},
[
addEvent,
isLoadingHistory,
expectedEventCount,
setErrorMessage,
removeOptimisticUserMessage,
queryClient,
@ -164,10 +203,27 @@ export function ConversationWebSocketProvider({
return {
queryParams,
reconnect: { enabled: true },
onOpen: () => {
onOpen: async () => {
setConnectionState("OPEN");
hasConnectedRef.current = true; // Mark that we've successfully connected
removeErrorMessage(); // Clear any previous error messages on successful connection

// Fetch expected event count for history loading detection
if (conversationId) {
try {
const count =
await V1ConversationService.getEventCount(conversationId);
setExpectedEventCount(count);

// If no events expected, mark as loaded immediately
if (count === 0) {
setIsLoadingHistory(false);
}
} catch (error) {
// Fall back to marking as loaded to avoid infinite loading state
setIsLoadingHistory(false);
}
}
},
onClose: (event: CloseEvent) => {
setConnectionState("CLOSED");
@ -188,7 +244,13 @@ export function ConversationWebSocketProvider({
},
onMessage: handleMessage,
};
}, [handleMessage, setErrorMessage, removeErrorMessage, sessionApiKey]);
}, [
handleMessage,
setErrorMessage,
removeErrorMessage,
sessionApiKey,
conversationId,
]);

// Only attempt WebSocket connection when we have a valid URL
// This prevents connection attempts during task polling phase
@ -246,8 +308,8 @@ export function ConversationWebSocketProvider({
}, [socket, wsUrl]);

const contextValue = useMemo(
() => ({ connectionState, sendMessage }),
[connectionState, sendMessage],
() => ({ connectionState, sendMessage, isLoadingHistory }),
[connectionState, sendMessage, isLoadingHistory],
);

return (
@ -18,11 +18,15 @@ export const getConversationVersionFromQueryCache = (
};

/**
* Fetches a V1 conversation's sandbox_id
* Fetches a V1 conversation's sandbox_id and conversation_url
*/
const fetchV1ConversationSandboxId = async (
const fetchV1ConversationData = async (
conversationId: string,
): Promise<string> => {
): Promise<{
sandboxId: string;
conversationUrl: string | null;
sessionApiKey: string | null;
}> => {
const conversations = await V1ConversationService.batchGetAppConversations([
conversationId,
]);
@ -32,17 +36,34 @@ const fetchV1ConversationSandboxId = async (
throw new Error(`V1 conversation not found: ${conversationId}`);
}

return appConversation.sandbox_id;
return {
sandboxId: appConversation.sandbox_id,
conversationUrl: appConversation.conversation_url,
sessionApiKey: appConversation.session_api_key,
};
};

/**
* Pause a V1 conversation sandbox by fetching the sandbox_id and pausing it
*/
export const pauseV1ConversationSandbox = async (conversationId: string) => {
const sandboxId = await fetchV1ConversationSandboxId(conversationId);
const { sandboxId } = await fetchV1ConversationData(conversationId);
return V1ConversationService.pauseSandbox(sandboxId);
};

/**
* Pause a V1 conversation by fetching the conversation data and pausing it
*/
export const pauseV1Conversation = async (conversationId: string) => {
const { conversationUrl, sessionApiKey } =
await fetchV1ConversationData(conversationId);
return V1ConversationService.pauseConversation(
conversationId,
conversationUrl,
sessionApiKey,
);
};

/**
* Stops a V0 conversation using the legacy API
*/
@ -53,10 +74,23 @@ export const stopV0Conversation = async (conversationId: string) =>
* Resumes a V1 conversation sandbox by fetching the sandbox_id and resuming it
*/
export const resumeV1ConversationSandbox = async (conversationId: string) => {
const sandboxId = await fetchV1ConversationSandboxId(conversationId);
const { sandboxId } = await fetchV1ConversationData(conversationId);
return V1ConversationService.resumeSandbox(sandboxId);
};

/**
* Resume a V1 conversation by fetching the conversation data and resuming it
*/
export const resumeV1Conversation = async (conversationId: string) => {
const { conversationUrl, sessionApiKey } =
await fetchV1ConversationData(conversationId);
return V1ConversationService.resumeConversation(
conversationId,
conversationUrl,
sessionApiKey,
);
};

/**
* Starts a V0 conversation using the legacy API
*/
@ -45,7 +45,7 @@ export const useCreateConversation = () => {
createMicroagent,
} = variables;

const useV1 = USE_V1_CONVERSATION_API();
const useV1 = USE_V1_CONVERSATION_API() && !createMicroagent;

if (useV1) {
// Use V1 API - creates a conversation start task
32
frontend/src/hooks/mutation/use-respond-to-confirmation.ts
Normal file
@ -0,0 +1,32 @@
import { useMutation } from "@tanstack/react-query";
import EventService from "#/api/event-service/event-service.api";
import type { ConfirmationResponseRequest } from "#/api/event-service/event-service.types";

interface UseRespondToConfirmationVariables {
conversationId: string;
conversationUrl: string;
sessionApiKey?: string | null;
accept: boolean;
}

export const useRespondToConfirmation = () =>
useMutation({
mutationKey: ["respond-to-confirmation"],
mutationFn: async ({
conversationId,
conversationUrl,
sessionApiKey,
accept,
}: UseRespondToConfirmationVariables) => {
const request: ConfirmationResponseRequest = {
accept,
};

return EventService.respondToConfirmation(
conversationId,
conversationUrl,
request,
sessionApiKey,
);
},
});
@ -26,6 +26,13 @@ export const useUpdateConversation = () => {
),
);

// Also optimistically update the active conversation query
queryClient.setQueryData(
["user", "conversation", variables.conversationId],
(old: { title: string } | undefined) =>
old ? { ...old, title: variables.newTitle } : old,
);

return { previousConversations };
},
onError: (err, variables, context) => {
40
frontend/src/hooks/mutation/use-v1-pause-conversation.ts
Normal file
@ -0,0 +1,40 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";
import { pauseV1Conversation } from "./conversation-mutation-utils";

export const useV1PauseConversation = () => {
const queryClient = useQueryClient();

return useMutation({
mutationFn: (variables: { conversationId: string }) =>
pauseV1Conversation(variables.conversationId),
onMutate: async () => {
await queryClient.cancelQueries({ queryKey: ["user", "conversations"] });
const previousConversations = queryClient.getQueryData([
"user",
"conversations",
]);

return { previousConversations };
},
onError: (_, __, context) => {
if (context?.previousConversations) {
queryClient.setQueryData(
["user", "conversations"],
context.previousConversations,
);
}
},
onSettled: (_, __, variables) => {
// Invalidate the specific conversation query to trigger automatic refetch
queryClient.invalidateQueries({
queryKey: ["user", "conversation", variables.conversationId],
});
// Also invalidate the conversations list for consistency
queryClient.invalidateQueries({ queryKey: ["user", "conversations"] });
// Invalidate V1 batch get queries
queryClient.invalidateQueries({
queryKey: ["v1-batch-get-app-conversations"],
});
},
});
};
40
frontend/src/hooks/mutation/use-v1-resume-conversation.ts
Normal file
@ -0,0 +1,40 @@
import { useMutation, useQueryClient } from "@tanstack/react-query";
import { resumeV1Conversation } from "./conversation-mutation-utils";

export const useV1ResumeConversation = () => {
const queryClient = useQueryClient();

return useMutation({
mutationFn: (variables: { conversationId: string }) =>
resumeV1Conversation(variables.conversationId),
onMutate: async () => {
await queryClient.cancelQueries({ queryKey: ["user", "conversations"] });
const previousConversations = queryClient.getQueryData([
"user",
"conversations",
]);

return { previousConversations };
},
onError: (_, __, context) => {
if (context?.previousConversations) {
queryClient.setQueryData(
["user", "conversations"],
context.previousConversations,
);
}
},
onSettled: (_, __, variables) => {
// Invalidate the specific conversation query to trigger automatic refetch
queryClient.invalidateQueries({
queryKey: ["user", "conversation", variables.conversationId],
});
// Also invalidate the conversations list for consistency
queryClient.invalidateQueries({ queryKey: ["user", "conversations"] });
// Invalidate V1 batch get queries
queryClient.invalidateQueries({
queryKey: ["v1-batch-get-app-conversations"],
});
},
});
};
11
frontend/src/hooks/query/use-batch-app-conversations.ts
Normal file
@ -0,0 +1,11 @@
import { useQuery } from "@tanstack/react-query";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";

export const useBatchAppConversations = (ids: string[]) =>
useQuery({
queryKey: ["v1-batch-get-app-conversations", ids],
queryFn: () => V1ConversationService.batchGetAppConversations(ids),
enabled: ids.length > 0,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});
11
frontend/src/hooks/query/use-batch-sandboxes.ts
Normal file
@ -0,0 +1,11 @@
import { useQuery } from "@tanstack/react-query";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";

export const useBatchSandboxes = (ids: string[]) =>
useQuery({
queryKey: ["sandboxes", "batch", ids],
queryFn: () => V1ConversationService.batchGetSandboxes(ids),
enabled: ids.length > 0,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});
@ -2,14 +2,20 @@ import { useQuery } from "@tanstack/react-query";
import React from "react";
import { useConversationId } from "#/hooks/use-conversation-id";
import ConversationService from "#/api/conversation-service/conversation-service.api";
import V1ConversationService from "#/api/conversation-service/v1-conversation-service.api";
import { useRuntimeIsReady } from "../use-runtime-is-ready";
import { useActiveConversation } from "./use-active-conversation";

export const useConversationConfig = () => {
/**
* @deprecated This hook is for V0 conversations only. Use useUnifiedConversationConfig instead,
* or useV1ConversationConfig once we fully migrate to V1.
*/
export const useV0ConversationConfig = () => {
const { conversationId } = useConversationId();
const runtimeIsReady = useRuntimeIsReady();

const query = useQuery({
queryKey: ["conversation_config", conversationId],
queryKey: ["v0_conversation_config", conversationId],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");
return ConversationService.getRuntimeId(conversationId);
@ -34,3 +40,80 @@ export const useConversationConfig = () => {

return query;
};

export const useV1ConversationConfig = () => {
const { conversationId } = useConversationId();
const runtimeIsReady = useRuntimeIsReady();

const query = useQuery({
queryKey: ["v1_conversation_config", conversationId],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");
return V1ConversationService.getConversationConfig(conversationId);
},
enabled: runtimeIsReady && !!conversationId,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});

React.useEffect(() => {
if (query.data) {
const { runtime_id: runtimeId } = query.data;

// eslint-disable-next-line no-console
console.log(
"Runtime ID: %c%s",
"background: #444; color: #ffeb3b; font-weight: bold; padding: 2px 4px; border-radius: 4px;",
runtimeId,
);
}
}, [query.data]);

return query;
};

/**
* Unified hook that switches between V0 and V1 conversation config endpoints based on conversation version.
*
* @temporary This hook is temporary during the V0 to V1 migration period.
* Once we fully migrate to V1, all code should use useV1ConversationConfig directly.
*/
export const useUnifiedConversationConfig = () => {
const { conversationId } = useConversationId();
const { data: conversation } = useActiveConversation();
const runtimeIsReady = useRuntimeIsReady();
const isV1Conversation = conversation?.conversation_version === "V1";

const query = useQuery({
queryKey: ["conversation_config", conversationId, isV1Conversation],
queryFn: () => {
if (!conversationId) throw new Error("No conversation ID");

if (isV1Conversation) {
return V1ConversationService.getConversationConfig(conversationId);
}
return ConversationService.getRuntimeId(conversationId);
},
enabled: runtimeIsReady && !!conversationId && conversation !== undefined,
staleTime: 1000 * 60 * 5, // 5 minutes
gcTime: 1000 * 60 * 15, // 15 minutes
});

React.useEffect(() => {
if (query.data) {
const { runtime_id: runtimeId } = query.data;

// eslint-disable-next-line no-console
console.log(
"Runtime ID: %c%s",
"background: #444; color: #ffeb3b; font-weight: bold; padding: 2px 4px; border-radius: 4px;",
runtimeId,
);
}
}, [query.data]);

return query;
};

// Keep the old export name for backward compatibility (uses unified approach)
export const useConversationConfig = useUnifiedConversationConfig;
@ -1,14 +1,29 @@
import { useInfiniteQuery } from "@tanstack/react-query";
import { useInfiniteQuery, useQueryClient } from "@tanstack/react-query";
import ConversationService from "#/api/conversation-service/conversation-service.api";
import { useIsAuthed } from "./use-is-authed";

export const usePaginatedConversations = (limit: number = 20) => {
const { data: userIsAuthenticated } = useIsAuthed();
const queryClient = useQueryClient();

return useInfiniteQuery({
queryKey: ["user", "conversations", "paginated", limit],
queryFn: ({ pageParam }) =>
ConversationService.getUserConversations(limit, pageParam),
queryFn: async ({ pageParam }) => {
const result = await ConversationService.getUserConversations(
limit,
pageParam,
);

// Optimistically populate individual conversation caches
result.results.forEach((conversation) => {
queryClient.setQueryData(
["user", "conversation", conversation.conversation_id],
conversation,
);
});

return result;
},
enabled: !!userIsAuthenticated,
getNextPageParam: (lastPage) => lastPage.next_page_id,
initialPageParam: undefined as string | undefined,
99
frontend/src/hooks/query/use-unified-active-host.ts
Normal file
@ -0,0 +1,99 @@
import { useQueries, useQuery } from "@tanstack/react-query";
import axios from "axios";
import React from "react";
import ConversationService from "#/api/conversation-service/conversation-service.api";
import { useConversationId } from "#/hooks/use-conversation-id";
import { useRuntimeIsReady } from "#/hooks/use-runtime-is-ready";
import { useActiveConversation } from "#/hooks/query/use-active-conversation";
import { useBatchSandboxes } from "./use-batch-sandboxes";
import { useConversationConfig } from "./use-conversation-config";

/**
* Unified hook to get active web host for both legacy (V0) and V1 conversations
* - V0: Uses the legacy getWebHosts API endpoint and polls them
* - V1: Gets worker URLs from sandbox exposed_urls (WORKER_1, WORKER_2, etc.)
*/
export const useUnifiedActiveHost = () => {
const [activeHost, setActiveHost] = React.useState<string | null>(null);
const { conversationId } = useConversationId();
const runtimeIsReady = useRuntimeIsReady();
const { data: conversation } = useActiveConversation();
const { data: conversationConfig, isLoading: isLoadingConfig } =
useConversationConfig();

const isV1Conversation = conversation?.conversation_version === "V1";
const sandboxId = conversationConfig?.runtime_id;

// Fetch sandbox data for V1 conversations
const sandboxesQuery = useBatchSandboxes(sandboxId ? [sandboxId] : []);

// Get worker URLs from V1 sandbox or legacy web hosts from V0
const { data, isLoading: hostsQueryLoading } = useQuery({
queryKey: [conversationId, "unified", "hosts", isV1Conversation, sandboxId],
queryFn: async () => {
// V1: Get worker URLs from sandbox exposed_urls
if (isV1Conversation) {
if (
!sandboxesQuery.data ||
sandboxesQuery.data.length === 0 ||
!sandboxesQuery.data[0]
) {
return { hosts: [] };
}

const sandbox = sandboxesQuery.data[0];
const workerUrls =
sandbox.exposed_urls
?.filter((url) => url.name.startsWith("WORKER_"))
.map((url) => url.url) || [];

return { hosts: workerUrls };
}

// V0 (Legacy): Use the legacy API endpoint
const hosts = await ConversationService.getWebHosts(conversationId);
return { hosts };
},
enabled:
runtimeIsReady &&
!!conversationId &&
(!isV1Conversation || !!sandboxesQuery.data),
initialData: { hosts: [] },
meta: {
disableToast: true,
},
});

// Poll all hosts to find which one is active
const apps = useQueries({
queries: data.hosts.map((host) => ({
queryKey: [conversationId, "unified", "hosts", host],
queryFn: async () => {
try {
await axios.get(host);
return host;
} catch (e) {
return "";
}
},
refetchInterval: 3000,
meta: {
disableToast: true,
},
})),
});

const appsData = apps.map((app) => app.data);

React.useEffect(() => {
const successfulApp = appsData.find((app) => app);
setActiveHost(successfulApp || "");
}, [appsData]);

// Calculate overall loading state including dependent queries for V1
const isLoading = isV1Conversation
? isLoadingConfig || sandboxesQuery.isLoading || hostsQueryLoading
: hostsQueryLoading;

return { activeHost, isLoading };
};
Some files were not shown because too many files have changed in this diff.