Integrate OpenHands resolver into main repository (#4964)

Co-authored-by: openhands <openhands@all-hands.dev>
Co-authored-by: Rohit Malhotra <rohitvinodmalhotra@gmail.com>
Graham Neubig 2024-11-14 09:45:46 -05:00 committed by GitHub
parent 38dc41ca42
commit a753babb7a
43 changed files with 8094 additions and 9 deletions


@@ -1,15 +1,265 @@
name: Resolve Issues with OpenHands
name: Auto-Fix Tagged Issue with OpenHands
on:
workflow_call:
inputs:
max_iterations:
required: false
type: number
default: 50
macro:
required: false
type: string
default: "@openhands-agent"
secrets:
LLM_MODEL:
required: true
LLM_API_KEY:
required: true
LLM_BASE_URL:
required: false
PAT_TOKEN:
required: true
PAT_USERNAME:
required: true
issues:
types: [labeled]
pull_request:
types: [labeled]
issue_comment:
types: [created]
pull_request_review_comment:
types: [created]
pull_request_review:
types: [submitted]
permissions:
contents: write
pull-requests: write
issues: write
jobs:
call-openhands-resolver:
uses: All-Hands-AI/openhands-resolver/.github/workflows/openhands-resolver.yml@main
if: github.event.label.name == 'fix-me'
with:
max_iterations: 50
secrets: inherit
auto-fix:
if: |
github.event_name == 'workflow_call' ||
github.event.label.name == 'fix-me' ||
github.event.label.name == 'fix-me-experimental' ||
(
((github.event_name == 'issue_comment' || github.event_name == 'pull_request_review_comment') &&
startsWith(github.event.comment.body, inputs.macro || '@openhands-agent') &&
(github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER')
) ||
(github.event_name == 'pull_request_review' &&
startsWith(github.event.review.body, inputs.macro || '@openhands-agent') &&
(github.event.review.author_association == 'OWNER' || github.event.review.author_association == 'COLLABORATOR' || github.event.review.author_association == 'MEMBER')
)
)
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@v4
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.12"
- name: Get latest versions and create requirements.txt
run: |
python -m pip index versions openhands-ai > openhands_versions.txt
OPENHANDS_VERSION=$(head -n 1 openhands_versions.txt | awk '{print $2}' | tr -d '()')
echo "openhands-resolver==${OPENHANDS_VERSION}" >> requirements.txt
cat requirements.txt
- name: Cache pip dependencies
if: github.event.label.name != 'fix-me-experimental'
uses: actions/cache@v3
with:
path: ${{ env.pythonLocation }}/lib/python3.12/site-packages/*
key: ${{ runner.os }}-pip-openhands-resolver-${{ hashFiles('requirements.txt') }}
restore-keys: |
${{ runner.os }}-pip-openhands-resolver-${{ hashFiles('requirements.txt') }}
- name: Check required environment variables
env:
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
PAT_TOKEN: ${{ secrets.PAT_TOKEN }}
PAT_USERNAME: ${{ secrets.PAT_USERNAME }}
run: |
required_vars=("LLM_MODEL" "LLM_API_KEY" "PAT_TOKEN" "PAT_USERNAME")
for var in "${required_vars[@]}"; do
if [ -z "${!var}" ]; then
echo "Error: Required environment variable $var is not set."
exit 1
fi
done
- name: Set environment variables
run: |
if [ -n "${{ github.event.review.body }}" ]; then
echo "ISSUE_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_ENV
echo "ISSUE_TYPE=pr" >> $GITHUB_ENV
elif [ -n "${{ github.event.issue.pull_request }}" ]; then
echo "ISSUE_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV
echo "ISSUE_TYPE=pr" >> $GITHUB_ENV
elif [ -n "${{ github.event.pull_request.number }}" ]; then
echo "ISSUE_NUMBER=${{ github.event.pull_request.number }}" >> $GITHUB_ENV
echo "ISSUE_TYPE=pr" >> $GITHUB_ENV
else
echo "ISSUE_NUMBER=${{ github.event.issue.number }}" >> $GITHUB_ENV
echo "ISSUE_TYPE=issue" >> $GITHUB_ENV
fi
if [ -n "${{ github.event.review.body }}" ]; then
echo "COMMENT_ID=${{ github.event.review.id || 'None' }}" >> $GITHUB_ENV
else
echo "COMMENT_ID=${{ github.event.comment.id || 'None' }}" >> $GITHUB_ENV
fi
echo "MAX_ITERATIONS=${{ inputs.max_iterations || 50 }}" >> $GITHUB_ENV
echo "SANDBOX_ENV_GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> $GITHUB_ENV
- name: Comment on issue with start message
uses: actions/github-script@v7
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const issueType = process.env.ISSUE_TYPE;
github.rest.issues.createComment({
issue_number: ${{ env.ISSUE_NUMBER }},
owner: context.repo.owner,
repo: context.repo.repo,
body: `OpenHands started fixing the ${issueType}! You can monitor the progress [here](https://github.com/${context.repo.owner}/${context.repo.repo}/actions/runs/${context.runId}).`
});
- name: Install OpenHands
run: |
if [ "${{ github.event.label.name }}" == "fix-me-experimental" ]; then
python -m pip install --upgrade pip
pip install git+https://github.com/all-hands-ai/openhands.git
else
python -m pip install --upgrade -r requirements.txt
fi
- name: Attempt to resolve issue
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
PYTHONPATH: ""
run: |
cd /tmp && python -m openhands.resolver.resolve_issue \
--repo ${{ github.repository }} \
--issue-number ${{ env.ISSUE_NUMBER }} \
--issue-type ${{ env.ISSUE_TYPE }} \
--max-iterations ${{ env.MAX_ITERATIONS }} \
--comment-id ${{ env.COMMENT_ID }}
- name: Check resolution result
id: check_result
run: |
if cd /tmp && grep -q '"success":true' output/output.jsonl; then
echo "RESOLUTION_SUCCESS=true" >> $GITHUB_OUTPUT
else
echo "RESOLUTION_SUCCESS=false" >> $GITHUB_OUTPUT
fi
- name: Upload output.jsonl as artifact
uses: actions/upload-artifact@v4
if: always() # Upload even if the previous steps fail
with:
name: resolver-output
path: /tmp/output/output.jsonl
retention-days: 30 # Keep the artifact for 30 days
- name: Create draft PR or push branch
env:
GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}
PYTHONPATH: ""
run: |
if [ "${{ steps.check_result.outputs.RESOLUTION_SUCCESS }}" == "true" ]; then
cd /tmp && python -m openhands.resolver.send_pull_request \
--issue-number ${{ env.ISSUE_NUMBER }} \
--pr-type draft | tee pr_result.txt && \
grep "draft created" pr_result.txt | sed 's/.*\///g' > pr_number.txt
else
cd /tmp && python -m openhands.resolver.send_pull_request \
--issue-number ${{ env.ISSUE_NUMBER }} \
--pr-type branch \
--send-on-failure | tee branch_result.txt && \
grep "branch created" branch_result.txt | sed 's/.*\///g; s/.expand=1//g' > branch_name.txt
fi
- name: Comment on issue
uses: actions/github-script@v7
with:
github-token: ${{secrets.GITHUB_TOKEN}}
script: |
const fs = require('fs');
const issueNumber = ${{ env.ISSUE_NUMBER }};
const success = ${{ steps.check_result.outputs.RESOLUTION_SUCCESS }};
let prNumber = '';
let branchName = '';
let logContent = '';
const noChangesMessage = `No changes to commit for issue #${issueNumber}. Skipping commit.`;
try {
if (success){
logContent = fs.readFileSync('/tmp/pr_result.txt', 'utf8').trim();
} else {
logContent = fs.readFileSync('/tmp/branch_result.txt', 'utf8').trim();
}
} catch (error) {
console.error('Error reading results file:', error);
}
try {
if (success) {
prNumber = fs.readFileSync('/tmp/pr_number.txt', 'utf8').trim();
} else {
branchName = fs.readFileSync('/tmp/branch_name.txt', 'utf8').trim();
}
} catch (error) {
console.error('Error reading file:', error);
}
if (logContent.includes(noChangesMessage)) {
github.rest.issues.createComment({
issue_number: issueNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body: `The workflow to fix this issue encountered an error. OpenHands failed to create any code changes.`
});
} else if (success && prNumber) {
github.rest.issues.createComment({
issue_number: issueNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body: `A potential fix has been generated and a draft PR #${prNumber} has been created. Please review the changes.`
});
} else if (!success && branchName) {
github.rest.issues.createComment({
issue_number: issueNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body: `An attempt was made to automatically fix this issue, but it was unsuccessful. A branch named '${branchName}' has been created with the attempted changes. You can view the branch [here](https://github.com/${context.repo.owner}/${context.repo.repo}/tree/${branchName}). Manual intervention may be required.`
});
} else {
github.rest.issues.createComment({
issue_number: issueNumber,
owner: context.repo.owner,
repo: context.repo.repo,
body: `The workflow to fix this issue encountered an error. Please check the workflow logs for more information.`
});
}

.gitignore

@@ -176,6 +176,9 @@ evaluation/gorilla/data
evaluation/toolqa/data
evaluation/scienceagentbench/benchmark
# openhands resolver
output/
# frontend
# dependencies


@@ -35,8 +35,8 @@ class FakeUserResponseFunc(Protocol):
def __call__(
self,
state: State,
encapsulate_solution: bool = ...,
try_parse: Callable[[Action], str] = ...,
encapsulate_solution: bool = False,
try_parse: Callable[[Action | None], str] | None = None,
) -> str: ...


@@ -0,0 +1,182 @@
# OpenHands GitHub Issue Resolver 🙌
Need help resolving a GitHub issue but don't have the time to do it yourself? Let an AI agent help you out!
This tool allows you to use open-source AI agents based on [OpenHands](https://github.com/all-hands-ai/openhands)
to attempt to resolve GitHub issues automatically. While it can handle multiple issues, it's primarily designed
to help you resolve one issue at a time with high quality.
Getting started is simple - just follow the instructions below.
## Using the GitHub Actions Workflow
This repository includes a GitHub Actions workflow that can automatically attempt to fix individual issues labeled with 'fix-me'.
Follow these steps to use this workflow in your own repository:
1. [Create a personal access token](https://github.com/settings/tokens?type=beta) with read/write scope for "contents", "issues", "pull requests", and "workflows"
2. Create an API key for the [Claude API](https://www.anthropic.com/api) (recommended) or another supported LLM service
3. Copy `examples/openhands-resolver.yml` to your repository's `.github/workflows/` directory
4. Configure repository permissions:
- Go to `Settings -> Actions -> General -> Workflow permissions`
- Select "Read and write permissions"
- Enable "Allow GitHub Actions to create and approve pull requests"
Note: If the "Read and write permissions" option is greyed out:
- First check if permissions need to be set at the organization level
- If still greyed out at the organization level, permissions need to be set in the [Enterprise policy settings](https://docs.github.com/en/enterprise-cloud@latest/admin/enforcing-policies/enforcing-policies-for-your-enterprise/enforcing-policies-for-github-actions-in-your-enterprise#enforcing-a-policy-for-workflow-permissions-in-your-enterprise)
5. Set up [GitHub secrets](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions):
- Required:
- `PAT_USERNAME`: GitHub username for the personal access token
- `PAT_TOKEN`: The personal access token
- `LLM_MODEL`: LLM model to use (e.g., "anthropic/claude-3-5-sonnet-20241022")
- `LLM_API_KEY`: Your LLM API key
- Optional:
- `LLM_BASE_URL`: Base URL for LLM API (only if using a proxy)
Note: You can set these secrets at the organization level to use across multiple repositories.
6. Usage:
There are two ways to trigger the OpenHands agent:
a. Using the 'fix-me' label:
- Add the 'fix-me' label to any issue you want the AI to resolve
- The agent will consider all comments in the issue thread when resolving
- The workflow will:
1. Attempt to resolve the issue using OpenHands
2. Create a draft PR if successful, or push a branch if unsuccessful
3. Comment on the issue with the results
4. Remove the 'fix-me' label once processed
b. Using `@openhands-agent` mention:
- Create a new comment containing `@openhands-agent` in any issue
- The agent will only consider the comment where it's mentioned
- The workflow will:
1. Attempt to resolve the issue based on the specific comment
2. Create a draft PR if successful, or push a branch if unsuccessful
3. Comment on the issue with the results
Need help? Feel free to [open an issue](https://github.com/all-hands-ai/openhands-resolver/issues) or email us at [contact@all-hands.dev](mailto:contact@all-hands.dev).
## Manual Installation
If you prefer to run the resolver programmatically instead of using GitHub Actions, follow these steps:
1. Install the package:
```bash
pip install openhands-ai
```
2. Create a GitHub access token:
- Visit [GitHub's token settings](https://github.com/settings/personal-access-tokens/new)
- Create a fine-grained token with these scopes:
- "Content"
- "Pull requests"
- "Issues"
- "Workflows"
- If you don't have push access to the target repo, you can fork it first
3. Set up environment variables:
```bash
# GitHub credentials
export GITHUB_TOKEN="your-github-token"
export GITHUB_USERNAME="your-github-username" # Optional, defaults to token owner
# LLM configuration
export LLM_MODEL="anthropic/claude-3-5-sonnet-20241022" # Recommended
export LLM_API_KEY="your-llm-api-key"
export LLM_BASE_URL="your-api-url" # Optional, for API proxies
```
Note: OpenHands works best with powerful models like Anthropic's Claude or OpenAI's GPT-4. While other models are supported, they may not perform as well for complex issue resolution.
## Resolving Issues
The resolver can automatically attempt to fix a single issue in your repository using the following command:
```bash
python -m openhands.resolver.resolve_issue --repo [OWNER]/[REPO] --issue-number [NUMBER]
```
For instance, if you want to resolve issue #100 in this repo, you would run:
```bash
python -m openhands.resolver.resolve_issue --repo all-hands-ai/openhands-resolver --issue-number 100
```
The output will be written to the `output/` directory.
If you've installed the package from source using poetry, you can use:
```bash
poetry run python openhands/resolver/resolve_issue.py --repo all-hands-ai/openhands-resolver --issue-number 100
```
For resolving multiple issues at once (e.g., in a batch process), you can use the `resolve_all_issues` command:
```bash
python -m openhands.resolver.resolve_all_issues --repo [OWNER]/[REPO] --issue-numbers [NUMBERS]
```
For example:
```bash
python -m openhands.resolver.resolve_all_issues --repo all-hands-ai/openhands-resolver --issue-numbers 100,101,102
```
## Responding to PR Comments
The resolver can also respond to comments on pull requests using:
```bash
python -m openhands.resolver.send_pull_request --issue-number PR_NUMBER --issue-type pr
```
This functionality is available both through the GitHub Actions workflow and when running the resolver locally.
## Visualizing successful PRs
To find successful PRs, you can run the following command:
```bash
grep '"success":true' output/output.jsonl | sed 's/.*\("number":[0-9]*\).*/\1/g'
```
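The same filter can be done in Python with the `io_utils` helpers added in this commit (a minimal sketch, assuming `ResolverOutput` exposes the `success` field that is serialized into `output.jsonl`):
```python
from openhands.resolver.io_utils import load_all_resolver_outputs

# Collect the numbers of all issues the resolver marked as successful.
successful = [
    output.issue.number
    for output in load_all_resolver_outputs('output/output.jsonl')
    if output.success  # assumed field, mirrors "success" in the JSONL
]
print(successful)
```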
Then you can go through and visualize the ones you'd like.
```bash
python -m openhands.resolver.visualize_resolver_output --issue-number ISSUE_NUMBER --vis-method json
```
## Uploading PRs
If you find any PRs that were successful, you can upload them.
There are three ways you can upload:
1. `branch` - upload a branch without creating a PR
2. `draft` - create a draft PR
3. `ready` - create a non-draft PR that's ready for review
```bash
python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --github-username YOUR_GITHUB_USERNAME --pr-type draft
```
If you want to upload to a fork, you can do so by specifying the `fork-owner`:
```bash
python -m openhands.resolver.send_pull_request --issue-number ISSUE_NUMBER --github-username YOUR_GITHUB_USERNAME --pr-type draft --fork-owner YOUR_GITHUB_USERNAME
```
## Providing Custom Instructions
You can customize how the AI agent approaches issue resolution by adding a `.openhands_instructions` file to the root of your repository. If present, this file's contents will be injected into the prompt for openhands edits.
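The `.openhands_instructions` example files added elsewhere in this commit show the expected shape; a minimal, made-up file might look like:
```
This is a Python repo for my-tool, a small CLI utility.
- Setup: `poetry install --with test`
- Testing: `poetry run pytest tests/`
```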
## Troubleshooting
If you have any issues, please open an issue on this GitHub repo; we're happy to help!
Alternatively, you can [email us](mailto:contact@all-hands.dev) or join the [OpenHands Slack workspace](https://join.slack.com/t/opendevin/shared_invite/zt-2oikve2hu-UDxHeo8nsE69y6T7yFX_BA) and ask there.


@@ -0,0 +1,34 @@
name: Resolve Issue with OpenHands
on:
issues:
types: [labeled]
pull_request:
types: [labeled]
issue_comment:
types: [created]
permissions:
contents: write
pull-requests: write
issues: write
jobs:
call-openhands-resolver:
if: |
${{
github.event.label.name == 'fix-me' ||
(github.event_name == 'issue_comment' &&
startsWith(github.event.comment.body, vars.OPENHANDS_MACRO || '@openhands-agent') &&
(github.event.comment.author_association == 'OWNER' || github.event.comment.author_association == 'COLLABORATOR' || github.event.comment.author_association == 'MEMBER'))
}}
uses: All-Hands-AI/OpenHands/.github/workflows/openhands-resolver.yml@main
with:
macro: ${{ vars.OPENHANDS_MACRO || '@openhands-agent' }}
max_iterations: 50
secrets:
PAT_TOKEN: ${{ secrets.PAT_TOKEN }}
PAT_USERNAME: ${{ secrets.PAT_USERNAME }}
LLM_MODEL: ${{ secrets.LLM_MODEL }}
LLM_API_KEY: ${{ secrets.LLM_API_KEY }}
LLM_BASE_URL: ${{ secrets.LLM_BASE_URL }}


@@ -0,0 +1,20 @@
from pydantic import BaseModel
class ReviewThread(BaseModel):
comment: str
files: list[str]
class GithubIssue(BaseModel):
owner: str
repo: str
number: int
title: str
body: str
thread_comments: list[str] | None = None # Added field for issue thread comments
closing_issues: list[str] | None = None
review_comments: list[str] | None = None
review_threads: list[ReviewThread] | None = None
thread_ids: list[str] | None = None
head_branch: str | None = None
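Since `GithubIssue` is a plain pydantic model, constructing and serializing one is straightforward; a quick sketch with made-up values:
```python
from openhands.resolver.github_issue import GithubIssue

issue = GithubIssue(
    owner='all-hands-ai',      # hypothetical values for illustration
    repo='openhands-resolver',
    number=100,
    title='Example issue',
    body='Something is broken.',
)
# Optional fields (thread_comments, review_threads, ...) default to None.
print(issue.model_dump_json())
```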


@@ -0,0 +1,17 @@
import json
from typing import Iterable
from openhands.resolver.resolver_output import ResolverOutput
def load_all_resolver_outputs(output_jsonl: str) -> Iterable[ResolverOutput]:
with open(output_jsonl, 'r') as f:
for line in f:
yield ResolverOutput.model_validate(json.loads(line))
def load_single_resolver_output(output_jsonl: str, issue_number: int) -> ResolverOutput:
for resolver_output in load_all_resolver_outputs(output_jsonl):
if resolver_output.issue.number == issue_number:
return resolver_output
raise ValueError(f'Issue number {issue_number} not found in {output_jsonl}')
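A quick illustrative call (the path and issue number are hypothetical):
```python
from openhands.resolver.io_utils import load_single_resolver_output

# Raises ValueError if issue 100 is not present in the file.
output = load_single_resolver_output('output/output.jsonl', 100)
print(output.issue.title)
```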


@@ -0,0 +1,728 @@
import json
import os
import re
from abc import ABC, abstractmethod
from typing import Any, ClassVar
import jinja2
import litellm
import requests
from openhands.core.config import LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.events.event import Event
from openhands.resolver.github_issue import GithubIssue, ReviewThread
class IssueHandlerInterface(ABC):
issue_type: ClassVar[str]
@abstractmethod
def get_converted_issues(self, comment_id: int | None = None) -> list[GithubIssue]:
"""Download issues from GitHub."""
pass
@abstractmethod
def get_instruction(
self,
issue: GithubIssue,
prompt_template: str,
repo_instruction: str | None = None,
) -> tuple[str, list[str]]:
"""Generate instruction and image urls for the agent."""
pass
@abstractmethod
def guess_success(
self, issue: GithubIssue, history: list[Event], llm_config: LLMConfig
) -> tuple[bool, list[bool] | None, str]:
"""Guess if the issue has been resolved based on the agent's output."""
pass
class IssueHandler(IssueHandlerInterface):
issue_type: ClassVar[str] = 'issue'
def __init__(self, owner: str, repo: str, token: str):
self.download_url = 'https://api.github.com/repos/{}/{}/issues'
self.owner = owner
self.repo = repo
self.token = token
def _download_issues_from_github(self) -> list[Any]:
url = self.download_url.format(self.owner, self.repo)
headers = {
'Authorization': f'token {self.token}',
'Accept': 'application/vnd.github.v3+json',
}
params: dict[str, int | str] = {'state': 'open', 'per_page': 100, 'page': 1}
all_issues = []
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
issues = response.json()
if not issues:
break
if not isinstance(issues, list) or any(
[not isinstance(issue, dict) for issue in issues]
):
raise ValueError('Expected list of dictionaries from Github API.')
all_issues.extend(issues)
assert isinstance(params['page'], int)
params['page'] += 1
return all_issues
def _extract_image_urls(self, issue_body: str) -> list[str]:
# Regular expression to match Markdown image syntax ![alt text](image_url)
image_pattern = r'!\[.*?\]\((https?://[^\s)]+)\)'
return re.findall(image_pattern, issue_body)
def _extract_issue_references(self, body: str) -> list[int]:
pattern = r'#(\d+)'
return [int(match) for match in re.findall(pattern, body)]
def _get_issue_comments(
self, issue_number: int, comment_id: int | None = None
) -> list[str] | None:
"""Download comments for a specific issue from Github."""
url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{issue_number}/comments'
headers = {
'Authorization': f'token {self.token}',
'Accept': 'application/vnd.github.v3+json',
}
params = {'per_page': 100, 'page': 1}
all_comments = []
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
comments = response.json()
if not comments:
break
if comment_id:
matching_comment = next(
(
comment['body']
for comment in comments
if comment['id'] == comment_id
),
None,
)
if matching_comment:
return [matching_comment]
else:
all_comments.extend([comment['body'] for comment in comments])
params['page'] += 1
return all_comments if all_comments else None
def get_converted_issues(self, comment_id: int | None = None) -> list[GithubIssue]:
"""Download issues from Github.
Returns:
List of Github issues.
"""
all_issues = self._download_issues_from_github()
converted_issues = []
for issue in all_issues:
if any([issue.get(key) is None for key in ['number', 'title', 'body']]):
logger.warning(
f'Skipping issue {issue} as it is missing number, title, or body.'
)
continue
if 'pull_request' in issue:
continue
# Get issue thread comments
thread_comments = self._get_issue_comments(
issue['number'], comment_id=comment_id
)
# Convert empty lists to None for optional fields
issue_details = GithubIssue(
owner=self.owner,
repo=self.repo,
number=issue['number'],
title=issue['title'],
body=issue['body'],
thread_comments=thread_comments,
review_comments=None, # Initialize review comments as None for regular issues
)
converted_issues.append(issue_details)
return converted_issues
def get_instruction(
self,
issue: GithubIssue,
prompt_template: str,
repo_instruction: str | None = None,
) -> tuple[str, list[str]]:
"""Generate instruction for the agent."""
# Format thread comments if they exist
thread_context = ''
if issue.thread_comments:
thread_context = '\n\nIssue Thread Comments:\n' + '\n---\n'.join(
issue.thread_comments
)
images = []
images.extend(self._extract_image_urls(issue.body))
images.extend(self._extract_image_urls(thread_context))
template = jinja2.Template(prompt_template)
return (
template.render(
body=issue.title + '\n\n' + issue.body + thread_context,
repo_instruction=repo_instruction,
),
images,
)
def guess_success(
self, issue: GithubIssue, history: list[Event], llm_config: LLMConfig
) -> tuple[bool, None | list[bool], str]:
"""Guess if the issue is fixed based on the history and the issue description."""
last_message = history[-1].message
# Include thread comments in the prompt if they exist
issue_context = issue.body
if issue.thread_comments:
issue_context += '\n\nIssue Thread Comments:\n' + '\n---\n'.join(
issue.thread_comments
)
with open(
os.path.join(
os.path.dirname(__file__),
'prompts/guess_success/issue-success-check.jinja',
),
'r',
) as f:
template = jinja2.Template(f.read())
prompt = template.render(issue_context=issue_context, last_message=last_message)
response = litellm.completion(
model=llm_config.model,
messages=[{'role': 'user', 'content': prompt}],
api_key=llm_config.api_key,
base_url=llm_config.base_url,
)
answer = response.choices[0].message.content.strip()
pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)'
match = re.search(pattern, answer)
if match:
return match.group(1).lower() == 'true', None, match.group(2)
return False, None, f'Failed to decode answer from LLM response: {answer}'
class PRHandler(IssueHandler):
issue_type: ClassVar[str] = 'pr'
def __init__(self, owner: str, repo: str, token: str):
super().__init__(owner, repo, token)
self.download_url = 'https://api.github.com/repos/{}/{}/pulls'
def __download_pr_metadata(
self, pull_number: int, comment_id: int | None = None
) -> tuple[list[str], list[int], list[str], list[ReviewThread], list[str]]:
"""Run a GraphQL query against the GitHub API for information.
Retrieves information about:
1. unresolved review comments
2. referenced issues the pull request would close
Args:
pull_number: The number of the pull request to query.
comment_id: Optional ID of a specific comment to focus on.
query: The GraphQL query as a string.
variables: A dictionary of variables for the query.
token: Your GitHub personal access token.
Returns:
The JSON response from the GitHub API.
"""
# Using graphql as REST API doesn't indicate resolved status for review comments
# TODO: grabbing the first 10 issues, 100 review threads, and 100 comments; add pagination to retrieve all
query = """
query($owner: String!, $repo: String!, $pr: Int!) {
repository(owner: $owner, name: $repo) {
pullRequest(number: $pr) {
closingIssuesReferences(first: 10) {
edges {
node {
body
number
}
}
}
url
reviews(first: 100) {
nodes {
body
state
fullDatabaseId
}
}
reviewThreads(first: 100) {
edges{
node{
id
isResolved
comments(first: 100) {
totalCount
nodes {
body
path
fullDatabaseId
}
}
}
}
}
}
}
}
"""
variables = {'owner': self.owner, 'repo': self.repo, 'pr': pull_number}
url = 'https://api.github.com/graphql'
headers = {
'Authorization': f'Bearer {self.token}',
'Content-Type': 'application/json',
}
response = requests.post(
url, json={'query': query, 'variables': variables}, headers=headers
)
response.raise_for_status()
response_json = response.json()
# Parse the response to get closing issue references and unresolved review comments
pr_data = (
response_json.get('data', {}).get('repository', {}).get('pullRequest', {})
)
# Get closing issues
closing_issues = pr_data.get('closingIssuesReferences', {}).get('edges', [])
closing_issues_bodies = [issue['node']['body'] for issue in closing_issues]
closing_issue_numbers = [
issue['node']['number'] for issue in closing_issues
] # Extract issue numbers
# Get review comments
reviews = pr_data.get('reviews', {}).get('nodes', [])
if comment_id is not None:
reviews = [
review
for review in reviews
if int(review['fullDatabaseId']) == comment_id
]
review_bodies = [review['body'] for review in reviews]
# Get unresolved review threads
review_threads = []
thread_ids = [] # Store thread IDs; agent replies to the thread
raw_review_threads = pr_data.get('reviewThreads', {}).get('edges', [])
for thread in raw_review_threads:
node = thread.get('node', {})
if not node.get(
'isResolved', True
): # Check if the review thread is unresolved
id = node.get('id')
thread_contains_comment_id = False
my_review_threads = node.get('comments', {}).get('nodes', [])
message = ''
files = []
for i, review_thread in enumerate(my_review_threads):
if (
comment_id is not None
and int(review_thread['fullDatabaseId']) == comment_id
):
thread_contains_comment_id = True
if (
i == len(my_review_threads) - 1
): # Check if it's the last comment in the thread
if len(my_review_threads) > 1:
message += '---\n' # Add "---" before the last message if there's more than one comment
message += 'latest feedback:\n' + review_thread['body'] + '\n'
else:
message += (
review_thread['body'] + '\n'
) # Add each comment on a new line
file = review_thread.get('path')
if file and file not in files:
files.append(file)
if comment_id is None or thread_contains_comment_id:
unresolved_thread = ReviewThread(comment=message, files=files)
review_threads.append(unresolved_thread)
thread_ids.append(id)
return (
closing_issues_bodies,
closing_issue_numbers,
review_bodies,
review_threads,
thread_ids,
)
# Override processing of downloaded issues
def _get_pr_comments(
self, pr_number: int, comment_id: int | None = None
) -> list[str] | None:
"""Download comments for a specific pull request from Github."""
url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{pr_number}/comments'
headers = {
'Authorization': f'token {self.token}',
'Accept': 'application/vnd.github.v3+json',
}
params = {'per_page': 100, 'page': 1}
all_comments = []
while True:
response = requests.get(url, headers=headers, params=params)
response.raise_for_status()
comments = response.json()
if not comments:
break
if comment_id is not None:
matching_comment = next(
(
comment['body']
for comment in comments
if comment['id'] == comment_id
),
None,
)
if matching_comment:
return [matching_comment]
else:
all_comments.extend([comment['body'] for comment in comments])
params['page'] += 1
return all_comments if all_comments else None
def __get_context_from_external_issues_references(
self,
closing_issues: list[str],
closing_issue_numbers: list[int],
issue_body: str,
review_comments: list[str],
review_threads: list[ReviewThread],
thread_comments: list[str] | None,
):
new_issue_references = []
if issue_body:
new_issue_references.extend(self._extract_issue_references(issue_body))
if review_comments:
for comment in review_comments:
new_issue_references.extend(self._extract_issue_references(comment))
if review_threads:
for review_thread in review_threads:
new_issue_references.extend(
self._extract_issue_references(review_thread.comment)
)
if thread_comments:
for thread_comment in thread_comments:
new_issue_references.extend(
self._extract_issue_references(thread_comment)
)
non_duplicate_references = set(new_issue_references)
unique_issue_references = non_duplicate_references.difference(
closing_issue_numbers
)
for issue_number in unique_issue_references:
url = f'https://api.github.com/repos/{self.owner}/{self.repo}/issues/{issue_number}'
headers = {
'Authorization': f'Bearer {self.token}',
'Accept': 'application/vnd.github.v3+json',
}
response = requests.get(url, headers=headers)
response.raise_for_status()
issue_data = response.json()
issue_body = issue_data.get('body', '')
if issue_body:
closing_issues.append(issue_body)
return closing_issues
def get_converted_issues(self, comment_id: int | None = None) -> list[GithubIssue]:
all_issues = self._download_issues_from_github()
converted_issues = []
for issue in all_issues:
# For PRs, body can be None
if any([issue.get(key) is None for key in ['number', 'title']]):
logger.warning(f'Skipping issue {issue} as it is missing number or title.')
continue
# Handle None body for PRs
body = issue.get('body') if issue.get('body') is not None else ''
(
closing_issues,
closing_issues_numbers,
review_comments,
review_threads,
thread_ids,
) = self.__download_pr_metadata(issue['number'], comment_id=comment_id)
head_branch = issue['head']['ref']
# Get PR thread comments
thread_comments = self._get_pr_comments(
issue['number'], comment_id=comment_id
)
closing_issues = self.__get_context_from_external_issues_references(
closing_issues,
closing_issues_numbers,
body,
review_comments,
review_threads,
thread_comments,
)
issue_details = GithubIssue(
owner=self.owner,
repo=self.repo,
number=issue['number'],
title=issue['title'],
body=body,
closing_issues=closing_issues,
review_comments=review_comments,
review_threads=review_threads,
thread_ids=thread_ids,
head_branch=head_branch,
thread_comments=thread_comments,
)
converted_issues.append(issue_details)
return converted_issues
def get_instruction(
self,
issue: GithubIssue,
prompt_template: str,
repo_instruction: str | None = None,
) -> tuple[str, list[str]]:
"""Generate instruction for the agent."""
template = jinja2.Template(prompt_template)
images = []
issues_str = None
if issue.closing_issues:
issues_str = json.dumps(issue.closing_issues, indent=4)
images.extend(self._extract_image_urls(issues_str))
# Handle PRs with review comments
review_comments_str = None
if issue.review_comments:
review_comments_str = json.dumps(issue.review_comments, indent=4)
images.extend(self._extract_image_urls(review_comments_str))
# Handle PRs with file-specific review comments
review_thread_str = None
review_thread_file_str = None
if issue.review_threads:
review_threads = [
review_thread.comment for review_thread in issue.review_threads
]
review_thread_files = []
for review_thread in issue.review_threads:
review_thread_files.extend(review_thread.files)
review_thread_str = json.dumps(review_threads, indent=4)
review_thread_file_str = json.dumps(review_thread_files, indent=4)
images.extend(self._extract_image_urls(review_thread_str))
# Format thread comments if they exist
thread_context = ''
if issue.thread_comments:
thread_context = '\n\nPR Thread Comments:\n' + '\n---\n'.join(
issue.thread_comments
)
images.extend(self._extract_image_urls(thread_context))
instruction = template.render(
issues=issues_str,
review_comments=review_comments_str,
review_threads=review_thread_str,
files=review_thread_file_str,
thread_context=thread_context,
repo_instruction=repo_instruction,
)
return instruction, images
def _check_feedback_with_llm(
self, prompt: str, llm_config: LLMConfig
) -> tuple[bool, str]:
"""Helper function to check feedback with LLM and parse response."""
response = litellm.completion(
model=llm_config.model,
messages=[{'role': 'user', 'content': prompt}],
api_key=llm_config.api_key,
base_url=llm_config.base_url,
)
answer = response.choices[0].message.content.strip()
pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)'
match = re.search(pattern, answer)
if match:
return match.group(1).lower() == 'true', match.group(2).strip()
return False, f'Failed to decode answer from LLM response: {answer}'
def _check_review_thread(
self,
review_thread: ReviewThread,
issues_context: str,
last_message: str,
llm_config: LLMConfig,
) -> tuple[bool, str]:
"""Check if a review thread's feedback has been addressed."""
files_context = json.dumps(review_thread.files, indent=4)
with open(
os.path.join(
os.path.dirname(__file__),
'prompts/guess_success/pr-feedback-check.jinja',
),
'r',
) as f:
template = jinja2.Template(f.read())
prompt = template.render(
issue_context=issues_context,
feedback=review_thread.comment,
files_context=files_context,
last_message=last_message,
)
return self._check_feedback_with_llm(prompt, llm_config)
def _check_thread_comments(
self,
thread_comments: list[str],
issues_context: str,
last_message: str,
llm_config: LLMConfig,
) -> tuple[bool, str]:
"""Check if thread comments feedback has been addressed."""
thread_context = '\n---\n'.join(thread_comments)
with open(
os.path.join(
os.path.dirname(__file__), 'prompts/guess_success/pr-thread-check.jinja'
),
'r',
) as f:
template = jinja2.Template(f.read())
prompt = template.render(
issue_context=issues_context,
thread_context=thread_context,
last_message=last_message,
)
return self._check_feedback_with_llm(prompt, llm_config)
def _check_review_comments(
self,
review_comments: list[str],
issues_context: str,
last_message: str,
llm_config: LLMConfig,
) -> tuple[bool, str]:
"""Check if review comments feedback has been addressed."""
review_context = '\n---\n'.join(review_comments)
with open(
os.path.join(
os.path.dirname(__file__), 'prompts/guess_success/pr-review-check.jinja'
),
'r',
) as f:
template = jinja2.Template(f.read())
prompt = template.render(
issue_context=issues_context,
review_context=review_context,
last_message=last_message,
)
return self._check_feedback_with_llm(prompt, llm_config)
def guess_success(
self, issue: GithubIssue, history: list[Event], llm_config: LLMConfig
) -> tuple[bool, None | list[bool], str]:
"""Guess if the issue is fixed based on the history and the issue description."""
last_message = history[-1].message
issues_context = json.dumps(issue.closing_issues, indent=4)
success_list = []
explanation_list = []
# Handle PRs with file-specific review comments
if issue.review_threads:
for review_thread in issue.review_threads:
if issues_context and last_message:
success, explanation = self._check_review_thread(
review_thread, issues_context, last_message, llm_config
)
else:
success, explanation = False, 'Missing context or message'
success_list.append(success)
explanation_list.append(explanation)
# Handle PRs with only thread comments (no file-specific review comments)
elif issue.thread_comments:
if issue.thread_comments and issues_context and last_message:
success, explanation = self._check_thread_comments(
issue.thread_comments, issues_context, last_message, llm_config
)
else:
success, explanation = (
False,
'Missing thread comments, context or message',
)
success_list.append(success)
explanation_list.append(explanation)
elif issue.review_comments:
# Handle PRs with only review comments (no file-specific review comments or thread comments)
if issue.review_comments and issues_context and last_message:
success, explanation = self._check_review_comments(
issue.review_comments, issues_context, last_message, llm_config
)
else:
success, explanation = (
False,
'Missing review comments, context or message',
)
success_list.append(success)
explanation_list.append(explanation)
else:
# No review comments, thread comments, or file-level review comments found
return False, None, 'No feedback was found to process'
# Return overall success (all must be true) and explanations
if not success_list:
return False, None, 'No feedback was processed'
return all(success_list), success_list, '\n'.join(explanation_list)
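Both `guess_success` implementations parse the same two-field answer format that the `guess_success` prompt templates (further down in this diff) ask the LLM to produce; a standalone sketch of that parse:
```python
import re

# Example answer in the format requested by the guess_success templates.
answer = """--- success
true
--- explanation
All review comments were addressed by the new commits."""

pattern = r'--- success\n*(true|false)\n*--- explanation*\n((?:.|\n)*)'
match = re.search(pattern, answer)
assert match is not None
success = match.group(1).lower() == 'true'
explanation = match.group(2).strip()
print(success, explanation)
```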


@@ -0,0 +1,4 @@
# Patching code
Originally from [whatthepatch](https://github.com/cscorley/whatthepatch)
(MIT license)


@@ -0,0 +1,6 @@
# -*- coding: utf-8 -*-
from .patch import parse_patch
from .apply import apply_diff
__all__ = ["parse_patch", "apply_diff"]


@@ -0,0 +1,120 @@
# -*- coding: utf-8 -*-
import os.path
import subprocess
import tempfile
from .exceptions import HunkApplyException, SubprocessException
from .snippets import remove, which
def _apply_diff_with_subprocess(diff, lines, reverse=False):
# call out to patch program
patchexec = which("patch")
if not patchexec:
raise SubprocessException("cannot find patch program", code=-1)
tempdir = tempfile.gettempdir()
filepath = os.path.join(tempdir, "wtp-" + str(hash(diff.header)))
oldfilepath = filepath + ".old"
newfilepath = filepath + ".new"
rejfilepath = filepath + ".rej"
patchfilepath = filepath + ".patch"
with open(oldfilepath, "w") as f:
f.write("\n".join(lines) + "\n")
with open(patchfilepath, "w") as f:
f.write(diff.text)
args = [
patchexec,
"--reverse" if reverse else "--forward",
"--quiet",
"--no-backup-if-mismatch",
"-o",
newfilepath,
"-i",
patchfilepath,
"-r",
rejfilepath,
oldfilepath,
]
ret = subprocess.call(args)
with open(newfilepath) as f:
lines = f.read().splitlines()
try:
with open(rejfilepath) as f:
rejlines = f.read().splitlines()
except IOError:
rejlines = None
remove(oldfilepath)
remove(newfilepath)
remove(rejfilepath)
remove(patchfilepath)
# do this last to ensure files get cleaned up
if ret != 0:
raise SubprocessException("patch program failed", code=ret)
return lines, rejlines
def _reverse(changes):
def _reverse_change(c):
return c._replace(old=c.new, new=c.old)
return [_reverse_change(c) for c in changes]
def apply_diff(diff, text, reverse=False, use_patch=False):
try:
lines = text.splitlines()
except AttributeError:
lines = list(text)
if use_patch:
return _apply_diff_with_subprocess(diff, lines, reverse)
n_lines = len(lines)
changes = _reverse(diff.changes) if reverse else diff.changes
# check that the source text matches the context of the diff
for old, new, line, hunk in changes:
# might have to check for line is None here for ed scripts
if old is not None and line is not None:
if old > n_lines:
raise HunkApplyException(
'context line {n}, "{line}" does not exist in source'.format(
n=old, line=line
),
hunk=hunk,
)
if lines[old - 1] != line:
raise HunkApplyException(
'context line {n}, "{line}" does not match "{sl}"'.format(
n=old, line=line, sl=lines[old - 1]
),
hunk=hunk,
)
# for calculating the old line
r = 0
i = 0
for old, new, line, hunk in changes:
if old is not None and new is None:
del lines[old - 1 - r + i]
r += 1
elif old is None and new is not None:
lines.insert(new - 1, line)
i += 1
elif old is not None and new is not None:
# Sometimes, people remove hunks from patches, making these
# numbers completely unreliable. Because they're jerks.
pass
return lines
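A usage sketch for the pure-Python path (the diff text is made up, and `parse_patch` comes from the companion `patch` module, presumably the file whose diff is suppressed below):
```python
from openhands.resolver.patching import apply_diff, parse_patch

source = 'hello\nworld\n'
diff_text = """--- a/file.txt
+++ b/file.txt
@@ -1,2 +1,2 @@
 hello
-world
+there
"""

# parse_patch yields one diff object per file in the patch.
for diff in parse_patch(diff_text):
    print(apply_diff(diff, source))  # ['hello', 'there']
```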


@@ -0,0 +1,31 @@
class PatchingException(Exception):
pass
class HunkException(PatchingException):
def __init__(self, msg, hunk=None):
self.hunk = hunk
if hunk is not None:
super(HunkException, self).__init__(
"{msg}, in hunk #{n}".format(msg=msg, n=hunk)
)
else:
super(HunkException, self).__init__(msg)
class ApplyException(PatchingException):
pass
class SubprocessException(ApplyException):
def __init__(self, msg, code):
super(SubprocessException, self).__init__(msg)
self.code = code
class HunkApplyException(HunkException, ApplyException, ValueError):
pass
class ParseException(HunkException, ValueError):
pass

File diff suppressed because it is too large.


@@ -0,0 +1,63 @@
# -*- coding: utf-8 -*-
import os
from shutil import rmtree
def remove(path):
if os.path.exists(path):
if os.path.isdir(path):
rmtree(path)
else:
os.remove(path)
# find all indices of a list of strings that match a regex
def findall_regex(items, regex):
found = list()
for i in range(0, len(items)):
k = regex.match(items[i])
if k:
found.append(i)
k = None
return found
def split_by_regex(items, regex):
splits = list()
indices = findall_regex(items, regex)
if not indices:
splits.append(items)
return splits
# Add first chunk before first match
splits.append(items[0 : indices[0]])
# Add chunks between matches
for i in range(len(indices) - 1):
splits.append(items[indices[i] : indices[i + 1]])
# Add final chunk after last match
splits.append(items[indices[-1] :])
return splits
# http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python
def which(program):
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
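A small sketch of `split_by_regex`, which keeps each matched line at the head of its chunk (the import path assumes the module lands at `openhands/resolver/patching/snippets.py`):
```python
import re

from openhands.resolver.patching.snippets import split_by_regex

items = ['intro', '--- a', 'one', '--- b', 'two']
print(split_by_regex(items, re.compile(r'^---')))
# [['intro'], ['--- a', 'one'], ['--- b', 'two']]
```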


@@ -0,0 +1,18 @@
Given the following issue description and the last message from an AI agent attempting to fix it, determine if the issue has been successfully resolved.
Issue description:
{{ issue_context }}
Last message from AI agent:
{{ last_message }}
(1) has the issue been successfully resolved?
(2) If the issue has been resolved, please provide an explanation of what was done in the PR that can be sent to a human reviewer on github. If the issue has not been resolved, please provide an explanation of why.
Answer in exactly the format below, with only true or false for success, and an explanation of the result.
--- success
true/false
--- explanation
...


@@ -0,0 +1,24 @@
You are given one or more issue descriptions, a piece of feedback to resolve the issues, and the last message from an AI agent attempting to incorporate the feedback. If the feedback is addressed to a specific code file, then the file locations will be provided as well. Determine if the feedback has been successfully resolved.
Issue descriptions:
{{ issue_context }}
Feedback:
{{ feedback }}
File locations:
{{ files_context }}
Last message from AI agent:
{{ last_message }}
(1) has the feedback been successfully incorporated?
(2) If the feedback has been incorporated, please provide an explanation of what was done that can be sent to a human reviewer on github. If the feedback has not been resolved, please provide an explanation of why.
Answer in exactly the format below, with only true or false for success, and an explanation of the result.
--- success
true/false
--- explanation
...


@@ -0,0 +1,21 @@
You are given one or more issue descriptions, the PR review comments, and the last message from an AI agent attempting to address the feedback. Determine if the feedback has been successfully resolved.
Issue descriptions:
{{ issue_context }}
PR Review Comments:
{{ review_context }}
Last message from AI agent:
{{ last_message }}
(1) has the feedback been successfully incorporated?
(2) If the feedback has been incorporated, please provide an explanation of what was done that can be sent to a human reviewer on github. If the feedback has not been resolved, please provide an explanation of why.
Answer in exactly the format below, with only true or false for success, and an explanation of the result.
--- success
true/false
--- explanation
...


@@ -0,0 +1,21 @@
You are given one or more issue descriptions, the PR thread comments, and the last message from an AI agent attempting to address the feedback. Determine if the feedback has been successfully resolved.
Issue descriptions:
{{ issue_context }}
PR Thread Comments:
{{ thread_context }}
Last message from AI agent:
{{ last_message }}
(1) has the feedback been successfully incorporated?
(2) If the feedback has been incorporated, please provide an explanation of what was done that can be sent to a human reviewer on github. If the feedback has not been resolved, please provide an explanation of why.
Answer in exactly the format below, with only true or false for success, and an explanation of the result.
--- success
true/false
--- explanation
...


@@ -0,0 +1,4 @@
This is a Python repo for openhands-resolver, a library that attempts to resolve GitHub issues with the AI agent OpenHands.
- Setup: `poetry install --with test --with dev`
- Testing: `poetry run pytest tests/test_*.py`


@@ -0,0 +1,7 @@
OpenHands is an automated AI software engineer. It is a repo with a Python backend
(in the `openhands` directory) and a TypeScript frontend (in the `frontend` directory).
- Setup: To set up the repo, including frontend/backend you can `make build`
- Backend Testing: All tests are in `tests/unit/test_*.py`. To test new code, you
can do `poetry run pytest tests/unit/test_xxx.py` where `xxx` is the appropriate
file for the current functionality. Write all tests with pytest.


@@ -0,0 +1,4 @@
This is a node repo for an RSS parser.
- Setup: `yes | npm install`
- Testing: `SKIP_BROWSER_TESTS=1 npm test`
- Writing Tests: Add to the `test` directory.


@@ -0,0 +1,24 @@
The current code is an attempt at fixing one or more issues. The code is not satisfactory, and follow-up feedback has been provided to address this.
The feedback may be addressed to specific code files; in that case, the file locations will be provided as well.
Please update the code based on the feedback for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
# Issues addressed
{{ issues }}
# Review comments
{{ review_comments }}
# Review threads
{{ review_threads }}
# Review thread files
{{ files }}
IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.{% if repo_instruction %}
Some basic information about this repository:
{{ repo_instruction }}{% endif %}
When you think you have fixed the issue through code changes, please finish the interaction.


@@ -0,0 +1,17 @@
Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
# Problem Statement
{{ body }}
IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.{% if repo_instruction %}
Some basic information about this repository:
{{ repo_instruction }}{% endif %}
For all changes to actual application code (e.g. in Python or JavaScript), add an appropriate test to the testing directory to make sure that the issue has been fixed.
Run the tests, and if they pass you are done!
You do NOT need to write new tests if there are only changes to documentation or configuration files.
When you think you have fixed the issue through code changes, please call the finish action to end the interaction.


@@ -0,0 +1,13 @@
Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
# Problem Statement
{{ body }}
IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.{% if repo_instruction %}
Some basic information about this repository:
{{ repo_instruction }}{% endif %}
When you think you have fixed the issue through code changes, please finish the interaction.


@@ -0,0 +1,3 @@
Please create a concise overview of the following changes, commenting on whether all issues have been successfully resolved or if there are still issues remaining:
{{ comment_message }}


@@ -0,0 +1,387 @@
# flake8: noqa: E501
import argparse
import asyncio
import multiprocessing as mp
import os
import pathlib
import subprocess
from typing import Awaitable, TextIO
from tqdm import tqdm
import openhands
from openhands.core.config import LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.resolve_issue import (
issue_handler_factory,
process_issue,
)
from openhands.resolver.resolver_output import ResolverOutput
def cleanup():
print('Cleaning up child processes...')
for process in mp.active_children():
print(f'Terminating child process: {process.name}')
process.terminate()
process.join()
# This function tracks progress AND writes the output to a JSONL file
async def update_progress(
output: Awaitable[ResolverOutput], output_fp: TextIO, pbar: tqdm
) -> None:
resolved_output = await output
pbar.update(1)
pbar.set_description(f'issue {resolved_output.issue.number}')
pbar.set_postfix_str(
f'Test Result: {resolved_output.metrics.get("test_result", "N/A") if resolved_output.metrics else "N/A"}'
)
logger.info(
f'Finished issue {resolved_output.issue.number}: {resolved_output.metrics.get("test_result", "N/A") if resolved_output.metrics else "N/A"}'
)
output_fp.write(resolved_output.model_dump_json() + '\n')
output_fp.flush()
async def resolve_issues(
owner: str,
repo: str,
token: str,
username: str,
max_iterations: int,
limit_issues: int | None,
num_workers: int,
output_dir: str,
llm_config: LLMConfig,
runtime_container_image: str,
prompt_template: str,
issue_type: str,
repo_instruction: str | None,
issue_numbers: list[int] | None,
) -> None:
"""Resolve multiple github issues.
Args:
owner: Github owner of the repo.
repo: Github repository to resolve issues in form of `owner/repo`.
token: Github token to access the repository.
username: Github username to access the repository.
max_iterations: Maximum number of iterations to run.
limit_issues: Limit the number of issues to resolve.
num_workers: Number of workers to use for parallel processing.
output_dir: Output directory to write the results.
llm_config: Configuration for the language model.
runtime_container_image: Container image to use.
prompt_template: Prompt template to use.
issue_type: Type of issue to resolve (issue or pr).
repo_instruction: Repository instruction to use.
issue_numbers: List of issue numbers to resolve.
"""
issue_handler = issue_handler_factory(issue_type, owner, repo, token)
# Load dataset
issues: list[GithubIssue] = issue_handler.get_converted_issues()
if issue_numbers is not None:
issues = [issue for issue in issues if issue.number in issue_numbers]
logger.info(f'Limiting resolving to issues {issue_numbers}.')
if limit_issues is not None:
issues = issues[:limit_issues]
logger.info(f'Limiting resolving to first {limit_issues} issues.')
# TEST METADATA
model_name = llm_config.model.split('/')[-1]
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(os.path.join(output_dir, 'infer_logs')).mkdir(
parents=True, exist_ok=True
)
logger.info(f'Using output directory: {output_dir}')
# checkout the repo
repo_dir = os.path.join(output_dir, 'repo')
if not os.path.exists(repo_dir):
checkout_output = subprocess.check_output(
[
'git',
'clone',
f'https://{username}:{token}@github.com/{owner}/{repo}',
f'{output_dir}/repo',
]
).decode('utf-8')
if 'fatal' in checkout_output:
raise RuntimeError(f'Failed to clone repository: {checkout_output}')
# get the commit id of current repo for reproducibility
base_commit = (
subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_dir)
.decode('utf-8')
.strip()
)
logger.info(f'Base commit: {base_commit}')
if repo_instruction is None:
# Check for .openhands_instructions file in the workspace directory
openhands_instructions_path = os.path.join(repo_dir, '.openhands_instructions')
if os.path.exists(openhands_instructions_path):
with open(openhands_instructions_path, 'r') as f:
repo_instruction = f.read()
# OUTPUT FILE
output_file = os.path.join(output_dir, 'output.jsonl')
logger.info(f'Writing output to {output_file}')
finished_numbers = set()
if os.path.exists(output_file):
with open(output_file, 'r') as f:
for line in f:
data = ResolverOutput.model_validate_json(line)
finished_numbers.add(data.issue.number)
logger.warning(
f'Output file {output_file} already exists. Loaded {len(finished_numbers)} finished issues.'
)
output_fp = open(output_file, 'a')
logger.info(
f'Resolving issues with model {model_name}, max iterations {max_iterations}.'
)
# =============================================
# filter out finished issues
new_issues = []
for issue in issues:
if issue.number in finished_numbers:
logger.info(f'Skipping issue {issue.number} as it is already finished.')
continue
new_issues.append(issue)
logger.info(
f'Finished issues: {len(finished_numbers)}, Remaining issues: {len(new_issues)}'
)
# =============================================
pbar = tqdm(total=len(new_issues))
# This sets the multi-processing
logger.info(f'Using {num_workers} workers.')
try:
tasks = []
for issue in new_issues:
# checkout to pr branch
if issue_type == 'pr':
logger.info(
f'Checking out to PR branch {issue.head_branch} for issue {issue.number}'
)
subprocess.check_output(
['git', 'checkout', f'{issue.head_branch}'],
cwd=repo_dir,
)
base_commit = (
subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_dir)
.decode('utf-8')
.strip()
)
task = update_progress(
process_issue(
issue,
base_commit,
max_iterations,
llm_config,
output_dir,
runtime_container_image,
prompt_template,
issue_handler,
repo_instruction,
bool(num_workers > 1),
),
output_fp,
pbar,
)
tasks.append(task)
# Use asyncio.gather with a semaphore to limit concurrency
sem = asyncio.Semaphore(num_workers)
async def run_with_semaphore(task):
async with sem:
return await task
await asyncio.gather(*[run_with_semaphore(task) for task in tasks])
except KeyboardInterrupt:
print('KeyboardInterrupt received. Cleaning up...')
cleanup()
output_fp.close()
logger.info('Finished.')
def main():
parser = argparse.ArgumentParser(description='Resolve multiple issues from Github.')
parser.add_argument(
'--repo',
type=str,
required=True,
help='Github repository to resolve issues in form of `owner/repo`.',
)
parser.add_argument(
'--token',
type=str,
default=None,
help='Github token to access the repository.',
)
parser.add_argument(
'--username',
type=str,
default=None,
help='Github username to access the repository.',
)
parser.add_argument(
'--runtime-container-image',
type=str,
default=None,
help='Container image to use.',
)
parser.add_argument(
'--max-iterations',
type=int,
default=50,
help='Maximum number of iterations to run.',
)
parser.add_argument(
'--limit-issues',
type=int,
default=None,
help='Limit the number of issues to resolve.',
)
parser.add_argument(
'--issue-numbers',
type=str,
default=None,
help='Comma separated list of issue numbers to resolve.',
)
parser.add_argument(
'--num-workers',
type=int,
default=1,
help='Number of workers to use for parallel processing.',
)
parser.add_argument(
'--output-dir',
type=str,
default='output',
help='Output directory to write the results.',
)
parser.add_argument(
'--llm-model',
type=str,
default=None,
help='LLM model to use.',
)
parser.add_argument(
'--llm-api-key',
type=str,
default=None,
help='LLM API key to use.',
)
parser.add_argument(
'--llm-base-url',
type=str,
default=None,
help='LLM base URL to use.',
)
parser.add_argument(
'--prompt-file',
type=str,
default=None,
help='Path to the prompt template file in Jinja format.',
)
parser.add_argument(
'--repo-instruction-file',
type=str,
default=None,
help='Path to the repository instruction file in text format.',
)
parser.add_argument(
'--issue-type',
type=str,
default='issue',
choices=['issue', 'pr'],
help='Type of issue to resolve, either open issue or pr comments.',
)
my_args = parser.parse_args()
runtime_container_image = my_args.runtime_container_image
if runtime_container_image is None:
runtime_container_image = (
f'ghcr.io/all-hands-ai/runtime:{openhands.__version__}-nikolaik'
)
owner, repo = my_args.repo.split('/')
token = my_args.token if my_args.token else os.getenv('GITHUB_TOKEN')
username = my_args.username if my_args.username else os.getenv('GITHUB_USERNAME')
if not username:
raise ValueError('Github username is required.')
if not token:
raise ValueError('Github token is required.')
llm_config = LLMConfig(
model=my_args.llm_model or os.environ['LLM_MODEL'],
api_key=my_args.llm_api_key or os.environ['LLM_API_KEY'],
base_url=my_args.llm_base_url or os.environ.get('LLM_BASE_URL', None),
)
repo_instruction = None
if my_args.repo_instruction_file:
with open(my_args.repo_instruction_file, 'r') as f:
repo_instruction = f.read()
issue_numbers = None
if my_args.issue_numbers:
issue_numbers = [int(number) for number in my_args.issue_numbers.split(',')]
issue_type = my_args.issue_type
# Read the prompt template
prompt_file = my_args.prompt_file
if prompt_file is None:
if issue_type == 'issue':
prompt_file = os.path.join(
os.path.dirname(__file__), 'prompts/resolve/basic-with-tests.jinja'
)
else:
prompt_file = os.path.join(
os.path.dirname(__file__), 'prompts/resolve/basic-followup.jinja'
)
with open(prompt_file, 'r') as f:
prompt_template = f.read()
asyncio.run(
resolve_issues(
owner=owner,
repo=repo,
token=token,
username=username,
runtime_container_image=runtime_container_image,
max_iterations=my_args.max_iterations,
limit_issues=my_args.limit_issues,
num_workers=my_args.num_workers,
output_dir=my_args.output_dir,
llm_config=llm_config,
prompt_template=prompt_template,
issue_type=issue_type,
repo_instruction=repo_instruction,
issue_numbers=issue_numbers,
)
)
if __name__ == '__main__':
main()

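The batch resolver above bounds concurrency with an asyncio.Semaphore rather than a process pool; a minimal, self-contained sketch of the same gather-with-semaphore pattern (names here are illustrative, not from the resolver):

import asyncio

async def work(n: int) -> int:
    # stand-in for an awaited process_issue(...) coroutine
    await asyncio.sleep(0.1)
    return n

async def run_bounded(items: range, num_workers: int) -> list[int]:
    sem = asyncio.Semaphore(num_workers)

    async def bounded(coro):
        # at most num_workers coroutines run inside the semaphore at once
        async with sem:
            return await coro

    return await asyncio.gather(*[bounded(work(i)) for i in items])

print(asyncio.run(run_bounded(range(5), num_workers=2)))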
View File

@ -0,0 +1,624 @@
# flake8: noqa: E501
import asyncio
import dataclasses
import json
import os
import pathlib
import shutil
import subprocess
from typing import Any
from uuid import uuid4
from termcolor import colored
import openhands
from openhands.controller.state.state import State
from openhands.core.config import (
AgentConfig,
AppConfig,
LLMConfig,
SandboxConfig,
)
from openhands.core.logger import openhands_logger as logger
from openhands.core.main import create_runtime, run_controller
from openhands.events.action import CmdRunAction, MessageAction
from openhands.events.observation import (
CmdOutputObservation,
ErrorObservation,
Observation,
)
from openhands.events.stream import EventStreamSubscriber
from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.issue_definitions import (
IssueHandler,
IssueHandlerInterface,
PRHandler,
)
from openhands.resolver.resolver_output import ResolverOutput
from openhands.resolver.utils import (
codeact_user_response,
reset_logger_for_multiprocessing,
)
from openhands.runtime.base import Runtime
# Don't make this configurable for now, unless we have other competitive agents
AGENT_CLASS = 'CodeActAgent'
def initialize_runtime(
runtime: Runtime,
):
"""Initialize the runtime for the agent.
This function is called before the runtime is used to run the agent.
Currently it does nothing.
"""
logger.info('-' * 30)
logger.info('BEGIN Runtime Initialization Fn')
logger.info('-' * 30)
obs: Observation
action = CmdRunAction(command='cd /workspace')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(f'Failed to change directory to /workspace.\n{obs}')
action = CmdRunAction(command='git config --global core.pager ""')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(f'Failed to set git config.\n{obs}')
async def complete_runtime(
runtime: Runtime,
base_commit: str,
) -> dict[str, Any]:
"""Complete the runtime for the agent.
This function is called after the agent has finished running.
If you need to do something in the sandbox to get the correctness metric after
the agent has run, modify this function.
"""
logger.info('-' * 30)
logger.info('BEGIN Runtime Completion Fn')
logger.info('-' * 30)
obs: Observation
action = CmdRunAction(command='cd /workspace')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(
f'Failed to change directory to /workspace. Observation: {obs}'
)
action = CmdRunAction(command='git config --global core.pager ""')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(f'Failed to set git config. Observation: {obs}')
action = CmdRunAction(command='git config --global --add safe.directory /workspace')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(f'Failed to set git config. Observation: {obs}')
action = CmdRunAction(command='git add -A')
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
if not isinstance(obs, CmdOutputObservation) or obs.exit_code != 0:
raise RuntimeError(f'Failed to git add. Observation: {obs}')
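# Retrieve the final patch: retry `git diff` up to 5 times, extending the timeout on each attempt since large diffs can be slow.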
n_retries = 0
git_patch = None
while n_retries < 5:
action = CmdRunAction(
command=f'git diff --no-color --cached {base_commit}',
keep_prompt=False,
)
action.timeout = 600 + 100 * n_retries
logger.info(action, extra={'msg_type': 'ACTION'})
obs = runtime.run_action(action)
logger.info(obs, extra={'msg_type': 'OBSERVATION'})
n_retries += 1
if isinstance(obs, CmdOutputObservation):
if obs.exit_code == 0:
git_patch = obs.content.strip()
break
else:
logger.info('Failed to get git diff, retrying...')
await asyncio.sleep(10)
elif isinstance(obs, ErrorObservation):
logger.error(f'Error occurred: {obs.content}. Retrying...')
await asyncio.sleep(10)
else:
raise ValueError(f'Unexpected observation type: {type(obs)}')
logger.info('-' * 30)
logger.info('END Runtime Completion Fn')
logger.info('-' * 30)
return {'git_patch': git_patch}
async def process_issue(
issue: GithubIssue,
base_commit: str,
max_iterations: int,
llm_config: LLMConfig,
output_dir: str,
runtime_container_image: str,
prompt_template: str,
issue_handler: IssueHandlerInterface,
repo_instruction: str | None = None,
reset_logger: bool = False,
) -> ResolverOutput:
# Setup the logger properly, so you can run multi-processing to parallelize processing
if reset_logger:
log_dir = os.path.join(output_dir, 'infer_logs')
reset_logger_for_multiprocessing(logger, str(issue.number), log_dir)
else:
logger.info(f'Starting fixing issue {issue.number}.')
workspace_base = os.path.join(
output_dir, 'workspace', f'{issue_handler.issue_type}_{issue.number}'
)
# Get the absolute path of the workspace base
workspace_base = os.path.abspath(workspace_base)
# write the repo to the workspace
if os.path.exists(workspace_base):
shutil.rmtree(workspace_base)
shutil.copytree(os.path.join(output_dir, 'repo'), workspace_base)
config = AppConfig(
default_agent='CodeActAgent',
runtime='eventstream',
max_budget_per_task=4,
max_iterations=max_iterations,
sandbox=SandboxConfig(
runtime_container_image=runtime_container_image,
enable_auto_lint=False,
use_host_network=False,
# large enough timeout, since some testcases take very long to run
timeout=300,
),
# do not mount workspace
workspace_base=workspace_base,
workspace_mount_path=workspace_base,
agents={'CodeActAgent': AgentConfig(disabled_microagents=['github'])},
)
config.set_llm_config(llm_config)
runtime = create_runtime(config, sid=f'{issue.number}')
await runtime.connect()
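# Mirror every event from the runtime's event stream into the logs for easier debugging.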
async def on_event(evt):
logger.info(evt)
runtime.event_stream.subscribe(EventStreamSubscriber.MAIN, on_event, str(uuid4()))
initialize_runtime(runtime)
instruction, images_urls = issue_handler.get_instruction(
issue, prompt_template, repo_instruction
)
# Here's how you can run the agent (similar to the `main` function) and get the final task state
action = MessageAction(content=instruction, image_urls=images_urls)
try:
state: State | None = await run_controller(
config=config,
initial_user_action=action,
runtime=runtime,
fake_user_response_fn=codeact_user_response,
)
if state is None:
raise RuntimeError('Failed to run the agent.')
except (ValueError, RuntimeError) as e:
error_msg = f'Agent failed with error: {str(e)}'
logger.error(error_msg)
state = None
last_error: str | None = error_msg
# Get git patch
return_val = await complete_runtime(runtime, base_commit)
git_patch = return_val['git_patch']
logger.info(
f'Got git diff for instance {issue.number}:\n--------\n{git_patch}\n--------'
)
# Serialize histories and set defaults for failed state
if state is None:
histories = []
metrics = None
success = False
comment_success = None
success_explanation = 'Agent failed to run'
# last_error already holds the specific error captured in the except block above
else:
histories = [dataclasses.asdict(event) for event in state.history]
metrics = state.metrics.get() if state.metrics else None
# determine success based on the history and the issue description
success, comment_success, success_explanation = issue_handler.guess_success(
issue, state.history, llm_config
)
if issue_handler.issue_type == 'pr' and comment_success:
success_log = 'I have updated the PR and resolved some of the issues that were cited in the pull request review. Specifically, I identified the following revision requests, and all the ones I believe I successfully resolved are checked off. The unchecked ones I was not able to resolve, so manual intervention may be required:\n'
try:
explanations = json.loads(success_explanation)
except json.JSONDecodeError:
logger.error(
f'Failed to parse success_explanation as JSON: {success_explanation}'
)
explanations = [str(success_explanation)] # Use raw string as fallback
for success_indicator, explanation in zip(comment_success, explanations):
status = (
colored('[X]', 'green')
if success_indicator
else colored('[ ]', 'red')
)
bullet_point = colored('-', 'yellow')
success_log += f'\n{bullet_point} {status}: {explanation}'
logger.info(success_log)
last_error = state.last_error if state.last_error else None
# Save the output
output = ResolverOutput(
issue=issue,
issue_type=issue_handler.issue_type,
instruction=instruction,
base_commit=base_commit,
git_patch=git_patch,
history=histories,
metrics=metrics,
success=success,
comment_success=comment_success,
success_explanation=success_explanation,
error=last_error,
)
return output
def issue_handler_factory(
issue_type: str, owner: str, repo: str, token: str
) -> IssueHandlerInterface:
if issue_type == 'issue':
return IssueHandler(owner, repo, token)
elif issue_type == 'pr':
return PRHandler(owner, repo, token)
else:
raise ValueError(f'Invalid issue type: {issue_type}')
async def resolve_issue(
owner: str,
repo: str,
token: str,
username: str,
max_iterations: int,
output_dir: str,
llm_config: LLMConfig,
runtime_container_image: str,
prompt_template: str,
issue_type: str,
repo_instruction: str | None,
issue_number: int,
comment_id: int | None,
reset_logger: bool = False,
) -> None:
"""Resolve a single github issue.
Args:
owner: Github owner of the repo.
repo: Github repository name (without the owner prefix).
token: Github token to access the repository.
username: Github username to access the repository.
max_iterations: Maximum number of iterations to run.
output_dir: Output directory to write the results.
llm_config: Configuration for the language model.
runtime_container_image: Container image to use.
prompt_template: Prompt template to use.
issue_type: Type of issue to resolve (issue or pr).
repo_instruction: Repository instruction to use.
issue_number: Issue number to resolve.
comment_id: Optional ID of a specific comment to focus on.
reset_logger: Whether to reset the logger for multiprocessing.
"""
issue_handler = issue_handler_factory(issue_type, owner, repo, token)
# Load dataset
issues: list[GithubIssue] = issue_handler.get_converted_issues(
comment_id=comment_id
)
# Find the specific issue
issue = next((i for i in issues if i.number == issue_number), None)
if not issue:
raise ValueError(f'Issue {issue_number} not found')
if comment_id is not None:
if (
issue_type == 'pr'
and not issue.review_comments
and not issue.review_threads
and not issue.thread_comments
):
raise ValueError(
f'Comment ID {comment_id} did not have a match for issue {issue.number}'
)
if issue_type == 'issue' and not issue.thread_comments:
raise ValueError(
f'Comment ID {comment_id} did not have a match for issue {issue.number}'
)
# OUTPUT SETUP
model_name = llm_config.model.split('/')[-1]
pathlib.Path(output_dir).mkdir(parents=True, exist_ok=True)
pathlib.Path(os.path.join(output_dir, 'infer_logs')).mkdir(
parents=True, exist_ok=True
)
logger.info(f'Using output directory: {output_dir}')
# checkout the repo
repo_dir = os.path.join(output_dir, 'repo')
if not os.path.exists(repo_dir):
checkout_output = subprocess.check_output(
[
'git',
'clone',
f'https://{username}:{token}@github.com/{owner}/{repo}',
f'{output_dir}/repo',
]
).decode('utf-8')
if 'fatal' in checkout_output:
raise RuntimeError(f'Failed to clone repository: {checkout_output}')
# get the commit id of current repo for reproducibility
base_commit = (
subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_dir)
.decode('utf-8')
.strip()
)
logger.info(f'Base commit: {base_commit}')
if repo_instruction is None:
# Check for .openhands_instructions file in the workspace directory
openhands_instructions_path = os.path.join(repo_dir, '.openhands_instructions')
if os.path.exists(openhands_instructions_path):
with open(openhands_instructions_path, 'r') as f:
repo_instruction = f.read()
# OUTPUT FILE
output_file = os.path.join(output_dir, 'output.jsonl')
logger.info(f'Writing output to {output_file}')
# Check if this issue was already processed
if os.path.exists(output_file):
with open(output_file, 'r') as f:
for line in f:
data = ResolverOutput.model_validate_json(line)
if data.issue.number == issue_number:
logger.warning(
f'Issue {issue_number} was already processed. Skipping.'
)
return
output_fp = open(output_file, 'a')
logger.info(
f'Resolving issue {issue_number} with Agent {AGENT_CLASS}, model {model_name}, max iterations {max_iterations}.'
)
try:
# checkout to pr branch if needed
if issue_type == 'pr':
logger.info(
f'Checking out to PR branch {issue.head_branch} for issue {issue.number}'
)
subprocess.check_output(
['git', 'checkout', f'{issue.head_branch}'],
cwd=repo_dir,
)
base_commit = (
subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=repo_dir)
.decode('utf-8')
.strip()
)
output = await process_issue(
issue,
base_commit,
max_iterations,
llm_config,
output_dir,
runtime_container_image,
prompt_template,
issue_handler,
repo_instruction,
reset_logger,
)
output_fp.write(output.model_dump_json() + '\n')
output_fp.flush()
finally:
output_fp.close()
logger.info('Finished.')
def main():
import argparse
def int_or_none(value):
if value.lower() == 'none':
return None
else:
return int(value)
parser = argparse.ArgumentParser(description='Resolve a single issue from Github.')
parser.add_argument(
'--repo',
type=str,
required=True,
help='Github repository to resolve issues in form of `owner/repo`.',
)
parser.add_argument(
'--token',
type=str,
default=None,
help='Github token to access the repository.',
)
parser.add_argument(
'--username',
type=str,
default=None,
help='Github username to access the repository.',
)
parser.add_argument(
'--runtime-container-image',
type=str,
default=None,
help='Container image to use.',
)
parser.add_argument(
'--max-iterations',
type=int,
default=50,
help='Maximum number of iterations to run.',
)
parser.add_argument(
'--issue-number',
type=int,
required=True,
help='Issue number to resolve.',
)
parser.add_argument(
'--comment-id',
type=int_or_none,
required=False,
default=None,
help='Resolve a specific comment',
)
parser.add_argument(
'--output-dir',
type=str,
default='output',
help='Output directory to write the results.',
)
parser.add_argument(
'--llm-model',
type=str,
default=None,
help='LLM model to use.',
)
parser.add_argument(
'--llm-api-key',
type=str,
default=None,
help='LLM API key to use.',
)
parser.add_argument(
'--llm-base-url',
type=str,
default=None,
help='LLM base URL to use.',
)
parser.add_argument(
'--prompt-file',
type=str,
default=None,
help='Path to the prompt template file in Jinja format.',
)
parser.add_argument(
'--repo-instruction-file',
type=str,
default=None,
help='Path to the repository instruction file in text format.',
)
parser.add_argument(
'--issue-type',
type=str,
default='issue',
choices=['issue', 'pr'],
help='Type of issue to resolve, either open issue or pr comments.',
)
my_args = parser.parse_args()
runtime_container_image = my_args.runtime_container_image
if runtime_container_image is None:
runtime_container_image = (
f'ghcr.io/all-hands-ai/runtime:{openhands.__version__}-nikolaik'
)
owner, repo = my_args.repo.split('/')
token = my_args.token if my_args.token else os.getenv('GITHUB_TOKEN')
username = my_args.username if my_args.username else os.getenv('GITHUB_USERNAME')
if not username:
raise ValueError('Github username is required.')
if not token:
raise ValueError('Github token is required.')
llm_config = LLMConfig(
model=my_args.llm_model or os.environ['LLM_MODEL'],
api_key=my_args.llm_api_key or os.environ['LLM_API_KEY'],
base_url=my_args.llm_base_url or os.environ.get('LLM_BASE_URL', None),
)
repo_instruction = None
if my_args.repo_instruction_file:
with open(my_args.repo_instruction_file, 'r') as f:
repo_instruction = f.read()
issue_type = my_args.issue_type
# Read the prompt template
prompt_file = my_args.prompt_file
if prompt_file is None:
if issue_type == 'issue':
prompt_file = os.path.join(
os.path.dirname(__file__), 'prompts/resolve/basic-with-tests.jinja'
)
else:
prompt_file = os.path.join(
os.path.dirname(__file__), 'prompts/resolve/basic-followup.jinja'
)
with open(prompt_file, 'r') as f:
prompt_template = f.read()
asyncio.run(
resolve_issue(
owner=owner,
repo=repo,
token=token,
username=username,
runtime_container_image=runtime_container_image,
max_iterations=my_args.max_iterations,
output_dir=my_args.output_dir,
llm_config=llm_config,
prompt_template=prompt_template,
issue_type=issue_type,
repo_instruction=repo_instruction,
issue_number=my_args.issue_number,
comment_id=my_args.comment_id,
)
)
if __name__ == '__main__':
main()

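main() above is a thin CLI wrapper; resolve_issue can also be driven directly from Python. A minimal sketch, assuming this file lives at openhands/resolver/resolve_issue.py (all credentials and values below are placeholders):

import asyncio

from openhands.core.config import LLMConfig
from openhands.resolver.resolve_issue import resolve_issue

asyncio.run(
    resolve_issue(
        owner='All-Hands-AI',
        repo='OpenHands',
        token='ghp_...',  # placeholder GitHub token
        username='your-github-username',  # placeholder
        max_iterations=50,
        output_dir='output',
        llm_config=LLMConfig(model='gpt-4o', api_key='...'),  # placeholder model/key
        runtime_container_image='ghcr.io/all-hands-ai/runtime:latest-nikolaik',  # illustrative tag
        prompt_template='{{ body }}',  # trivial Jinja template for illustration
        issue_type='issue',
        repo_instruction=None,
        issue_number=123,
        comment_id=None,
    )
)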
View File

@ -0,0 +1,20 @@
from typing import Any
from litellm import BaseModel
from openhands.resolver.github_issue import GithubIssue
class ResolverOutput(BaseModel):
# NOTE: User-specified
issue: GithubIssue
issue_type: str
instruction: str
base_commit: str
git_patch: str
history: list[dict[str, Any]]
metrics: dict[str, Any] | None
success: bool
comment_success: list[bool] | None
success_explanation: str
error: str | None

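ResolverOutput is a pydantic model (litellm re-exports pydantic's BaseModel), so each record round-trips through one line of output.jsonl via model_dump_json / model_validate_json; this is exactly how the resolvers above resume. A small sketch of scanning such a file:

from openhands.resolver.resolver_output import ResolverOutput

def load_outputs(path: str) -> list[ResolverOutput]:
    # one serialized ResolverOutput per line of the JSONL file
    outputs: list[ResolverOutput] = []
    with open(path, 'r') as f:
        for line in f:
            if line.strip():
                outputs.append(ResolverOutput.model_validate_json(line))
    return outputs

for output in load_outputs('output/output.jsonl'):
    print(output.issue.number, output.success)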
View File

@ -0,0 +1,632 @@
import argparse
import json
import os
import shutil
import subprocess
import jinja2
import litellm
import requests
from openhands.core.config import LLMConfig
from openhands.core.logger import openhands_logger as logger
from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.io_utils import (
load_all_resolver_outputs,
load_single_resolver_output,
)
from openhands.resolver.patching import apply_diff, parse_patch
from openhands.resolver.resolver_output import ResolverOutput
def apply_patch(repo_dir: str, patch: str) -> None:
diffs = parse_patch(patch)
for diff in diffs:
if not diff.header.new_path:
print('Warning: Could not determine file to patch')
continue
# Remove both "a/" and "b/" prefixes from paths
old_path = (
os.path.join(
repo_dir, diff.header.old_path.removeprefix('a/').removeprefix('b/')
)
if diff.header.old_path and diff.header.old_path != '/dev/null'
else None
)
new_path = os.path.join(
repo_dir, diff.header.new_path.removeprefix('a/').removeprefix('b/')
)
# Check if the file is being deleted
if diff.header.new_path == '/dev/null':
assert old_path is not None
if os.path.exists(old_path):
os.remove(old_path)
print(f'Deleted file: {old_path}')
continue
# Handle file rename
if old_path and new_path and 'rename from' in patch:
# Create parent directory of new path
os.makedirs(os.path.dirname(new_path), exist_ok=True)
try:
# Try to move the file directly
shutil.move(old_path, new_path)
except shutil.SameFileError:
# If it's the same file (can happen with directory renames), copy first then remove
shutil.copy2(old_path, new_path)
os.remove(old_path)
# Try to remove empty parent directories
old_dir = os.path.dirname(old_path)
while old_dir and old_dir.startswith(repo_dir):
try:
os.rmdir(old_dir)
old_dir = os.path.dirname(old_dir)
except OSError:
# Directory not empty or other error, stop trying to remove parents
break
continue
if old_path:
# Open the file in binary mode to detect line endings
with open(old_path, 'rb') as f:
original_content = f.read()
# Detect line endings
if b'\r\n' in original_content:
newline = '\r\n'
elif b'\n' in original_content:
newline = '\n'
else:
newline = None # Let Python decide
try:
with open(old_path, 'r', newline=newline) as f:
split_content = [x.strip(newline) for x in f.readlines()]
except UnicodeDecodeError as e:
logger.error(f'Error reading file {old_path}: {e}')
split_content = []
else:
newline = '\n'
split_content = []
if diff.changes is None:
print(f'Warning: No changes to apply for {old_path}')
continue
new_content = apply_diff(diff, split_content)
# Ensure the directory exists before writing the file
os.makedirs(os.path.dirname(new_path), exist_ok=True)
# Write the new content using the detected line endings
with open(new_path, 'w', newline=newline) as f:
for line in new_content:
print(line, file=f)
print('Patch applied successfully')
def initialize_repo(
output_dir: str, issue_number: int, issue_type: str, base_commit: str | None = None
) -> str:
src_dir = os.path.join(output_dir, 'repo')
dest_dir = os.path.join(output_dir, 'patches', f'{issue_type}_{issue_number}')
if not os.path.exists(src_dir):
raise ValueError(f'Source directory {src_dir} does not exist.')
if os.path.exists(dest_dir):
shutil.rmtree(dest_dir)
shutil.copytree(src_dir, dest_dir)
print(f'Copied repository to {dest_dir}')
if base_commit:
result = subprocess.run(
f'git -C {dest_dir} checkout {base_commit}',
shell=True,
capture_output=True,
text=True,
)
if result.returncode != 0:
print(f'Error checking out commit: {result.stderr}')
raise RuntimeError('Failed to check out commit')
return dest_dir
def make_commit(repo_dir: str, issue: GithubIssue, issue_type: str) -> None:
# Check if git username is set
result = subprocess.run(
f'git -C {repo_dir} config user.name',
shell=True,
capture_output=True,
text=True,
)
if not result.stdout.strip():
# If username is not set, configure git
subprocess.run(
f'git -C {repo_dir} config user.name "openhands" && '
f'git -C {repo_dir} config user.email "openhands@all-hands.dev" && '
f'git -C {repo_dir} config alias.git "git --no-pager"',
shell=True,
check=True,
)
print('Git user configured as openhands')
result = subprocess.run(
f'git -C {repo_dir} add .', shell=True, capture_output=True, text=True
)
if result.returncode != 0:
print(f'Error adding files: {result.stderr}')
raise RuntimeError('Failed to add files to git')
status_result = subprocess.run(
f'git -C {repo_dir} status --porcelain',
shell=True,
capture_output=True,
text=True,
)
if not status_result.stdout.strip():
print(f'No changes to commit for issue #{issue.number}. Skipping commit.')
raise RuntimeError('ERROR: Openhands failed to make code changes.')
commit_message = f'Fix {issue_type} #{issue.number}: {issue.title}'
result = subprocess.run(
['git', '-C', repo_dir, 'commit', '-m', commit_message],
capture_output=True,
text=True,
)
if result.returncode != 0:
raise RuntimeError(f'Failed to commit changes: {result}')
def branch_exists(base_url: str, branch_name: str, headers: dict) -> bool:
print(f'Checking if branch {branch_name} exists...')
response = requests.get(f'{base_url}/branches/{branch_name}', headers=headers)
exists = response.status_code == 200
print(f'Branch {branch_name} exists: {exists}')
return exists
def send_pull_request(
github_issue: GithubIssue,
github_token: str,
github_username: str | None,
patch_dir: str,
llm_config: LLMConfig,
pr_type: str,
fork_owner: str | None = None,
additional_message: str | None = None,
) -> str:
if pr_type not in ['branch', 'draft', 'ready']:
raise ValueError(f'Invalid pr_type: {pr_type}')
# Set up headers and base URL for GitHub API
headers = {
'Authorization': f'token {github_token}',
'Accept': 'application/vnd.github.v3+json',
}
base_url = f'https://api.github.com/repos/{github_issue.owner}/{github_issue.repo}'
# Create a new branch with a unique name
base_branch_name = f'openhands-fix-issue-{github_issue.number}'
branch_name = base_branch_name
attempt = 1
print('Checking if branch exists...')
while branch_exists(base_url, branch_name, headers):
attempt += 1
branch_name = f'{base_branch_name}-try{attempt}'
# Get the default branch
print('Getting default branch...')
response = requests.get(f'{base_url}', headers=headers)
response.raise_for_status()
default_branch = response.json()['default_branch']
print(f'Default branch: {default_branch}')
# Create and checkout the new branch
print('Creating new branch...')
result = subprocess.run(
['git', '-C', patch_dir, 'checkout', '-b', branch_name],
capture_output=True,
text=True,
)
if result.returncode != 0:
print(f'Error creating new branch: {result.stderr}')
raise RuntimeError(
f'Failed to create a new branch {branch_name} in {patch_dir}: {result.stderr}'
)
# Determine the repository to push to (original or fork)
push_owner = fork_owner if fork_owner else github_issue.owner
push_repo = github_issue.repo
print('Pushing changes...')
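# Embed the credentials in the push URL; when a token is supplied as the password, GitHub accepts any username.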
username_and_token = (
f'{github_username}:{github_token}'
if github_username
else f'x-auth-token:{github_token}'
)
push_url = f'https://{username_and_token}@github.com/{push_owner}/{push_repo}.git'
result = subprocess.run(
['git', '-C', patch_dir, 'push', push_url, branch_name],
capture_output=True,
text=True,
)
if result.returncode != 0:
print(f'Error pushing changes: {result.stderr}')
raise RuntimeError('Failed to push changes to the remote repository')
pr_title = f'Fix issue #{github_issue.number}: {github_issue.title}'
pr_body = f'This pull request fixes #{github_issue.number}.'
if additional_message:
pr_body += f'\n\n{additional_message}'
pr_body += '\n\nAutomatic fix generated by [OpenHands](https://github.com/All-Hands-AI/OpenHands/) 🙌'
# If we are not sending a PR, we can finish early and return the
# URL for the user to open a PR manually
if pr_type == 'branch':
url = f'https://github.com/{push_owner}/{github_issue.repo}/compare/{branch_name}?expand=1'
else:
data = {
'title': pr_title, # No need to escape title for GitHub API
'body': pr_body,
'head': branch_name,
'base': default_branch,
'draft': pr_type == 'draft',
}
response = requests.post(f'{base_url}/pulls', headers=headers, json=data)
if response.status_code == 403:
raise RuntimeError(
'Failed to create pull request due to missing permissions. '
'Make sure that the provided token has push permissions for the repository.'
)
response.raise_for_status()
pr_data = response.json()
url = pr_data['html_url']
print(f'{pr_type} created: {url}\n\n--- Title: {pr_title}\n\n--- Body:\n{pr_body}')
return url
def reply_to_comment(github_token: str, comment_id: str, reply: str):
# Use GraphQL here, since the REST API doesn't allow replying to replies in review comment threads
query = """
mutation($body: String!, $pullRequestReviewThreadId: ID!) {
addPullRequestReviewThreadReply(input: { body: $body, pullRequestReviewThreadId: $pullRequestReviewThreadId }) {
comment {
id
body
createdAt
}
}
}
"""
comment_reply = f'Openhands fix success summary\n\n\n{reply}'
variables = {'body': comment_reply, 'pullRequestReviewThreadId': comment_id}
url = 'https://api.github.com/graphql'
headers = {
'Authorization': f'Bearer {github_token}',
'Content-Type': 'application/json',
}
response = requests.post(
url, json={'query': query, 'variables': variables}, headers=headers
)
response.raise_for_status()
def update_existing_pull_request(
github_issue: GithubIssue,
github_token: str,
github_username: str | None,
patch_dir: str,
llm_config: LLMConfig,
comment_message: str | None = None,
additional_message: str | None = None,
) -> str:
"""Update an existing pull request with the new patches.
Args:
github_issue: The issue to update.
github_token: The GitHub token to use for authentication.
github_username: The GitHub username to use for authentication.
patch_dir: The directory containing the patches to apply.
llm_config: The LLM configuration to use for summarizing changes.
comment_message: The main message to post as a comment on the PR.
additional_message: The additional messages to post as a comment on the PR in json list format.
"""
# Set up headers and base URL for GitHub API
headers = {
'Authorization': f'token {github_token}',
'Accept': 'application/vnd.github.v3+json',
}
base_url = f'https://api.github.com/repos/{github_issue.owner}/{github_issue.repo}'
branch_name = github_issue.head_branch
# Push the changes to the existing branch
push_command = (
f'git -C {patch_dir} push '
f'https://{github_username}:{github_token}@github.com/'
f'{github_issue.owner}/{github_issue.repo}.git {branch_name}'
)
result = subprocess.run(push_command, shell=True, capture_output=True, text=True)
if result.returncode != 0:
print(f'Error pushing changes: {result.stderr}')
raise RuntimeError('Failed to push changes to the remote repository')
pr_url = f'https://github.com/{github_issue.owner}/{github_issue.repo}/pull/{github_issue.number}'
print(f'Updated pull request {pr_url} with new patches.')
# Generate a summary of all comment success indicators for PR message
if not comment_message and additional_message:
try:
explanations = json.loads(additional_message)
if explanations:
comment_message = (
'OpenHands made the following changes to resolve the issues:\n\n'
)
for explanation in explanations:
comment_message += f'- {explanation}\n'
# Summarize with LLM if provided
if llm_config is not None:
with open(
os.path.join(
os.path.dirname(__file__),
'prompts/resolve/pr-changes-summary.jinja',
),
'r',
) as f:
template = jinja2.Template(f.read())
prompt = template.render(comment_message=comment_message)
response = litellm.completion(
model=llm_config.model,
messages=[{'role': 'user', 'content': prompt}],
api_key=llm_config.api_key,
base_url=llm_config.base_url,
)
comment_message = response.choices[0].message.content.strip()
except (json.JSONDecodeError, TypeError):
comment_message = 'New OpenHands update'
# Post a comment on the PR
if comment_message:
comment_url = f'{base_url}/issues/{github_issue.number}/comments'
comment_data = {'body': comment_message}
comment_response = requests.post(
comment_url, headers=headers, json=comment_data
)
if comment_response.status_code != 201:
print(
f'Failed to post comment: {comment_response.status_code} {comment_response.text}'
)
else:
print(f'Comment added to the PR: {comment_message}')
# Reply to each unresolved comment thread
if additional_message and github_issue.thread_ids:
explanations = json.loads(additional_message)
for count, reply_comment in enumerate(explanations):
comment_id = github_issue.thread_ids[count]
reply_to_comment(github_token, comment_id, reply_comment)
return pr_url
def process_single_issue(
output_dir: str,
resolver_output: ResolverOutput,
github_token: str,
github_username: str,
pr_type: str,
llm_config: LLMConfig,
fork_owner: str | None,
send_on_failure: bool,
) -> None:
if not resolver_output.success and not send_on_failure:
print(
f'Issue {resolver_output.issue.number} was not successfully resolved. Skipping PR creation.'
)
return
issue_type = resolver_output.issue_type
if issue_type == 'issue':
patched_repo_dir = initialize_repo(
output_dir,
resolver_output.issue.number,
issue_type,
resolver_output.base_commit,
)
elif issue_type == 'pr':
patched_repo_dir = initialize_repo(
output_dir,
resolver_output.issue.number,
issue_type,
resolver_output.issue.head_branch,
)
else:
raise ValueError(f'Invalid issue type: {issue_type}')
apply_patch(patched_repo_dir, resolver_output.git_patch)
make_commit(patched_repo_dir, resolver_output.issue, issue_type)
if issue_type == 'pr':
update_existing_pull_request(
github_issue=resolver_output.issue,
github_token=github_token,
github_username=github_username,
patch_dir=patched_repo_dir,
additional_message=resolver_output.success_explanation,
llm_config=llm_config,
)
else:
send_pull_request(
github_issue=resolver_output.issue,
github_token=github_token,
github_username=github_username,
patch_dir=patched_repo_dir,
pr_type=pr_type,
llm_config=llm_config,
fork_owner=fork_owner,
additional_message=resolver_output.success_explanation,
)
def process_all_successful_issues(
output_dir: str,
github_token: str,
github_username: str,
pr_type: str,
llm_config: LLMConfig,
fork_owner: str | None,
) -> None:
output_path = os.path.join(output_dir, 'output.jsonl')
for resolver_output in load_all_resolver_outputs(output_path):
if resolver_output.success:
print(f'Processing issue {resolver_output.issue.number}')
process_single_issue(
output_dir,
resolver_output,
github_token,
github_username,
pr_type,
llm_config,
fork_owner,
False,
)
def main():
parser = argparse.ArgumentParser(description='Send a pull request to Github.')
parser.add_argument(
'--github-token',
type=str,
default=None,
help='Github token to access the repository.',
)
parser.add_argument(
'--github-username',
type=str,
default=None,
help='Github username to access the repository.',
)
parser.add_argument(
'--output-dir',
type=str,
default='output',
help='Output directory to write the results.',
)
parser.add_argument(
'--pr-type',
type=str,
default='draft',
choices=['branch', 'draft', 'ready'],
help='Type of the pull request to send [branch, draft, ready]',
)
parser.add_argument(
'--issue-number',
type=str,
required=True,
help="Issue number to send the pull request for, or 'all_successful' to process all successful issues.",
)
parser.add_argument(
'--fork-owner',
type=str,
default=None,
help='Owner of the fork to push changes to (if different from the original repo owner).',
)
parser.add_argument(
'--send-on-failure',
action='store_true',
help='Send a pull request even if the issue was not successfully resolved.',
)
parser.add_argument(
'--llm-model',
type=str,
default=None,
help='LLM model to use for summarizing changes.',
)
parser.add_argument(
'--llm-api-key',
type=str,
default=None,
help='API key for the LLM model.',
)
parser.add_argument(
'--llm-base-url',
type=str,
default=None,
help='Base URL for the LLM model.',
)
my_args = parser.parse_args()
github_token = (
my_args.github_token if my_args.github_token else os.getenv('GITHUB_TOKEN')
)
if not github_token:
raise ValueError(
'Github token is not set, set via --github-token or GITHUB_TOKEN environment variable.'
)
github_username = (
my_args.github_username
if my_args.github_username
else os.getenv('GITHUB_USERNAME')
)
llm_config = LLMConfig(
model=my_args.llm_model or os.environ['LLM_MODEL'],
api_key=my_args.llm_api_key or os.environ['LLM_API_KEY'],
base_url=my_args.llm_base_url or os.environ.get('LLM_BASE_URL', None),
)
if not os.path.exists(my_args.output_dir):
raise ValueError(f'Output directory {my_args.output_dir} does not exist.')
if my_args.issue_number == 'all_successful':
if not github_username:
raise ValueError('Github username is required.')
process_all_successful_issues(
my_args.output_dir,
github_token,
github_username,
my_args.pr_type,
llm_config,
my_args.fork_owner,
)
else:
if not my_args.issue_number.isdigit():
raise ValueError(f'Issue number {my_args.issue_number} is not a number.')
issue_number = int(my_args.issue_number)
output_path = os.path.join(my_args.output_dir, 'output.jsonl')
resolver_output = load_single_resolver_output(output_path, issue_number)
if not github_username:
raise ValueError('Github username is required.')
process_single_issue(
my_args.output_dir,
resolver_output,
github_token,
github_username,
my_args.pr_type,
llm_config,
my_args.fork_owner,
my_args.send_on_failure,
)
if __name__ == '__main__':
main()

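send_pull_request above probes the GitHub API until it finds a free branch name; the naming scheme in isolation looks like the following sketch (an illustrative helper, not part of the module):

from typing import Callable

def unique_branch_name(base: str, exists: Callable[[str], bool]) -> str:
    # try `base`, then `base-try2`, `base-try3`, ... until one is unused
    name, attempt = base, 1
    while exists(name):
        attempt += 1
        name = f'{base}-try{attempt}'
    return name

taken = {'openhands-fix-issue-42'}
print(unique_branch_name('openhands-fix-issue-42', lambda name: name in taken))
# prints: openhands-fix-issue-42-try2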
openhands/resolver/utils.py
View File

@ -0,0 +1,139 @@
import json
import logging
import multiprocessing as mp
import os
from typing import Callable
import pandas as pd
from openhands.controller.state.state import State
from openhands.core.logger import get_console_handler
from openhands.core.logger import openhands_logger as logger
from openhands.events.action import Action
from openhands.events.action.message import MessageAction
def codeact_user_response(
state: State,
encapsulate_solution: bool = False,
try_parse: Callable[[Action | None], str] | None = None,
) -> str:
encaps_str = (
(
'Please encapsulate your final answer (answer ONLY) within <solution> and </solution>.\n'
'For example: The answer to the question is <solution> 42 </solution>.\n'
)
if encapsulate_solution
else ''
)
msg = (
'Please continue working on the task using whatever approach you think is suitable.\n'
'If you think you have solved the task, please first send your answer to the user through a message and then finish the interaction.\n'
f'{encaps_str}'
'IMPORTANT: YOU SHOULD NEVER ASK FOR HUMAN HELP.\n'
)
if state.history:
# check if the last action has an answer, if so, early exit
if try_parse is not None:
last_action = next(
(
event
for event in reversed(state.history)
if isinstance(event, Action)
),
None,
)
ans = try_parse(last_action)
if ans is not None:
return '/exit'
# check if the agent has already tried to talk to the user at least twice; if so, let it know it can give up
user_msgs = [
event
for event in state.history
if isinstance(event, MessageAction) and event.source == 'user'
]
if len(user_msgs) >= 2:
# let the agent know that it can give up rather than keep asking
return (
msg
+ 'If you want to give up, run: <execute_bash> exit </execute_bash>.\n'
)
return msg
def cleanup():
print('Cleaning up child processes...')
for process in mp.active_children():
print(f'Terminating child process: {process.name}')
process.terminate()
process.join()
def prepare_dataset(dataset: pd.DataFrame, output_file: str, eval_n_limit: int):
assert 'instance_id' in dataset.columns, (
"Expected 'instance_id' column in the dataset. You should define your own "
"unique identifier for each instance and use it as the 'instance_id' column."
)
id_column = 'instance_id'
logger.info(f'Writing evaluation output to {output_file}')
finished_ids = set()
if os.path.exists(output_file):
with open(output_file, 'r') as f:
for line in f:
data = json.loads(line)
finished_ids.add(data[id_column])
logger.warning(
f'Output file {output_file} already exists. Loaded '
f'{len(finished_ids)} finished instances.'
)
if eval_n_limit:
dataset = dataset.head(eval_n_limit)
logger.info(f'Limiting evaluation to first {eval_n_limit} instances.')
new_dataset = [
instance
for _, instance in dataset.iterrows()
if instance[id_column] not in finished_ids
]
logger.info(
f'Finished instances: {len(finished_ids)}, '
f'Remaining instances: {len(new_dataset)}'
)
return pd.DataFrame(new_dataset)
def reset_logger_for_multiprocessing(
logger: logging.Logger, instance_id: str, log_dir: str
):
"""Reset the logger for multiprocessing.
Save logs to a separate file for each process, instead of trying to write to the
same file/console from multiple processes.
"""
# Set up logger
log_file = os.path.join(
log_dir,
f'instance_{instance_id}.log',
)
# Remove all existing handlers from logger
for handler in logger.handlers[:]:
logger.removeHandler(handler)
# add back the console handler to print ONE line
logger.addHandler(get_console_handler())
logger.info(
f'Starting resolver for instance {instance_id}.\n'
f'Hint: run "tail -f {log_file}" to see live logs in a separate shell'
)
# Remove all existing handlers from logger
for handler in logger.handlers[:]:
logger.removeHandler(handler)
os.makedirs(os.path.dirname(log_file), exist_ok=True)
file_handler = logging.FileHandler(log_file)
file_handler.setFormatter(
logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
)
logger.addHandler(file_handler)

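For reference, a short sketch of how prepare_dataset above behaves (paths and data are illustrative): the dataset is first capped at eval_n_limit rows, then any row whose instance_id already appears in the output file is dropped.

import pandas as pd

from openhands.resolver.utils import prepare_dataset

dataset = pd.DataFrame({'instance_id': ['a', 'b', 'c'], 'payload': [1, 2, 3]})
# rows already recorded in output/output.jsonl are skipped on re-runs
remaining = prepare_dataset(dataset, 'output/output.jsonl', eval_n_limit=2)
print(remaining['instance_id'].tolist())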
View File

@ -0,0 +1,43 @@
import argparse
import os
from openhands.resolver.io_utils import load_single_resolver_output
def visualize_resolver_output(issue_number: int, output_dir: str, vis_method: str):
output_jsonl = os.path.join(output_dir, 'output.jsonl')
resolver_output = load_single_resolver_output(output_jsonl, issue_number)
if vis_method == 'json':
print(resolver_output.model_dump_json(indent=4))
else:
raise ValueError(f'Invalid visualization method: {vis_method}')
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Visualize a patch.')
parser.add_argument(
'--issue-number',
type=int,
required=True,
help='Issue number to send the pull request for.',
)
parser.add_argument(
'--output-dir',
type=str,
default='output',
help='Output directory to write the results.',
)
parser.add_argument(
'--vis-method',
type=str,
default='json',
choices=['json'],
help='Method to visualize the patch [json].',
)
my_args = parser.parse_args()
visualize_resolver_output(
issue_number=my_args.issue_number,
output_dir=my_args.output_dir,
vis_method=my_args.vis_method,
)

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,42 @@
#root {
max-width: 1280px;
margin: 0 auto;
padding: 2rem;
text-align: center;
}
.logo {
height: 6em;
padding: 1.5em;
will-change: filter;
transition: filter 300ms;
}
.logo:hover {
filter: drop-shadow(0 0 2em #646cffaa);
}
.logo.react:hover {
filter: drop-shadow(0 0 2em #61dafbaa);
}
@keyframes logo-spin {
from {
transform: rotate(0deg);
}
to {
transform: rotate(360deg);
}
}
@media (prefers-reduced-motion: no-preference) {
a:nth-of-type(2) .logo {
animation: logo-spin infinite 20s linear;
}
}
.card {
padding: 2em;
}
.read-the-docs {
color: #888;
}

View File

@ -0,0 +1,14 @@
import React from 'react'
import './App.css'
import PullRequestViewer from './PullRequestViewer'
function App() {
return (
<div className="App">
<PullRequestViewer />
</div>
)
}
export default App

View File

@ -0,0 +1,19 @@
import React from 'react';
import { render, screen } from '@testing-library/react';
import PullRequestViewer from './PullRequestViewer';
describe('PullRequestViewer', () => {
it('renders the component title', () => {
render(<PullRequestViewer />);
const titleElement = screen.getByText(/Pull Request Viewer/i);
expect(titleElement).toBeInTheDocument();
});
it('renders the repository select dropdown', () => {
render(<PullRequestViewer />);
const selectElement = screen.getByRole('combobox', { name: /select a repository/i });
expect(selectElement).toBeInTheDocument();
});
});

View File

@ -0,0 +1,112 @@
import React, { useState, useEffect } from 'react';
import axios from 'axios';
import { Octokit } from '@octokit/rest';
import Select from 'react-select';
const octokit = new Octokit({ auth: import.meta.env.VITE_GITHUB_TOKEN });
interface PullRequest {
title: string;
html_url: string;
user: {
login: string;
};
}
interface Repo {
value: string;
label: string;
}
const PullRequestViewer: React.FC = () => {
const [repos, setRepos] = useState<Repo[]>([]);
const [selectedRepo, setSelectedRepo] = useState<Repo | null>(null);
const [pullRequests, setPullRequests] = useState<PullRequest[]>([]);
useEffect(() => {
const fetchRepos = async () => {
try {
const response = await octokit.repos.listForOrg({
org: 'OpenDevin',
type: 'all',
});
const repoOptions = response.data.map(repo => ({
value: repo.name,
label: repo.name,
}));
setRepos(repoOptions);
} catch (error) {
console.error('Error fetching repos:', error);
}
};
fetchRepos();
}, []);
useEffect(() => {
const fetchPullRequests = async () => {
if (selectedRepo) {
try {
let allPullRequests: PullRequest[] = [];
let page = 1;
let hasNextPage = true;
while (hasNextPage) {
const response = await octokit.pulls.list({
owner: 'OpenDevin',
repo: selectedRepo.value,
state: 'open',
per_page: 100,
page: page,
});
allPullRequests = [...allPullRequests, ...response.data];
if (response.data.length < 100) {
hasNextPage = false;
} else {
page++;
}
}
setPullRequests(allPullRequests);
} catch (error) {
console.error('Error fetching pull requests:', error);
}
}
};
fetchPullRequests();
}, [selectedRepo]);
return (
<div>
<h1>Pull Request Viewer</h1>
<Select
options={repos}
value={selectedRepo}
onChange={(option) => setSelectedRepo(option as Repo)}
placeholder="Select a repository"
aria-label="Select a repository"
/>
{pullRequests.length > 0 ? (
<ul>
{pullRequests.map((pr) => (
<li key={pr.html_url}>
<a href={pr.html_url} target="_blank" rel="noopener noreferrer">
{pr.title}
</a>
{' by '}
{pr.user.login}
</li>
))}
</ul>
) : (
<p>No open pull requests found.</p>
)}
</div>
);
};
export default PullRequestViewer;

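The fetchPullRequests loop above pages 100 results at a time and stops at the first short page; the same pattern against the GitHub REST endpoint in Python (an illustrative sketch, not part of this commit):

import requests

def list_open_prs(owner: str, repo: str, token: str) -> list[dict]:
    headers = {'Authorization': f'token {token}'}
    prs: list[dict] = []
    page = 1
    while True:
        response = requests.get(
            f'https://api.github.com/repos/{owner}/{repo}/pulls',
            headers=headers,
            params={'state': 'open', 'per_page': 100, 'page': page},
        )
        response.raise_for_status()
        batch = response.json()
        prs.extend(batch)
        if len(batch) < 100:  # a short page means there are no more results
            return prs
        page += 1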
View File

@ -0,0 +1,71 @@
from openhands.resolver.issue_definitions import IssueHandler
from openhands.resolver.github_issue import GithubIssue
from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
def test_guess_success_multiline_explanation():
# Mock data
issue = GithubIssue(
owner="test",
repo="test",
number=1,
title="Test Issue",
body="Test body",
thread_comments=None,
review_comments=None,
)
history = [MessageAction(content="Test message")]
llm_config = LLMConfig(model="test", api_key="test")
# Create a mock response with multi-line explanation
mock_response = """--- success
true
--- explanation
The PR successfully addressed the issue by:
- Fixed bug A
- Added test B
- Updated documentation C
Automatic fix generated by OpenHands 🙌"""
# Create a handler instance
handler = IssueHandler("test", "test", "test")
# Mock the litellm.completion call
def mock_completion(*args, **kwargs):
class MockResponse:
class Choice:
class Message:
def __init__(self, content):
self.content = content
def __init__(self, content):
self.message = self.Message(content)
def __init__(self, content):
self.choices = [self.Choice(content)]
return MockResponse(mock_response)
# Patch the litellm.completion function
import litellm
original_completion = litellm.completion
litellm.completion = mock_completion
try:
# Call guess_success
success, _, explanation = handler.guess_success(issue, history, llm_config)
# Verify the results
assert success is True
assert "The PR successfully addressed the issue by:" in explanation
assert "Fixed bug A" in explanation
assert "Added test B" in explanation
assert "Updated documentation C" in explanation
assert "Automatic fix generated by OpenHands" in explanation
finally:
# Restore the original function
litellm.completion = original_completion

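The test above swaps litellm.completion by hand and restores it in a finally block; the same substitution can be written with unittest.mock.patch, which restores the original automatically, as the tests in the next file do. A minimal sketch reusing the objects constructed above:

from unittest.mock import MagicMock, patch

mock_llm_response = MagicMock()
mock_llm_response.choices = [
    MagicMock(message=MagicMock(content='--- success\ntrue\n--- explanation\nok'))
]

# handler, issue, history and llm_config as constructed in the test above
with patch('litellm.completion', return_value=mock_llm_response):
    success, _, explanation = handler.guess_success(issue, history, llm_config)
    assert success is True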
View File

@ -0,0 +1,704 @@
from unittest.mock import patch, MagicMock
from openhands.resolver.issue_definitions import IssueHandler, PRHandler
from openhands.resolver.github_issue import GithubIssue, ReviewThread
from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
def test_get_converted_issues_initializes_review_comments():
# Mock the necessary dependencies
with patch("requests.get") as mock_get:
# Mock the response for issues
mock_issues_response = MagicMock()
mock_issues_response.json.return_value = [
{"number": 1, "title": "Test Issue", "body": "Test Body"}
]
# Mock the response for comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = []
# Set up the mock to return different responses for different calls
# First call is for issues, second call is for comments
mock_get.side_effect = [
mock_issues_response,
mock_comments_response,
mock_comments_response,
] # Need two comment responses because we make two API calls
# Create an instance of IssueHandler
handler = IssueHandler("test-owner", "test-repo", "test-token")
# Get converted issues
issues = handler.get_converted_issues()
# Verify that we got exactly one issue
assert len(issues) == 1
# Verify that review_comments is initialized as None
assert issues[0].review_comments is None
# Verify other fields are set correctly
assert issues[0].number == 1
assert issues[0].title == "Test Issue"
assert issues[0].body == "Test Body"
assert issues[0].owner == "test-owner"
assert issues[0].repo == "test-repo"
def test_pr_handler_guess_success_with_thread_comments():
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with thread comments but no review comments
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=["First comment", "Second comment"],
closing_issues=["Issue description"],
review_comments=None,
thread_ids=None,
head_branch="test-branch",
)
# Create mock history
history = [MessageAction(content="Fixed the issue by implementing X and Y")]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
The changes successfully address the feedback."""
)
)
]
# Test the guess_success method
with patch("litellm.completion", return_value=mock_response):
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
# Verify the results
assert success is True
assert success_list == [True]
assert "successfully address" in explanation
def test_pr_handler_get_converted_issues_with_comments():
# Mock the necessary dependencies
with patch("requests.get") as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
"number": 1,
"title": "Test PR",
"body": "Test Body fixes #1",
"head": {"ref": "test-branch"},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"body": "First comment"},
{"body": "Second comment"},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {"nodes": []},
"reviewThreads": {"edges": []},
}
}
}
}
# Set up the mock to return different responses
# We need to return empty responses for subsequent pages
mock_empty_response = MagicMock()
mock_empty_response.json.return_value = []
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response = MagicMock()
mock_external_issue_response.json.return_value = {
"body": "This is additional context from an externally referenced issue."
}
mock_get.side_effect = [
mock_prs_response, # First call for PRs
mock_empty_response, # Second call for PRs (empty page)
mock_comments_response, # Third call for PR comments
mock_empty_response, # Fourth call for PR comments (empty page)
mock_external_issue_response, # Mock response for the external issue reference #1
]
# Mock the post request for GraphQL
with patch("requests.post") as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
handler = PRHandler("test-owner", "test-repo", "test-token")
# Get converted issues
prs = handler.get_converted_issues()
# Verify that we got exactly one PR
assert len(prs) == 1
# Verify that thread_comments are set correctly
assert prs[0].thread_comments == ["First comment", "Second comment"]
# Verify other fields are set correctly
assert prs[0].number == 1
assert prs[0].title == "Test PR"
assert prs[0].body == "Test Body fixes #1"
assert prs[0].owner == "test-owner"
assert prs[0].repo == "test-repo"
assert prs[0].head_branch == "test-branch"
assert prs[0].closing_issues == [
"This is additional context from an externally referenced issue."
]
def test_pr_handler_guess_success_only_review_comments():
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with only review comments
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=None,
closing_issues=["Issue description"],
review_comments=["Please fix the formatting", "Add more tests"],
thread_ids=None,
head_branch="test-branch",
)
# Create mock history
history = [MessageAction(content="Fixed the formatting and added more tests")]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
The changes successfully address the review comments."""
)
)
]
# Test the guess_success method
with patch("litellm.completion", return_value=mock_response):
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
# Verify the results
assert success is True
assert success_list == [True]
assert "successfully address" in explanation
def test_pr_handler_guess_success_no_comments():
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with no comments
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=None,
closing_issues=["Issue description"],
review_comments=None,
thread_ids=None,
head_branch="test-branch",
)
# Create mock history
history = [MessageAction(content="Fixed the issue")]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Test that it returns appropriate message when no comments are present
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
assert success is False
assert success_list is None
assert explanation == "No feedback was found to process"
def test_get_issue_comments_with_specific_comment_id():
# Mock the necessary dependencies
with patch("requests.get") as mock_get:
# Mock the response for comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"id": 123, "body": "First comment"},
{"id": 456, "body": "Second comment"},
]
mock_get.return_value = mock_comments_response
# Create an instance of IssueHandler
handler = IssueHandler("test-owner", "test-repo", "test-token")
# Get comments with a specific comment_id
specific_comment = handler._get_issue_comments(issue_number=1, comment_id=123)
# Verify only the specific comment is returned
assert specific_comment == ["First comment"]
def test_pr_handler_get_converted_issues_with_specific_thread_comment():
# Define the specific comment_id to filter
specific_comment_id = 123
# Mock GraphQL response for review threads
with patch("requests.get") as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
"number": 1,
"title": "Test PR",
"body": "Test Body",
"head": {"ref": "test-branch"},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"body": "First comment", "id": 123},
{"body": "Second comment", "id": 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {"nodes": []},
"reviewThreads": {
"edges": [
{
"node": {
"id": "review-thread-1",
"isResolved": False,
"comments": {
"nodes": [
{
"fullDatabaseId": 121,
"body": "Specific review comment",
"path": "file1.txt",
},
{
"fullDatabaseId": 456,
"body": "Another review comment",
"path": "file2.txt",
},
]
},
}
}
]
},
}
}
}
}
# Set up the mock to return different responses
# We need to return empty responses for subsequent pages
mock_empty_response = MagicMock()
mock_empty_response.json.return_value = []
mock_get.side_effect = [
mock_prs_response, # First call for PRs
mock_empty_response, # Second call for PRs (empty page)
mock_comments_response, # Third call for PR comments
mock_empty_response, # Fourth call for PR comments (empty page)
]
# Mock the post request for GraphQL
with patch("requests.post") as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
handler = PRHandler("test-owner", "test-repo", "test-token")
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
# Verify that we got exactly one PR
assert len(prs) == 1
# Verify that thread_comments are set correctly
assert prs[0].thread_comments == ["First comment"]
assert prs[0].review_comments == []
assert prs[0].review_threads == []
# Verify other fields are set correctly
assert prs[0].number == 1
assert prs[0].title == "Test PR"
assert prs[0].body == "Test Body"
assert prs[0].owner == "test-owner"
assert prs[0].repo == "test-repo"
assert prs[0].head_branch == "test-branch"
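# --- Illustration (not part of the original test file) ------------------------
# The side_effect ordering above (one full page, then an empty page) mirrors a
# paginate-until-empty loop. A hedged sketch of that pattern, with fetch_page
# standing in for the real requests.get call:
from typing import Callable

def fetch_all_pages(fetch_page: Callable[[int], list]) -> list:
    results: list = []
    page = 1
    while True:
        batch = fetch_page(page)  # e.g. GET .../pulls?page=<page>
        if not batch:             # an empty page terminates the loop
            break
        results.extend(batch)
        page += 1
    return results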
def test_pr_handler_get_converted_issues_with_specific_review_thread_comment():
# Define the specific comment_id to filter
specific_comment_id = 123
# Mock GraphQL response for review threads
with patch("requests.get") as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
"number": 1,
"title": "Test PR",
"body": "Test Body",
"head": {"ref": "test-branch"},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"body": "First comment", "id": 120},
{"body": "Second comment", "id": 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {"nodes": []},
"reviewThreads": {
"edges": [
{
"node": {
"id": "review-thread-1",
"isResolved": False,
"comments": {
"nodes": [
{
"fullDatabaseId": specific_comment_id,
"body": "Specific review comment",
"path": "file1.txt",
},
{
"fullDatabaseId": 456,
"body": "Another review comment",
"path": "file1.txt",
},
]
},
}
}
]
},
}
}
}
}
# Set up the mock to return different responses
# We need to return empty responses for subsequent pages
mock_empty_response = MagicMock()
mock_empty_response.json.return_value = []
mock_get.side_effect = [
mock_prs_response, # First call for PRs
mock_empty_response, # Second call for PRs (empty page)
mock_comments_response, # Third call for PR comments
mock_empty_response, # Fourth call for PR comments (empty page)
]
# Mock the post request for GraphQL
with patch("requests.post") as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
handler = PRHandler("test-owner", "test-repo", "test-token")
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
# Verify that we got exactly one PR
assert len(prs) == 1
# Verify that thread_comments are set correctly
assert prs[0].thread_comments is None
assert prs[0].review_comments == []
assert len(prs[0].review_threads) == 1
assert isinstance(prs[0].review_threads[0], ReviewThread)
assert (
prs[0].review_threads[0].comment
== "Specific review comment\n---\nlatest feedback:\nAnother review comment\n"
)
assert prs[0].review_threads[0].files == ["file1.txt"]
# Verify other fields are set correctly
assert prs[0].number == 1
assert prs[0].title == "Test PR"
assert prs[0].body == "Test Body"
assert prs[0].owner == "test-owner"
assert prs[0].repo == "test-repo"
assert prs[0].head_branch == "test-branch"
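# --- Illustration (not part of the original test file) ------------------------
# The expected string above implies the thread's comments are joined with "---"
# separators and the newest comment is prefixed with "latest feedback:". A
# hedged reconstruction (function name assumed):
def flatten_review_thread(comments: list[str]) -> str:
    # Tag the most recent comment so the agent prioritizes it.
    parts = comments[:-1] + ["latest feedback:\n" + comments[-1]]
    return "\n---\n".join(parts) + "\n"

# flatten_review_thread(["Specific review comment", "Another review comment"])
# == "Specific review comment\n---\nlatest feedback:\nAnother review comment\n"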
def test_pr_handler_get_converted_issues_with_specific_comment_and_issue_refs():
# Define the specific comment_id to filter
specific_comment_id = 123
# Mock GraphQL response for review threads
with patch("requests.get") as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
"number": 1,
"title": "Test PR fixes #3",
"body": "Test Body",
"head": {"ref": "test-branch"},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"body": "First comment", "id": 120},
{"body": "Second comment", "id": 124},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {"nodes": []},
"reviewThreads": {
"edges": [
{
"node": {
"id": "review-thread-1",
"isResolved": False,
"comments": {
"nodes": [
{
"fullDatabaseId": specific_comment_id,
"body": "Specific review comment that references #6",
"path": "file1.txt",
},
{
"fullDatabaseId": 456,
"body": "Another review comment referencing #7",
"path": "file2.txt",
},
]
},
}
}
]
},
}
}
}
}
# Set up the mock to return different responses
# We need to return empty responses for subsequent pages
mock_empty_response = MagicMock()
mock_empty_response.json.return_value = []
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response_in_body = MagicMock()
mock_external_issue_response_in_body.json.return_value = {
"body": "External context #1."
}
# Mock the response for fetching the external issue referenced in review thread
mock_external_issue_response_review_thread = MagicMock()
mock_external_issue_response_review_thread.json.return_value = {
"body": "External context #2."
}
mock_get.side_effect = [
mock_prs_response, # First call for PRs
mock_empty_response, # Second call for PRs (empty page)
mock_comments_response, # Third call for PR comments
mock_empty_response, # Fourth call for PR comments (empty page)
mock_external_issue_response_in_body,
mock_external_issue_response_review_thread,
]
# Mock the post request for GraphQL
with patch("requests.post") as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
handler = PRHandler("test-owner", "test-repo", "test-token")
# Get converted issues
prs = handler.get_converted_issues(comment_id=specific_comment_id)
# Verify that we got exactly one PR
assert len(prs) == 1
# Verify that thread_comments are set correctly
assert prs[0].thread_comments is None
assert prs[0].review_comments == []
assert len(prs[0].review_threads) == 1
assert isinstance(prs[0].review_threads[0], ReviewThread)
assert (
prs[0].review_threads[0].comment
== "Specific review comment that references #6\n---\nlatest feedback:\nAnother review comment referencing #7\n"
)
assert prs[0].closing_issues == [
"External context #1.",
"External context #2.",
] # Only includes references from the specific comment and the PR body
# Verify other fields are set correctly
assert prs[0].number == 1
assert prs[0].title == "Test PR fixes #3"
assert prs[0].body == "Test Body"
assert prs[0].owner == "test-owner"
assert prs[0].repo == "test-repo"
assert prs[0].head_branch == "test-branch"
def test_pr_handler_get_converted_issues_with_duplicate_issue_refs():
# Mock the necessary dependencies
with patch("requests.get") as mock_get:
# Mock the response for PRs
mock_prs_response = MagicMock()
mock_prs_response.json.return_value = [
{
"number": 1,
"title": "Test PR",
"body": "Test Body fixes #1",
"head": {"ref": "test-branch"},
}
]
# Mock the response for PR comments
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{"body": "First comment addressing #1"},
{"body": "Second comment addressing #2"},
]
# Mock the response for PR metadata (GraphQL)
mock_graphql_response = MagicMock()
mock_graphql_response.json.return_value = {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {"nodes": []},
"reviewThreads": {"edges": []},
}
}
}
}
# Set up the mock to return different responses
# We need to return empty responses for subsequent pages
mock_empty_response = MagicMock()
mock_empty_response.json.return_value = []
# Mock the response for fetching the external issue referenced in PR body
mock_external_issue_response_in_body = MagicMock()
mock_external_issue_response_in_body.json.return_value = {
"body": "External context #1."
}
# Mock the response for fetching the external issue referenced in review thread
mock_external_issue_response_in_comment = MagicMock()
mock_external_issue_response_in_comment.json.return_value = {
"body": "External context #2."
}
mock_get.side_effect = [
mock_prs_response, # First call for PRs
mock_empty_response, # Second call for PRs (empty page)
mock_comments_response, # Third call for PR comments
mock_empty_response, # Fourth call for PR comments (empty page)
mock_external_issue_response_in_body, # Mock response for the external issue reference #1
mock_external_issue_response_in_comment,
]
# Mock the post request for GraphQL
with patch("requests.post") as mock_post:
mock_post.return_value = mock_graphql_response
# Create an instance of PRHandler
handler = PRHandler("test-owner", "test-repo", "test-token")
# Get converted issues
prs = handler.get_converted_issues()
# Verify that we got exactly one PR
assert len(prs) == 1
# Verify that thread_comments are set correctly
assert prs[0].thread_comments == [
"First comment addressing #1",
"Second comment addressing #2",
]
# Verify other fields are set correctly
assert prs[0].number == 1
assert prs[0].title == "Test PR"
assert prs[0].body == "Test Body fixes #1"
assert prs[0].owner == "test-owner"
assert prs[0].repo == "test-repo"
assert prs[0].head_branch == "test-branch"
assert prs[0].closing_issues == [
"External context #1.",
"External context #2.",
]
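# --- Illustration (not part of the original test file) ------------------------
# The closing_issues assertions in the last two tests imply "#N" references are
# collected from the PR body and comments, de-duplicated in order, and then
# resolved against the issues API. A hedged sketch of the extraction step:
import re

def extract_unique_issue_refs(*texts: str) -> list[int]:
    seen: list[int] = []
    for text in texts:
        for num in re.findall(r"#(\d+)", text or ""):
            number = int(num)
            if number not in seen:  # first occurrence wins; duplicates dropped
                seen.append(number)
    return seen

# extract_unique_issue_refs("Test Body fixes #1", "First comment addressing #1",
#                           "Second comment addressing #2") == [1, 2]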

View File

@@ -0,0 +1,460 @@
import json
from unittest.mock import patch, MagicMock
from openhands.resolver.issue_definitions import PRHandler
from openhands.resolver.github_issue import GithubIssue, ReviewThread
from openhands.events.action.message import MessageAction
from openhands.core.config import LLMConfig
def test_guess_success_review_threads_litellm_call():
"""Test that the litellm.completion() call for review threads contains the expected content."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with review threads
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=None,
closing_issues=["Issue 1 description", "Issue 2 description"],
review_comments=None,
review_threads=[
ReviewThread(
comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
files=["/src/file1.py", "/src/file2.py"],
),
ReviewThread(
comment="Add more tests\n---\nlatest feedback:\nAdd test cases",
files=["/tests/test_file.py"],
),
],
thread_ids=["1", "2"],
head_branch="test-branch",
)
# Create mock history with a detailed response
history = [
MessageAction(
content="""I have made the following changes:
1. Fixed formatting in file1.py and file2.py
2. Added docstrings to all functions
3. Added test cases in test_file.py"""
)
]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
The changes successfully address the feedback."""
)
)
]
# Test the guess_success method
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
# Verify the litellm.completion() calls
assert mock_completion.call_count == 2 # One call per review thread
# Check first call
first_call = mock_completion.call_args_list[0]
first_prompt = first_call[1]["messages"][0]["content"]
assert (
"Issue descriptions:\n"
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
in first_prompt
)
assert (
"Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings"
in first_prompt
)
assert (
"Files locations:\n"
+ json.dumps(["/src/file1.py", "/src/file2.py"], indent=4)
in first_prompt
)
assert "Last message from AI agent:\n" + history[0].content in first_prompt
# Check second call
second_call = mock_completion.call_args_list[1]
second_prompt = second_call[1]["messages"][0]["content"]
assert (
"Issue descriptions:\n"
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
in second_prompt
)
assert (
"Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases"
in second_prompt
)
assert (
"Files locations:\n" + json.dumps(["/tests/test_file.py"], indent=4)
in second_prompt
)
assert "Last message from AI agent:\n" + history[0].content in second_prompt
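# --- Illustration (not part of the original test file) ------------------------
# The prompt assertions above suggest each review thread gets its own
# completion call, with a prompt assembled from the issue context, the thread
# feedback, the file locations, and the agent's last message. A hedged sketch
# of that assembly (the exact text between sections is assumed):
import json

def build_review_thread_prompt(issues_context: str, feedback: str,
                               files: list[str], last_message: str) -> str:
    return (
        "Issue descriptions:\n" + issues_context
        + "\n\nFeedback:\n" + feedback
        + "\n\nFiles locations:\n" + json.dumps(files, indent=4)
        + "\n\nLast message from AI agent:\n" + last_message
    )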
def test_guess_success_thread_comments_litellm_call():
"""Test that the litellm.completion() call for thread comments contains the expected content."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with thread comments
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=[
"Please improve error handling",
"Add input validation",
"latest feedback:\nHandle edge cases",
],
closing_issues=["Issue 1 description", "Issue 2 description"],
review_comments=None,
thread_ids=None,
head_branch="test-branch",
)
# Create mock history with a detailed response
history = [
MessageAction(
content="""I have made the following changes:
1. Added try/catch blocks for error handling
2. Added input validation checks
3. Added handling for edge cases"""
)
]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
The changes successfully address the feedback."""
)
)
]
# Test the guess_success method
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
prompt = call_args[1]["messages"][0]["content"]
# Check prompt content
assert (
"Issue descriptions:\n"
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
in prompt
)
assert "PR Thread Comments:\n" + "\n---\n".join(issue.thread_comments) in prompt
assert "Last message from AI agent:\n" + history[0].content in prompt
def test_check_feedback_with_llm():
"""Test the _check_feedback_with_llm helper function."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Test cases for different LLM responses
test_cases = [
{
"response": "--- success\ntrue\n--- explanation\nChanges look good",
"expected": (True, "Changes look good"),
},
{
"response": "--- success\nfalse\n--- explanation\nNot all issues fixed",
"expected": (False, "Not all issues fixed"),
},
{
"response": "Invalid response format",
"expected": (
False,
"Failed to decode answer from LLM response: Invalid response format",
),
},
{
"response": "--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere",
"expected": (True, "Multiline\nexplanation\nhere"),
},
]
for case in test_cases:
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [MagicMock(message=MagicMock(content=case["response"]))]
# Test the function
with patch("litellm.completion", return_value=mock_response):
success, explanation = handler._check_feedback_with_llm(
"test prompt", llm_config
)
assert (success, explanation) == case["expected"]
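# --- Illustration (not part of the original test file) ------------------------
# The four cases above pin down the parsing contract: a well-formed answer has
# "--- success" and "--- explanation" sections, and anything else yields the
# decode-failure message. A hedged sketch of a parser satisfying these cases:
import re

def parse_feedback_answer(answer: str) -> tuple[bool, str]:
    match = re.search(
        r"--- success\s*\n\s*(true|false)\s*\n--- explanation\s*\n(.*)",
        answer,
        re.DOTALL | re.IGNORECASE,
    )
    if match is None:
        return False, f"Failed to decode answer from LLM response: {answer}"
    return match.group(1).lower() == "true", match.group(2).strip()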
def test_check_review_thread():
"""Test the _check_review_thread helper function."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create test data
review_thread = ReviewThread(
comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
files=["/src/file1.py", "/src/file2.py"],
)
issues_context = json.dumps(
["Issue 1 description", "Issue 2 description"], indent=4
)
last_message = "I have fixed the formatting and added docstrings"
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
Changes look good"""
)
)
]
# Test the function
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_thread(
review_thread, issues_context, last_message, llm_config
)
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
prompt = call_args[1]["messages"][0]["content"]
# Check prompt content
assert "Issue descriptions:\n" + issues_context in prompt
assert "Feedback:\n" + review_thread.comment in prompt
assert (
"Files locations:\n" + json.dumps(review_thread.files, indent=4) in prompt
)
assert "Last message from AI agent:\n" + last_message in prompt
# Check result
assert success is True
assert explanation == "Changes look good"
def test_check_thread_comments():
"""Test the _check_thread_comments helper function."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create test data
thread_comments = [
"Please improve error handling",
"Add input validation",
"latest feedback:\nHandle edge cases",
]
issues_context = json.dumps(
["Issue 1 description", "Issue 2 description"], indent=4
)
last_message = "I have added error handling and input validation"
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
Changes look good"""
)
)
]
# Test the function
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_thread_comments(
thread_comments, issues_context, last_message, llm_config
)
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
prompt = call_args[1]["messages"][0]["content"]
# Check prompt content
assert "Issue descriptions:\n" + issues_context in prompt
assert "PR Thread Comments:\n" + "\n---\n".join(thread_comments) in prompt
assert "Last message from AI agent:\n" + last_message in prompt
# Check result
assert success is True
assert explanation == "Changes look good"
def test_check_review_comments():
"""Test the _check_review_comments helper function."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create test data
review_comments = [
"Please improve code readability",
"Add comments to complex functions",
"Follow PEP 8 style guide",
]
issues_context = json.dumps(
["Issue 1 description", "Issue 2 description"], indent=4
)
last_message = "I have improved code readability and added comments"
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
Changes look good"""
)
)
]
# Test the function
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, explanation = handler._check_review_comments(
review_comments, issues_context, last_message, llm_config
)
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
prompt = call_args[1]["messages"][0]["content"]
# Check prompt content
assert "Issue descriptions:\n" + issues_context in prompt
assert "PR Review Comments:\n" + "\n---\n".join(review_comments) in prompt
assert "Last message from AI agent:\n" + last_message in prompt
# Check result
assert success is True
assert explanation == "Changes look good"
def test_guess_success_review_comments_litellm_call():
"""Test that the litellm.completion() call for review comments contains the expected content."""
# Create a PR handler instance
handler = PRHandler("test-owner", "test-repo", "test-token")
# Create a mock issue with review comments
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=1,
title="Test PR",
body="Test Body",
thread_comments=None,
closing_issues=["Issue 1 description", "Issue 2 description"],
review_comments=[
"Please improve code readability",
"Add comments to complex functions",
"Follow PEP 8 style guide",
],
thread_ids=None,
head_branch="test-branch",
)
# Create mock history with a detailed response
history = [
MessageAction(
content="""I have made the following changes:
1. Improved code readability by breaking down complex functions
2. Added detailed comments to all complex functions
3. Fixed code style to follow PEP 8"""
)
]
# Create mock LLM config
llm_config = LLMConfig(model="test-model", api_key="test-key")
# Mock the LLM response
mock_response = MagicMock()
mock_response.choices = [
MagicMock(
message=MagicMock(
content="""--- success
true
--- explanation
The changes successfully address the feedback."""
)
)
]
# Test the guess_success method
with patch("litellm.completion") as mock_completion:
mock_completion.return_value = mock_response
success, success_list, explanation = handler.guess_success(
issue, history, llm_config
)
# Verify the litellm.completion() call
mock_completion.assert_called_once()
call_args = mock_completion.call_args
prompt = call_args[1]["messages"][0]["content"]
# Check prompt content
assert (
"Issue descriptions:\n"
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
in prompt
)
assert "PR Review Comments:\n" + "\n---\n".join(issue.review_comments) in prompt
assert "Last message from AI agent:\n" + history[0].content in prompt

View File

@@ -0,0 +1,165 @@
from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.send_pull_request import make_commit
import os
import tempfile
import subprocess
def test_commit_message_with_quotes():
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
subprocess.run(["git", "init", temp_dir], check=True)
# Create a test file and add it to git
test_file = os.path.join(temp_dir, "test.txt")
with open(test_file, "w") as f:
f.write("test content")
subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
# Create a test issue with problematic title
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=123,
title="Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>",
body="Test body",
labels=[],
assignees=[],
state="open",
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Make the commit
make_commit(temp_dir, issue, "issue")
# Get the commit message
result = subprocess.run(
["git", "-C", temp_dir, "log", "-1", "--pretty=%B"],
capture_output=True,
text=True,
check=True,
)
commit_msg = result.stdout.strip()
# The commit message should contain the quotes without excessive escaping
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>"
assert commit_msg == expected, f"Expected: {expected}\nGot: {commit_msg}"
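# --- Illustration (not part of the original test file) ------------------------
# The expectation above (quotes preserved verbatim) is what you get when the
# commit message is passed as a single argv element instead of being
# interpolated into a shell string. A minimal sketch of that approach:
import subprocess

def commit_with_message(repo_dir: str, message: str) -> None:
    # A list of arguments bypasses shell quoting, so quotes in `message`
    # reach git unescaped.
    subprocess.run(["git", "-C", repo_dir, "commit", "-m", message], check=True)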
def test_pr_title_with_quotes(monkeypatch):
# Mock requests.post to avoid actual API calls
class MockResponse:
def __init__(self, status_code=201):
self.status_code = status_code
self.text = ""
def json(self):
return {"html_url": "https://github.com/test/test/pull/1"}
def raise_for_status(self):
pass
def mock_post(*args, **kwargs):
# Verify that the PR title is not over-escaped
data = kwargs.get("json", {})
title = data.get("title", "")
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>"
assert (
title == expected
), f"PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}"
return MockResponse()
class MockGetResponse:
def __init__(self, status_code=200):
self.status_code = status_code
self.text = ""
def json(self):
return {"default_branch": "main"}
def raise_for_status(self):
pass
monkeypatch.setattr("requests.post", mock_post)
monkeypatch.setattr("requests.get", lambda *args, **kwargs: MockGetResponse())
monkeypatch.setattr(
"openhands.resolver.send_pull_request.branch_exists",
lambda *args, **kwargs: False,
)
# Mock subprocess.run to avoid actual git commands
original_run = subprocess.run
def mock_run(*args, **kwargs):
    print(f"Running command: {args[0] if args else kwargs.get('args', [])}")
    # Intercept `git push` so the test never hits a real remote; every other
    # command falls through to the real subprocess.run.
    if isinstance(args[0], list) and args[0][0] == "git" and "push" in args[0]:
        return subprocess.CompletedProcess(args[0], returncode=0, stdout="", stderr="")
    return original_run(*args, **kwargs)
monkeypatch.setattr("subprocess.run", mock_run)
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
print("Initializing git repo...")
subprocess.run(["git", "init", temp_dir], check=True)
# Configure the git user so the commit below succeeds
subprocess.run(
["git", "-C", temp_dir, "config", "user.name", "Test User"], check=True
)
subprocess.run(
["git", "-C", temp_dir, "config", "user.email", "test@example.com"],
check=True,
)
# Create a test file and add it to git
test_file = os.path.join(temp_dir, "test.txt")
with open(test_file, "w") as f:
f.write("test content")
print("Adding and committing test file...")
subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
subprocess.run(
["git", "-C", temp_dir, "commit", "-m", "Initial commit"], check=True
)
# Create a test issue with problematic title
print("Creating test issue...")
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
number=123,
title="Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>",
body="Test body",
labels=[],
assignees=[],
state="open",
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
closed_at=None,
head_branch=None,
thread_ids=None,
)
# Try to send a PR - this will fail if the title is incorrectly escaped
print("Sending PR...")
from openhands.resolver.send_pull_request import send_pull_request
from openhands.core.config import LLMConfig
send_pull_request(
github_issue=issue,
github_token="dummy-token",
github_username="test-user",
patch_dir=temp_dir,
llm_config=LLMConfig(model="test-model", api_key="test-key"),
pr_type="ready",
)

View File

@@ -0,0 +1,875 @@
import os
import tempfile
import pytest
from unittest.mock import AsyncMock, patch, MagicMock
from openhands.resolver.issue_definitions import IssueHandler, PRHandler
from openhands.resolver.resolve_issue import (
initialize_runtime,
complete_runtime,
process_issue,
)
from openhands.resolver.github_issue import GithubIssue, ReviewThread
from openhands.events.action import CmdRunAction
from openhands.events.observation import CmdOutputObservation, NullObservation
from openhands.resolver.resolver_output import ResolverOutput
from openhands.core.config import LLMConfig
@pytest.fixture
def mock_output_dir():
with tempfile.TemporaryDirectory() as temp_dir:
repo_path = os.path.join(temp_dir, "repo")
# Initialize a git repo in "repo" and add a commit containing "README.md"
os.makedirs(repo_path)
os.system(f"git init {repo_path}")
readme_path = os.path.join(repo_path, "README.md")
with open(readme_path, "w") as f:
f.write("hello world")
os.system(f"git -C {repo_path} add README.md")
os.system(f"git -C {repo_path} commit -m 'Initial commit'")
yield temp_dir
@pytest.fixture
def mock_subprocess():
with patch("subprocess.check_output") as mock_check_output:
yield mock_check_output
@pytest.fixture
def mock_os():
with patch("os.system") as mock_system, patch("os.path.join") as mock_join:
yield mock_system, mock_join
@pytest.fixture
def mock_prompt_template():
return "Issue: {{ body }}\n\nPlease fix this issue."
@pytest.fixture
def mock_followup_prompt_template():
return "Issue context: {{ issues }}\n\nReview comments: {{ review_comments }}\n\nReview threads: {{ review_threads }}\n\nFiles: {{ files }}\n\nPlease fix this issue."
def create_cmd_output(exit_code: int, content: str, command_id: int, command: str):
return CmdOutputObservation(
exit_code=exit_code, content=content, command_id=command_id, command=command
)
def test_initialize_runtime():
mock_runtime = MagicMock()
mock_runtime.run_action.side_effect = [
create_cmd_output(
exit_code=0, content="", command_id=1, command="cd /workspace"
),
create_cmd_output(
exit_code=0,
content="",
command_id=2,
command='git config --global core.pager ""',
),
]
initialize_runtime(mock_runtime)
assert mock_runtime.run_action.call_count == 2
mock_runtime.run_action.assert_any_call(CmdRunAction(command="cd /workspace"))
mock_runtime.run_action.assert_any_call(
CmdRunAction(command='git config --global core.pager ""')
)
def test_download_issues_from_github():
handler = IssueHandler("owner", "repo", "token")
mock_issues_response = MagicMock()
mock_issues_response.json.side_effect = [
[
{"number": 1, "title": "Issue 1", "body": "This is an issue"},
{
"number": 2,
"title": "PR 1",
"body": "This is a pull request",
"pull_request": {},
},
{"number": 3, "title": "Issue 2", "body": "This is another issue"},
],
None,
]
mock_issues_response.raise_for_status = MagicMock()
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = []
mock_comments_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
if "/comments" in url:
return mock_comments_response
return mock_issues_response
with patch("requests.get", side_effect=get_mock_response):
issues = handler.get_converted_issues()
assert len(issues) == 2
assert handler.issue_type == "issue"
assert all(isinstance(issue, GithubIssue) for issue in issues)
assert [issue.number for issue in issues] == [1, 3]
assert [issue.title for issue in issues] == ["Issue 1", "Issue 2"]
assert [issue.review_comments for issue in issues] == [None, None]
assert [issue.closing_issues for issue in issues] == [None, None]
assert [issue.thread_ids for issue in issues] == [None, None]
def test_download_pr_from_github():
handler = PRHandler("owner", "repo", "token")
mock_pr_response = MagicMock()
mock_pr_response.json.side_effect = [
[
{
"number": 1,
"title": "PR 1",
"body": "This is a pull request",
"head": {"ref": "b1"},
},
{
"number": 2,
"title": "My PR",
"body": "This is another pull request",
"head": {"ref": "b2"},
},
{"number": 3, "title": "PR 3", "body": "Final PR", "head": {"ref": "b3"}},
],
None,
]
mock_pr_response.raise_for_status = MagicMock()
# Mock for PR comments response
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [] # No PR comments
mock_comments_response.raise_for_status = MagicMock()
# Mock for GraphQL request (for download_pr_metadata)
mock_graphql_response = MagicMock()
mock_graphql_response.json.side_effect = lambda: {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {
"edges": [
{"node": {"body": "Issue 1 body", "number": 1}},
{"node": {"body": "Issue 2 body", "number": 2}},
]
},
"reviewThreads": {
"edges": [
{
"node": {
"isResolved": False,
"id": "1",
"comments": {
"nodes": [
{
"body": "Unresolved comment 1",
"path": "/frontend/header.tsx",
},
{"body": "Follow up thread"},
]
},
}
},
{
"node": {
"isResolved": True,
"id": "2",
"comments": {
"nodes": [
{
"body": "Resolved comment 1",
"path": "/some/file.py",
}
]
},
}
},
{
"node": {
"isResolved": False,
"id": "3",
"comments": {
"nodes": [
{
"body": "Unresolved comment 3",
"path": "/another/file.py",
}
]
},
}
},
]
},
}
}
}
}
mock_graphql_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
if "/comments" in url:
return mock_comments_response
return mock_pr_response
with patch("requests.get", side_effect=get_mock_response):
with patch("requests.post", return_value=mock_graphql_response):
issues = handler.get_converted_issues()
assert len(issues) == 3
assert handler.issue_type == "pr"
assert all(isinstance(issue, GithubIssue) for issue in issues)
assert [issue.number for issue in issues] == [1, 2, 3]
assert [issue.title for issue in issues] == ["PR 1", "My PR", "PR 3"]
assert [issue.head_branch for issue in issues] == ["b1", "b2", "b3"]
assert len(issues[0].review_threads) == 2 # Only unresolved threads
assert (
issues[0].review_threads[0].comment
== "Unresolved comment 1\n---\nlatest feedback:\nFollow up thread\n"
)
assert issues[0].review_threads[0].files == ["/frontend/header.tsx"]
assert (
issues[0].review_threads[1].comment
== "latest feedback:\nUnresolved comment 3\n"
)
assert issues[0].review_threads[1].files == ["/another/file.py"]
assert issues[0].closing_issues == ["Issue 1 body", "Issue 2 body"]
assert issues[0].thread_ids == ["1", "3"]
@pytest.mark.asyncio
async def test_complete_runtime():
mock_runtime = MagicMock()
mock_runtime.run_action.side_effect = [
create_cmd_output(
exit_code=0, content="", command_id=1, command="cd /workspace"
),
create_cmd_output(
exit_code=0,
content="",
command_id=2,
command='git config --global core.pager ""',
),
create_cmd_output(
exit_code=0,
content="",
command_id=3,
command="git config --global --add safe.directory /workspace",
),
create_cmd_output(
exit_code=0,
content="",
command_id=4,
command="git diff base_commit_hash fix",
),
create_cmd_output(
exit_code=0, content="git diff content", command_id=5, command="git apply"
),
]
result = await complete_runtime(mock_runtime, "base_commit_hash")
assert result == {"git_patch": "git diff content"}
assert mock_runtime.run_action.call_count == 5
@pytest.mark.asyncio
async def test_process_issue(mock_output_dir, mock_prompt_template):
# Mock dependencies
mock_create_runtime = MagicMock()
mock_initialize_runtime = AsyncMock()
mock_run_controller = AsyncMock()
mock_complete_runtime = AsyncMock()
handler_instance = MagicMock()
# Set up test data
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
)
base_commit = "abcdef1234567890"
repo_instruction = "Resolve this repo"
max_iterations = 5
llm_config = LLMConfig(model="test_model", api_key="test_api_key")
runtime_container_image = "test_image:latest"
# Test cases for different scenarios
test_cases = [
{
"name": "successful_run",
"run_controller_return": MagicMock(
history=[NullObservation(content="")],
metrics=MagicMock(
get=MagicMock(return_value={"test_result": "passed"})
),
last_error=None,
),
"run_controller_raises": None,
"expected_success": True,
"expected_error": None,
"expected_explanation": "Issue resolved successfully",
},
{
"name": "value_error",
"run_controller_return": None,
"run_controller_raises": ValueError("Test value error"),
"expected_success": False,
"expected_error": "Agent failed to run or crashed",
"expected_explanation": "Agent failed to run",
},
{
"name": "runtime_error",
"run_controller_return": None,
"run_controller_raises": RuntimeError("Test runtime error"),
"expected_success": False,
"expected_error": "Agent failed to run or crashed",
"expected_explanation": "Agent failed to run",
},
{
"name": "json_decode_error",
"run_controller_return": MagicMock(
history=[NullObservation(content="")],
metrics=MagicMock(
get=MagicMock(return_value={"test_result": "passed"})
),
last_error=None,
),
"run_controller_raises": None,
"expected_success": True,
"expected_error": None,
"expected_explanation": "Non-JSON explanation",
"is_pr": True,
"comment_success": [
True,
False,
], # To trigger the PR success logging code path
},
]
for test_case in test_cases:
# Reset mocks
mock_create_runtime.reset_mock()
mock_initialize_runtime.reset_mock()
mock_run_controller.reset_mock()
mock_complete_runtime.reset_mock()
handler_instance.reset_mock()
# Mock return values
mock_create_runtime.return_value = MagicMock(connect=AsyncMock())
if test_case["run_controller_raises"]:
mock_run_controller.side_effect = test_case["run_controller_raises"]
else:
mock_run_controller.return_value = test_case["run_controller_return"]
mock_run_controller.side_effect = None
mock_complete_runtime.return_value = {"git_patch": "test patch"}
handler_instance.guess_success.return_value = (
test_case["expected_success"],
test_case.get("comment_success", None),
test_case["expected_explanation"],
)
handler_instance.get_instruction.return_value = ("Test instruction", [])
handler_instance.issue_type = "pr" if test_case.get("is_pr", False) else "issue"
with patch(
"openhands.resolver.resolve_issue.create_runtime", mock_create_runtime
), patch(
"openhands.resolver.resolve_issue.initialize_runtime",
mock_initialize_runtime,
), patch(
"openhands.resolver.resolve_issue.run_controller", mock_run_controller
), patch(
"openhands.resolver.resolve_issue.complete_runtime", mock_complete_runtime
), patch("openhands.resolver.resolve_issue.logger"):
# Call the function
result = await process_issue(
issue,
base_commit,
max_iterations,
llm_config,
mock_output_dir,
runtime_container_image,
mock_prompt_template,
handler_instance,
repo_instruction,
reset_logger=False,
)
# Assert the result
expected_issue_type = "pr" if test_case.get("is_pr", False) else "issue"
assert handler_instance.issue_type == expected_issue_type
assert isinstance(result, ResolverOutput)
assert result.issue == issue
assert result.base_commit == base_commit
assert result.git_patch == "test patch"
assert result.success == test_case["expected_success"]
assert result.success_explanation == test_case["expected_explanation"]
assert result.error == test_case["expected_error"]
# Assert that the mocked functions were called
mock_create_runtime.assert_called_once()
mock_initialize_runtime.assert_called_once()
mock_run_controller.assert_called_once()
mock_complete_runtime.assert_called_once()
# Assert that guess_success was called only for successful runs
if test_case["expected_success"]:
handler_instance.guess_success.assert_called_once()
else:
handler_instance.guess_success.assert_not_called()
def test_get_instruction(mock_prompt_template, mock_followup_prompt_template):
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=123,
title="Test Issue",
body="This is a test issue refer to image ![First Image](https://sampleimage.com/image1.png)",
)
issue_handler = IssueHandler("owner", "repo", "token")
instruction, images_urls = issue_handler.get_instruction(
issue, mock_prompt_template, None
)
expected_instruction = "Issue: Test Issue\n\nThis is a test issue refer to image ![First Image](https://sampleimage.com/image1.png)\n\nPlease fix this issue."
assert images_urls == ["https://sampleimage.com/image1.png"]
assert issue_handler.issue_type == "issue"
assert instruction == expected_instruction
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=123,
title="Test Issue",
body="This is a test issue",
closing_issues=["Issue 1 fix the type"],
review_threads=[
ReviewThread(
comment="There is still a typo 'pthon' instead of 'python'", files=[]
)
],
)
pr_handler = PRHandler("owner", "repo", "token")
instruction, images_urls = pr_handler.get_instruction(
issue, mock_followup_prompt_template, None
)
expected_instruction = "Issue context: [\n \"Issue 1 fix the type\"\n]\n\nReview comments: None\n\nReview threads: [\n \"There is still a typo 'pthon' instead of 'python'\"\n]\n\nFiles: []\n\nPlease fix this issue."
assert images_urls == []
assert pr_handler.issue_type == "pr"
assert instruction == expected_instruction
def test_file_instruction():
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=123,
title="Test Issue",
body="This is a test issue ![image](https://sampleimage.com/sample.png)",
)
# load prompt from openhands/resolver/prompts/resolve/basic.jinja
with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
prompt = f.read()
# Test without thread comments
issue_handler = IssueHandler("owner", "repo", "token")
instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)
expected_instruction = """Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
# Problem Statement
Test Issue
This is a test issue ![image](https://sampleimage.com/sample.png)
IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.
When you think you have fixed the issue through code changes, please finish the interaction."""
assert instruction == expected_instruction
assert images_urls == ["https://sampleimage.com/sample.png"]
def test_file_instruction_with_repo_instruction():
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=123,
title="Test Issue",
body="This is a test issue",
)
# load prompt from openhands/resolver/prompts/resolve/basic.jinja
with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
prompt = f.read()
# load repo instruction from openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt
with open(
"openhands/resolver/prompts/repo_instructions/all-hands-ai___openhands-resolver.txt",
"r",
) as f:
repo_instruction = f.read()
issue_handler = IssueHandler("owner", "repo", "token")
instruction, image_urls = issue_handler.get_instruction(
issue, prompt, repo_instruction
)
expected_instruction = """Please fix the following issue for the repository in /workspace.
An environment has been set up for you to start working. You may assume all necessary tools are installed.
# Problem Statement
Test Issue
This is a test issue
IMPORTANT: You should ONLY interact with the environment provided to you AND NEVER ASK FOR HUMAN HELP.
You SHOULD INCLUDE PROPER INDENTATION in your edit commands.
Some basic information about this repository:
This is a Python repo for openhands-resolver, a library that attempts to resolve github issues with the AI agent OpenHands.
- Setup: `poetry install --with test --with dev`
- Testing: `poetry run pytest tests/test_*.py`
When you think you have fixed the issue through code changes, please finish the interaction."""
assert instruction == expected_instruction
assert issue_handler.issue_type == "issue"
assert image_urls == []
def test_guess_success():
mock_issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
)
mock_history = [
create_cmd_output(
exit_code=0, content="", command_id=1, command="cd /workspace"
)
]
mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
content="--- success\ntrue\n--- explanation\nIssue resolved successfully"
)
)
]
issue_handler = IssueHandler("owner", "repo", "token")
with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
assert issue_handler.issue_type == "issue"
assert comment_success is None
assert success
assert explanation == "Issue resolved successfully"
def test_guess_success_with_thread_comments():
mock_issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
thread_comments=[
"First comment",
"Second comment",
"latest feedback:\nPlease add tests",
],
)
mock_history = [MagicMock(message="I have added tests for this case")]
mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
content="--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling"
)
)
]
issue_handler = IssueHandler("owner", "repo", "token")
with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
assert issue_handler.issue_type == "issue"
assert comment_success is None
assert success
assert "Tests have been added" in explanation
def test_instruction_with_thread_comments():
# Create an issue with thread comments
issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=123,
title="Test Issue",
body="This is a test issue",
thread_comments=[
"First comment",
"Second comment",
"latest feedback:\nPlease add tests",
],
)
# Load the basic prompt template
with open("openhands/resolver/prompts/resolve/basic.jinja", "r") as f:
prompt = f.read()
issue_handler = IssueHandler("owner", "repo", "token")
instruction, images_urls = issue_handler.get_instruction(issue, prompt, None)
# Verify that thread comments are included in the instruction
assert "First comment" in instruction
assert "Second comment" in instruction
assert "Please add tests" in instruction
assert "Issue Thread Comments:" in instruction
assert images_urls == []
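# --- Illustration (not part of the original test file) ------------------------
# The assertions above pin down what the rendered instruction must contain. A
# standalone Python/jinja2 sketch that mimics that behavior (the real template
# lives in openhands/resolver/prompts/resolve/basic.jinja; this conditional and
# the "---" separator are assumptions consistent with the asserted substrings):
import jinja2

template = jinja2.Template(
    "{{ body }}"
    "{% if thread_comments %}\n\nIssue Thread Comments:\n"
    "{{ thread_comments | join(sep) }}{% endif %}"
)
rendered = template.render(
    body="This is a test issue",
    thread_comments=[
        "First comment",
        "Second comment",
        "latest feedback:\nPlease add tests",
    ],
    sep="\n---\n",
)
assert "Issue Thread Comments:" in rendered and "Please add tests" in rendered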
def test_guess_success_failure():
mock_issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
thread_comments=[
"First comment",
"Second comment",
"latest feedback:\nPlease add tests",
],
)
mock_history = [MagicMock(message="I have added tests for this case")]
mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
content="--- success\ntrue\n--- explanation\nTests have been added to verify thread comments handling"
)
)
]
issue_handler = IssueHandler("owner", "repo", "token")
with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
assert issue_handler.issue_type == "issue"
assert comment_success is None
assert success
assert "Tests have been added" in explanation
def test_guess_success_negative_case():
mock_issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
)
mock_history = [
create_cmd_output(
exit_code=0, content="", command_id=1, command="cd /workspace"
)
]
mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(
message=MagicMock(
content="--- success\nfalse\n--- explanation\nIssue not resolved"
)
)
]
issue_handler = IssueHandler("owner", "repo", "token")
with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
assert issue_handler.issue_type == "issue"
assert comment_success is None
assert not success
assert explanation == "Issue not resolved"
def test_guess_success_invalid_output():
mock_issue = GithubIssue(
owner="test_owner",
repo="test_repo",
number=1,
title="Test Issue",
body="This is a test issue",
)
mock_history = [
create_cmd_output(
exit_code=0, content="", command_id=1, command="cd /workspace"
)
]
mock_llm_config = LLMConfig(model="test_model", api_key="test_api_key")
mock_completion_response = MagicMock()
mock_completion_response.choices = [
MagicMock(message=MagicMock(content="This is not a valid output"))
]
issue_handler = IssueHandler("owner", "repo", "token")
with patch("litellm.completion", MagicMock(return_value=mock_completion_response)):
success, comment_success, explanation = issue_handler.guess_success(
mock_issue, mock_history, mock_llm_config
)
assert issue_handler.issue_type == "issue"
assert comment_success is None
assert not success
assert (
explanation
== "Failed to decode answer from LLM response: This is not a valid output"
)
def test_download_pr_with_review_comments():
handler = PRHandler("owner", "repo", "token")
mock_pr_response = MagicMock()
mock_pr_response.json.side_effect = [
[
{
"number": 1,
"title": "PR 1",
"body": "This is a pull request",
"head": {"ref": "b1"},
},
],
None,
]
mock_pr_response.raise_for_status = MagicMock()
# Mock for PR comments response
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [] # No PR comments
mock_comments_response.raise_for_status = MagicMock()
# Mock for GraphQL request with review comments but no threads
mock_graphql_response = MagicMock()
mock_graphql_response.json.side_effect = lambda: {
"data": {
"repository": {
"pullRequest": {
"closingIssuesReferences": {"edges": []},
"reviews": {
"nodes": [
{"body": "Please fix this typo"},
{"body": "Add more tests"},
]
},
}
}
}
}
mock_graphql_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
if "/comments" in url:
return mock_comments_response
return mock_pr_response
with patch("requests.get", side_effect=get_mock_response):
with patch("requests.post", return_value=mock_graphql_response):
issues = handler.get_converted_issues()
assert len(issues) == 1
assert handler.issue_type == "pr"
assert isinstance(issues[0], GithubIssue)
assert issues[0].number == 1
assert issues[0].title == "PR 1"
assert issues[0].head_branch == "b1"
# Verify review comments are set but threads are empty
assert len(issues[0].review_comments) == 2
assert issues[0].review_comments[0] == "Please fix this typo"
assert issues[0].review_comments[1] == "Add more tests"
assert not issues[0].review_threads
assert not issues[0].closing_issues
assert not issues[0].thread_ids
def test_download_issue_with_specific_comment():
handler = IssueHandler("owner", "repo", "token")
# Define the specific comment_id to filter
specific_comment_id = 101
# Mock issue and comment responses
mock_issue_response = MagicMock()
mock_issue_response.json.side_effect = [
[
{"number": 1, "title": "Issue 1", "body": "This is an issue"},
],
None,
]
mock_issue_response.raise_for_status = MagicMock()
mock_comments_response = MagicMock()
mock_comments_response.json.return_value = [
{
"id": specific_comment_id,
"body": "Specific comment body",
"issue_url": "https://api.github.com/repos/owner/repo/issues/1",
},
{
"id": 102,
"body": "Another comment body",
"issue_url": "https://api.github.com/repos/owner/repo/issues/2",
},
]
mock_comments_response.raise_for_status = MagicMock()
def get_mock_response(url, *args, **kwargs):
if "/comments" in url:
return mock_comments_response
return mock_issue_response
with patch("requests.get", side_effect=get_mock_response):
issues = handler.get_converted_issues(comment_id=specific_comment_id)
assert len(issues) == 1
assert issues[0].number == 1
assert issues[0].title == "Issue 1"
assert issues[0].thread_comments == ["Specific comment body"]
if __name__ == "__main__":
pytest.main()

File diff suppressed because it is too large