Mirror of https://github.com/OpenHands/OpenHands.git (synced 2025-12-26 05:48:36 +08:00)

Fix issue #5179: [frontend]: Push to Github button should only push branch, but not creating a PR (#5181)

Co-authored-by: Xingyao Wang <xingyao6@illinois.edu>
Co-authored-by: Xingyao Wang <xingyao@all-hands.dev>
Co-authored-by: Graham Neubig <neubig@gmail.com>

This commit is contained in:
parent 678436da30
commit d267c066e7
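For context, the behavior described by the commit title boils down to gating the GitHub suggestion buttons on a local hasPullRequest flag: before a PR exists the UI offers "Push to Branch" and "Push & Create PR", and afterwards only "Push changes to PR". The sketch below is a simplified, hypothetical reduction of that logic; the PushButtons name, the onSend prop, and the plain <button> elements are assumptions for illustration, not the actual ChatInterface code shown in the diff further down.

import React from "react";

interface PushButtonsProps {
  // Hypothetical callback standing in for handleSendMessage(value, []).
  onSend: (message: string) => void;
}

export function PushButtons({ onSend }: PushButtonsProps) {
  // Once a PR has been created, only the "update PR" action is offered.
  const [hasPullRequest, setHasPullRequest] = React.useState(false);

  if (hasPullRequest) {
    return (
      <button
        type="button"
        onClick={() =>
          onSend("Please push the latest changes to the existing pull request.")
        }
      >
        Push changes to PR
      </button>
    );
  }

  return (
    <div className="flex flex-row gap-2">
      {/* Push the current branch without opening a pull request */}
      <button
        type="button"
        onClick={() =>
          onSend(
            "Please push the changes to a remote branch on GitHub, but do NOT create a pull request.",
          )
        }
      >
        Push to Branch
      </button>
      {/* Push and open a pull request, then remember that one exists */}
      <button
        type="button"
        onClick={() => {
          onSend("Please push the changes to GitHub and open a pull request.");
          setHasPullRequest(true);
        }}
      >
        Push & Create PR
      </button>
    </div>
  );
}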
@@ -281,6 +281,62 @@ describe.skip("ChatInterface", () => {
    expect(within(error).getByText("Something went wrong")).toBeInTheDocument();
  });

  it("should render both GitHub buttons initially when ghToken is available", () => {
    vi.mock("@remix-run/react", async (importActual) => ({
      ...(await importActual<typeof import("@remix-run/react")>()),
      useRouteLoaderData: vi.fn(() => ({ ghToken: "test-token" })),
    }));

    const messages: Message[] = [
      {
        sender: "assistant",
        content: "Hello",
        imageUrls: [],
        timestamp: new Date().toISOString(),
      },
    ];
    renderChatInterface(messages);

    const pushButton = screen.getByRole("button", { name: "Push to Branch" });
    const prButton = screen.getByRole("button", { name: "Push & Create PR" });

    expect(pushButton).toBeInTheDocument();
    expect(prButton).toBeInTheDocument();
    expect(pushButton).toHaveTextContent("Push to Branch");
    expect(prButton).toHaveTextContent("Push & Create PR");
  });

  it("should render only 'Push changes to PR' button after PR is created", async () => {
    vi.mock("@remix-run/react", async (importActual) => ({
      ...(await importActual<typeof import("@remix-run/react")>()),
      useRouteLoaderData: vi.fn(() => ({ ghToken: "test-token" })),
    }));

    const messages: Message[] = [
      {
        sender: "assistant",
        content: "Hello",
        imageUrls: [],
        timestamp: new Date().toISOString(),
      },
    ];
    const { rerender } = renderChatInterface(messages);
    const user = userEvent.setup();

    // Click the "Push & Create PR" button
    const prButton = screen.getByRole("button", { name: "Push & Create PR" });
    await user.click(prButton);

    // Re-render to trigger state update
    rerender(<ChatInterface />);

    // Verify only one button is shown
    const pushToPrButton = screen.getByRole("button", { name: "Push changes to PR" });
    expect(pushToPrButton).toBeInTheDocument();
    expect(screen.queryByRole("button", { name: "Push to Branch" })).not.toBeInTheDocument();
    expect(screen.queryByRole("button", { name: "Push & Create PR" })).not.toBeInTheDocument();
  });

  it("should render feedback actions if there are more than 3 messages", () => {
    const messages: Message[] = [
      {
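The same behavior can also be exercised in isolation. A self-contained Vitest/Testing Library sketch against the hypothetical PushButtons component from the top of this page might look as follows; it assumes the usual @testing-library/jest-dom matcher setup and an illustrative "./push-buttons" module path.

import { render, screen } from "@testing-library/react";
import userEvent from "@testing-library/user-event";
import { describe, expect, it, vi } from "vitest";

import { PushButtons } from "./push-buttons"; // hypothetical module from the sketch above

describe("PushButtons (sketch)", () => {
  it("swaps both buttons for 'Push changes to PR' after a PR is created", async () => {
    const user = userEvent.setup();
    const onSend = vi.fn();
    render(<PushButtons onSend={onSend} />);

    await user.click(screen.getByRole("button", { name: "Push & Create PR" }));

    expect(onSend).toHaveBeenCalledTimes(1);
    expect(screen.getByRole("button", { name: "Push changes to PR" })).toBeInTheDocument();
    expect(screen.queryByRole("button", { name: "Push to Branch" })).not.toBeInTheDocument();
  });
});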
@@ -2,4 +2,4 @@
  "APP_MODE": "oss",
  "GITHUB_CLIENT_ID": "",
  "POSTHOG_CLIENT_KEY": "phc_3ESMmY9SgqEAGBB6sMGK5ayYHkeUuknH2vP6FmWH9RA"
}
}
@@ -52,6 +52,7 @@ export function ChatInterface() {
  const [feedbackModalIsOpen, setFeedbackModalIsOpen] = React.useState(false);
  const [messageToSend, setMessageToSend] = React.useState<string | null>(null);
  const [isDownloading, setIsDownloading] = React.useState(false);
  const [hasPullRequest, setHasPullRequest] = React.useState(false);

  React.useEffect(() => {
    if (status === WsClientProviderStatus.ACTIVE) {
@@ -175,16 +176,44 @@ export function ChatInterface() {
        curAgentState === AgentState.FINISHED) && (
        <div className="flex flex-col gap-2 mb-2">
          {gitHubToken ? (
            <SuggestionItem
              suggestion={{
                label: "Push to GitHub",
                value:
                  "Please push the changes to GitHub and open a pull request.",
              }}
              onClick={(value) => {
                handleSendMessage(value, []);
              }}
            />
            <div className="flex flex-row gap-2 justify-center w-full">
              {!hasPullRequest ? (
                <>
                  <SuggestionItem
                    suggestion={{
                      label: "Push to Branch",
                      value:
                        "Please push the changes to a remote branch on GitHub, but do NOT create a pull request.",
                    }}
                    onClick={(value) => {
                      handleSendMessage(value, []);
                    }}
                  />
                  <SuggestionItem
                    suggestion={{
                      label: "Push & Create PR",
                      value:
                        "Please push the changes to GitHub and open a pull request.",
                    }}
                    onClick={(value) => {
                      handleSendMessage(value, []);
                      setHasPullRequest(true);
                    }}
                  />
                </>
              ) : (
                <SuggestionItem
                  suggestion={{
                    label: "Push changes to PR",
                    value:
                      "Please push the latest changes to the existing pull request.",
                  }}
                  onClick={(value) => {
                    handleSendMessage(value, []);
                  }}
                />
              )}
            </div>
          ) : (
            <SuggestionItem
              suggestion={{
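For reference, the suggestion prop shape assumed by the JSX above (and by the SuggestionItem hunk that follows) can be inferred roughly as in the sketch below; the actual SuggestionItemProps interface in the repository may differ.

// Inferred from the usage above; illustrative only.
interface Suggestion {
  label: string; // button text, e.g. "Push to Branch"
  value: string; // message sent to the agent when clicked
}

interface SuggestionItemProps {
  suggestion: Suggestion;
  onClick: (value: string) => void;
}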
@@ -7,7 +7,7 @@ interface SuggestionItemProps {

export function SuggestionItem({ suggestion, onClick }: SuggestionItemProps) {
  return (
    <li className="list-none border border-neutral-600 rounded-xl hover:bg-neutral-700">
    <li className="list-none border border-neutral-600 rounded-xl hover:bg-neutral-700 flex-1">
      <button
        type="button"
        data-testid="suggestion"
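The only change to SuggestionItem itself is the added flex-1 utility, which lets sibling items split the new flex-row container evenly. A minimal, stand-alone layout sketch follows (Tailwind class names as above, element structure simplified and not the real component):

export function SuggestionRowSketch() {
  return (
    <ul className="flex flex-row gap-2 justify-center w-full">
      {/* With flex-1 on each item, both buttons share the row width equally */}
      <li className="flex-1 list-none border border-neutral-600 rounded-xl">
        <button type="button" className="w-full p-2">Push to Branch</button>
      </li>
      <li className="flex-1 list-none border border-neutral-600 rounded-xl">
        <button type="button" className="w-full p-2">Push & Create PR</button>
      </li>
    </ul>
  );
}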
@@ -187,7 +187,9 @@ class CodeActAgent(Agent):
                )
            ]
        elif isinstance(action, CmdRunAction) and action.source == 'user':
            content = [TextContent(text=f'User executed the command:\n{action.command}')]
            content = [
                TextContent(text=f'User executed the command:\n{action.command}')
            ]
            return [
                Message(
                    role='user',
@@ -24,6 +24,7 @@ class MessageAction(Action):
    @images_urls.setter
    def images_urls(self, value):
        self.image_urls = value

    def __str__(self) -> str:
        ret = f'**MessageAction** (source={self.source})\n'
        ret += f'CONTENT: {self.content}'
@@ -69,7 +69,7 @@ def action_from_dict(action: dict) -> Action:
    # images_urls has been renamed to image_urls
    if 'images_urls' in args:
        args['image_urls'] = args.pop('images_urls')

    try:
        decoded_action = action_class(**args)
        if 'timeout' in action:
@@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-

from .patch import parse_patch
from .apply import apply_diff
from .patch import parse_patch

__all__ = ["parse_patch", "apply_diff"]
__all__ = ['parse_patch', 'apply_diff']
@@ -10,33 +10,33 @@ from .snippets import remove, which

def _apply_diff_with_subprocess(diff, lines, reverse=False):
    # call out to patch program
    patchexec = which("patch")
    patchexec = which('patch')
    if not patchexec:
        raise SubprocessException("cannot find patch program", code=-1)
        raise SubprocessException('cannot find patch program', code=-1)

    tempdir = tempfile.gettempdir()

    filepath = os.path.join(tempdir, "wtp-" + str(hash(diff.header)))
    oldfilepath = filepath + ".old"
    newfilepath = filepath + ".new"
    rejfilepath = filepath + ".rej"
    patchfilepath = filepath + ".patch"
    with open(oldfilepath, "w") as f:
        f.write("\n".join(lines) + "\n")
    filepath = os.path.join(tempdir, 'wtp-' + str(hash(diff.header)))
    oldfilepath = filepath + '.old'
    newfilepath = filepath + '.new'
    rejfilepath = filepath + '.rej'
    patchfilepath = filepath + '.patch'
    with open(oldfilepath, 'w') as f:
        f.write('\n'.join(lines) + '\n')

    with open(patchfilepath, "w") as f:
    with open(patchfilepath, 'w') as f:
        f.write(diff.text)

    args = [
        patchexec,
        "--reverse" if reverse else "--forward",
        "--quiet",
        "--no-backup-if-mismatch",
        "-o",
        '--reverse' if reverse else '--forward',
        '--quiet',
        '--no-backup-if-mismatch',
        '-o',
        newfilepath,
        "-i",
        '-i',
        patchfilepath,
        "-r",
        '-r',
        rejfilepath,
        oldfilepath,
    ]
@@ -58,7 +58,7 @@ def _apply_diff_with_subprocess(diff, lines, reverse=False):

    # do this last to ensure files get cleaned up
    if ret != 0:
        raise SubprocessException("patch program failed", code=ret)
        raise SubprocessException('patch program failed', code=ret)

    return lines, rejlines
@@ -7,7 +7,7 @@ class HunkException(PatchingException):
        self.hunk = hunk
        if hunk is not None:
            super(HunkException, self).__init__(
                "{msg}, in hunk #{n}".format(msg=msg, n=hunk)
                '{msg}, in hunk #{n}'.format(msg=msg, n=hunk)
            )
        else:
            super(HunkException, self).__init__(msg)
@@ -8,67 +8,67 @@ from . import exceptions
from .snippets import findall_regex, split_by_regex

header = namedtuple(
    "header",
    "index_path old_path old_version new_path new_version",
    'header',
    'index_path old_path old_version new_path new_version',
)

diffobj = namedtuple("diffobj", "header changes text")
Change = namedtuple("Change", "old new line hunk")
diffobj = namedtuple('diffobj', 'header changes text')
Change = namedtuple('Change', 'old new line hunk')

file_timestamp_str = "(.+?)(?:\t|:| +)(.*)"
file_timestamp_str = '(.+?)(?:\t|:| +)(.*)'
# .+? was previously [^:\t\n\r\f\v]+

# general diff regex
diffcmd_header = re.compile("^diff.* (.+) (.+)$")
unified_header_index = re.compile("^Index: (.+)$")
unified_header_old_line = re.compile(r"^--- " + file_timestamp_str + "$")
unified_header_new_line = re.compile(r"^\+\+\+ " + file_timestamp_str + "$")
unified_hunk_start = re.compile(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$")
unified_change = re.compile("^([-+ ])(.*)$")
diffcmd_header = re.compile('^diff.* (.+) (.+)$')
unified_header_index = re.compile('^Index: (.+)$')
unified_header_old_line = re.compile(r'^--- ' + file_timestamp_str + '$')
unified_header_new_line = re.compile(r'^\+\+\+ ' + file_timestamp_str + '$')
unified_hunk_start = re.compile(r'^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@(.*)$')
unified_change = re.compile('^([-+ ])(.*)$')

context_header_old_line = re.compile(r"^\*\*\* " + file_timestamp_str + "$")
context_header_new_line = re.compile("^--- " + file_timestamp_str + "$")
context_hunk_start = re.compile(r"^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$")
context_hunk_old = re.compile(r"^\*\*\* (\d+),?(\d*) \*\*\*\*$")
context_hunk_new = re.compile(r"^--- (\d+),?(\d*) ----$")
context_change = re.compile("^([-+ !]) (.*)$")
context_header_old_line = re.compile(r'^\*\*\* ' + file_timestamp_str + '$')
context_header_new_line = re.compile('^--- ' + file_timestamp_str + '$')
context_hunk_start = re.compile(r'^\*\*\*\*\*\*\*\*\*\*\*\*\*\*\*$')
context_hunk_old = re.compile(r'^\*\*\* (\d+),?(\d*) \*\*\*\*$')
context_hunk_new = re.compile(r'^--- (\d+),?(\d*) ----$')
context_change = re.compile('^([-+ !]) (.*)$')

ed_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])$")
ed_hunk_end = re.compile("^.$")
ed_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])$')
ed_hunk_end = re.compile('^.$')
# much like forward ed, but no 'c' type
rcs_ed_hunk_start = re.compile(r"^([ad])(\d+) ?(\d*)$")
rcs_ed_hunk_start = re.compile(r'^([ad])(\d+) ?(\d*)$')

default_hunk_start = re.compile(r"^(\d+),?(\d*)([acd])(\d+),?(\d*)$")
default_hunk_mid = re.compile("^---$")
default_change = re.compile("^([><]) (.*)$")
default_hunk_start = re.compile(r'^(\d+),?(\d*)([acd])(\d+),?(\d*)$')
default_hunk_mid = re.compile('^---$')
default_change = re.compile('^([><]) (.*)$')

# Headers

# git has a special index header and no end part
git_diffcmd_header = re.compile("^diff --git a/(.+) b/(.+)$")
git_header_index = re.compile(r"^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$")
git_header_old_line = re.compile("^--- (.+)$")
git_header_new_line = re.compile(r"^\+\+\+ (.+)$")
git_header_file_mode = re.compile(r"^(new|deleted) file mode \d{6}$")
git_header_binary_file = re.compile("^Binary files (.+) and (.+) differ")
git_binary_patch_start = re.compile(r"^GIT binary patch$")
git_binary_literal_start = re.compile(r"^literal (\d+)$")
git_binary_delta_start = re.compile(r"^delta (\d+)$")
base85string = re.compile(r"^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$")
git_diffcmd_header = re.compile('^diff --git a/(.+) b/(.+)$')
git_header_index = re.compile(r'^index ([a-f0-9]+)..([a-f0-9]+) ?(\d*)$')
git_header_old_line = re.compile('^--- (.+)$')
git_header_new_line = re.compile(r'^\+\+\+ (.+)$')
git_header_file_mode = re.compile(r'^(new|deleted) file mode \d{6}$')
git_header_binary_file = re.compile('^Binary files (.+) and (.+) differ')
git_binary_patch_start = re.compile(r'^GIT binary patch$')
git_binary_literal_start = re.compile(r'^literal (\d+)$')
git_binary_delta_start = re.compile(r'^delta (\d+)$')
base85string = re.compile(r'^[0-9A-Za-z!#$%&()*+;<=>?@^_`{|}~-]+$')

bzr_header_index = re.compile("=== (.+)")
bzr_header_index = re.compile('=== (.+)')
bzr_header_old_line = unified_header_old_line
bzr_header_new_line = unified_header_new_line

svn_header_index = unified_header_index
svn_header_timestamp_version = re.compile(r"\((?:working copy|revision (\d+))\)")
svn_header_timestamp = re.compile(r".*(\(.*\))$")
svn_header_timestamp_version = re.compile(r'\((?:working copy|revision (\d+))\)')
svn_header_timestamp = re.compile(r'.*(\(.*\))$')

cvs_header_index = unified_header_index
cvs_header_rcs = re.compile(r"^RCS file: (.+)(?:,\w{1}$|$)")
cvs_header_timestamp = re.compile(r"(.+)\t([\d.]+)")
cvs_header_timestamp_colon = re.compile(r":([\d.]+)\t(.+)")
old_cvs_diffcmd_header = re.compile("^diff.* (.+):(.*) (.+):(.*)$")
cvs_header_rcs = re.compile(r'^RCS file: (.+)(?:,\w{1}$|$)')
cvs_header_timestamp = re.compile(r'(.+)\t([\d.]+)')
cvs_header_timestamp_colon = re.compile(r':([\d.]+)\t(.+)')
old_cvs_diffcmd_header = re.compile('^diff.* (.+):(.*) (.+):(.*)$')


def parse_patch(text):
@ -97,7 +97,7 @@ def parse_patch(text):
|
||||
break
|
||||
|
||||
for diff in diffs:
|
||||
difftext = "\n".join(diff) + "\n"
|
||||
difftext = '\n'.join(diff) + '\n'
|
||||
h = parse_header(diff)
|
||||
d = parse_diff(diff)
|
||||
if h or d:
|
||||
@ -133,10 +133,10 @@ def parse_scm_header(text):
|
||||
if res:
|
||||
old_path = res.old_path
|
||||
new_path = res.new_path
|
||||
if old_path.startswith("a/"):
|
||||
if old_path.startswith('a/'):
|
||||
old_path = old_path[2:]
|
||||
|
||||
if new_path.startswith("b/"):
|
||||
if new_path.startswith('b/'):
|
||||
new_path = new_path[2:]
|
||||
|
||||
return header(
|
||||
@ -240,10 +240,10 @@ def parse_git_header(text):
|
||||
new_path = binary.group(2)
|
||||
|
||||
if old_path and new_path:
|
||||
if old_path.startswith("a/"):
|
||||
if old_path.startswith('a/'):
|
||||
old_path = old_path[2:]
|
||||
|
||||
if new_path.startswith("b/"):
|
||||
if new_path.startswith('b/'):
|
||||
new_path = new_path[2:]
|
||||
return header(
|
||||
index_path=None,
|
||||
@ -256,19 +256,19 @@ def parse_git_header(text):
|
||||
# if we go through all of the text without finding our normal info,
|
||||
# use the cmd if available
|
||||
if cmd_old_path and cmd_new_path and old_version and new_version:
|
||||
if cmd_old_path.startswith("a/"):
|
||||
if cmd_old_path.startswith('a/'):
|
||||
cmd_old_path = cmd_old_path[2:]
|
||||
|
||||
if cmd_new_path.startswith("b/"):
|
||||
if cmd_new_path.startswith('b/'):
|
||||
cmd_new_path = cmd_new_path[2:]
|
||||
|
||||
return header(
|
||||
index_path=None,
|
||||
# wow, I kind of hate this:
|
||||
# assume /dev/null if the versions are zeroed out
|
||||
old_path="/dev/null" if old_version == "0000000" else cmd_old_path,
|
||||
old_path='/dev/null' if old_version == '0000000' else cmd_old_path,
|
||||
old_version=old_version,
|
||||
new_path="/dev/null" if new_version == "0000000" else cmd_new_path,
|
||||
new_path='/dev/null' if new_version == '0000000' else cmd_new_path,
|
||||
new_version=new_version,
|
||||
)
|
||||
|
||||
@ -569,10 +569,10 @@ def parse_default_diff(text):
|
||||
kind = c.group(1)
|
||||
line = c.group(2)
|
||||
|
||||
if kind == "<" and (r != old_len or r == 0):
|
||||
if kind == '<' and (r != old_len or r == 0):
|
||||
changes.append(Change(old + r, None, line, hunk_n))
|
||||
r += 1
|
||||
elif kind == ">" and (i != new_len or i == 0):
|
||||
elif kind == '>' and (i != new_len or i == 0):
|
||||
changes.append(Change(None, new + i, line, hunk_n))
|
||||
i += 1
|
||||
|
||||
@ -627,13 +627,13 @@ def parse_unified_diff(text):
|
||||
kind = c.group(1)
|
||||
line = c.group(2)
|
||||
|
||||
if kind == "-" and (r != old_len or r == 0):
|
||||
if kind == '-' and (r != old_len or r == 0):
|
||||
changes.append(Change(old + r, None, line, hunk_n))
|
||||
r += 1
|
||||
elif kind == "+" and (i != new_len or i == 0):
|
||||
elif kind == '+' and (i != new_len or i == 0):
|
||||
changes.append(Change(None, new + i, line, hunk_n))
|
||||
i += 1
|
||||
elif kind == " ":
|
||||
elif kind == ' ':
|
||||
if r != old_len and i != new_len:
|
||||
changes.append(Change(old + r, new + i, line, hunk_n))
|
||||
r += 1
|
||||
@ -667,7 +667,7 @@ def parse_context_diff(text):
|
||||
k = 0
|
||||
parts = split_by_regex(hunk, context_hunk_new)
|
||||
if len(parts) != 2:
|
||||
raise exceptions.ParseException("Context diff invalid", hunk_n)
|
||||
raise exceptions.ParseException('Context diff invalid', hunk_n)
|
||||
|
||||
old_hunk = parts[0]
|
||||
new_hunk = parts[1]
|
||||
@ -695,7 +695,7 @@ def parse_context_diff(text):
|
||||
|
||||
# now have old and new set, can start processing?
|
||||
if len(old_hunk) > 0 and len(new_hunk) == 0:
|
||||
msg = "Got unexpected change in removal hunk: "
|
||||
msg = 'Got unexpected change in removal hunk: '
|
||||
# only removes left?
|
||||
while len(old_hunk) > 0:
|
||||
c = context_change.match(old_hunk[0])
|
||||
@ -707,22 +707,22 @@ def parse_context_diff(text):
|
||||
kind = c.group(1)
|
||||
line = c.group(2)
|
||||
|
||||
if kind == "-" and (j != old_len or j == 0):
|
||||
if kind == '-' and (j != old_len or j == 0):
|
||||
changes.append(Change(old + j, None, line, hunk_n))
|
||||
j += 1
|
||||
elif kind == " " and (
|
||||
elif kind == ' ' and (
|
||||
(j != old_len and k != new_len) or (j == 0 or k == 0)
|
||||
):
|
||||
changes.append(Change(old + j, new + k, line, hunk_n))
|
||||
j += 1
|
||||
k += 1
|
||||
elif kind == "+" or kind == "!":
|
||||
elif kind == '+' or kind == '!':
|
||||
raise exceptions.ParseException(msg + kind, hunk_n)
|
||||
|
||||
continue
|
||||
|
||||
if len(old_hunk) == 0 and len(new_hunk) > 0:
|
||||
msg = "Got unexpected change in removal hunk: "
|
||||
msg = 'Got unexpected change in removal hunk: '
|
||||
# only insertions left?
|
||||
while len(new_hunk) > 0:
|
||||
c = context_change.match(new_hunk[0])
|
||||
@ -734,16 +734,16 @@ def parse_context_diff(text):
|
||||
kind = c.group(1)
|
||||
line = c.group(2)
|
||||
|
||||
if kind == "+" and (k != new_len or k == 0):
|
||||
if kind == '+' and (k != new_len or k == 0):
|
||||
changes.append(Change(None, new + k, line, hunk_n))
|
||||
k += 1
|
||||
elif kind == " " and (
|
||||
elif kind == ' ' and (
|
||||
(j != old_len and k != new_len) or (j == 0 or k == 0)
|
||||
):
|
||||
changes.append(Change(old + j, new + k, line, hunk_n))
|
||||
j += 1
|
||||
k += 1
|
||||
elif kind == "-" or kind == "!":
|
||||
elif kind == '-' or kind == '!':
|
||||
raise exceptions.ParseException(msg + kind, hunk_n)
|
||||
continue
|
||||
|
||||
@ -765,17 +765,17 @@ def parse_context_diff(text):
|
||||
if not (oc or nc):
|
||||
del old_hunk[0]
|
||||
del new_hunk[0]
|
||||
elif okind == " " and nkind == " " and oline == nline:
|
||||
elif okind == ' ' and nkind == ' ' and oline == nline:
|
||||
changes.append(Change(old + j, new + k, oline, hunk_n))
|
||||
j += 1
|
||||
k += 1
|
||||
del old_hunk[0]
|
||||
del new_hunk[0]
|
||||
elif okind == "-" or okind == "!" and (j != old_len or j == 0):
|
||||
elif okind == '-' or okind == '!' and (j != old_len or j == 0):
|
||||
changes.append(Change(old + j, None, oline, hunk_n))
|
||||
j += 1
|
||||
del old_hunk[0]
|
||||
elif nkind == "+" or nkind == "!" and (k != new_len or k == 0):
|
||||
elif nkind == '+' or nkind == '!' and (k != new_len or k == 0):
|
||||
changes.append(Change(None, new + k, nline, hunk_n))
|
||||
k += 1
|
||||
del new_hunk[0]
|
||||
@ -821,7 +821,7 @@ def parse_ed_diff(text):
|
||||
old_end = int(o.group(2)) if len(o.group(2)) else old
|
||||
|
||||
hunk_kind = o.group(3)
|
||||
if hunk_kind == "d":
|
||||
if hunk_kind == 'd':
|
||||
k = 0
|
||||
while old_end >= old:
|
||||
changes.append(Change(old + k, None, None, hunk_n))
|
||||
@ -832,7 +832,7 @@ def parse_ed_diff(text):
|
||||
|
||||
while len(hunk) > 0:
|
||||
e = ed_hunk_end.match(hunk[0])
|
||||
if not e and hunk_kind == "c":
|
||||
if not e and hunk_kind == 'c':
|
||||
k = 0
|
||||
while old_end >= old:
|
||||
changes.append(Change(old + k, None, None, hunk_n))
|
||||
@ -852,7 +852,7 @@ def parse_ed_diff(text):
|
||||
)
|
||||
i += 1
|
||||
j += 1
|
||||
if not e and hunk_kind == "a":
|
||||
if not e and hunk_kind == 'a':
|
||||
changes.append(
|
||||
Change(
|
||||
None,
|
||||
@ -900,7 +900,7 @@ def parse_rcs_ed_diff(text):
|
||||
old = int(o.group(2))
|
||||
size = int(o.group(3))
|
||||
|
||||
if hunk_kind == "a":
|
||||
if hunk_kind == 'a':
|
||||
old += total_change_size + 1
|
||||
total_change_size += size
|
||||
while size > 0 and len(hunk) > 0:
|
||||
@ -910,7 +910,7 @@ def parse_rcs_ed_diff(text):
|
||||
|
||||
del hunk[0]
|
||||
|
||||
elif hunk_kind == "d":
|
||||
elif hunk_kind == 'd':
|
||||
total_change_size -= size
|
||||
while size > 0:
|
||||
changes.append(Change(old + j, None, None, hunk_n))
|
||||
@ -938,8 +938,8 @@ def parse_git_binary_diff(text):
|
||||
# the sizes are used as latch-up
|
||||
new_size = 0
|
||||
old_size = 0
|
||||
old_encoded = ""
|
||||
new_encoded = ""
|
||||
old_encoded = ''
|
||||
new_encoded = ''
|
||||
for line in lines:
|
||||
if cmd_old_path is None and cmd_new_path is None:
|
||||
hm = git_diffcmd_header.match(line)
|
||||
@ -978,11 +978,11 @@ def parse_git_binary_diff(text):
|
||||
change = Change(None, 0, added_data, None)
|
||||
changes.append(change)
|
||||
new_size = 0
|
||||
new_encoded = ""
|
||||
new_encoded = ''
|
||||
else:
|
||||
# Invalid line format
|
||||
new_size = 0
|
||||
new_encoded = ""
|
||||
new_encoded = ''
|
||||
|
||||
# the second is removed file
|
||||
if old_size == 0:
|
||||
@ -1006,10 +1006,10 @@ def parse_git_binary_diff(text):
|
||||
change = Change(0, None, None, removed_data)
|
||||
changes.append(change)
|
||||
old_size = 0
|
||||
old_encoded = ""
|
||||
old_encoded = ''
|
||||
else:
|
||||
# Invalid line format
|
||||
old_size = 0
|
||||
old_encoded = ""
|
||||
old_encoded = ''
|
||||
|
||||
return changes
|
||||
|
||||
@ -54,7 +54,7 @@ def which(program):
|
||||
if is_exe(program):
|
||||
return program
|
||||
else:
|
||||
for path in os.environ["PATH"].split(os.pathsep):
|
||||
for path in os.environ['PATH'].split(os.pathsep):
|
||||
path = path.strip('"')
|
||||
exe_file = os.path.join(path, program)
|
||||
if is_exe(exe_file):
|
||||
|
||||
@@ -1,4 +1,4 @@
This is a Python repo for openhands-resolver, a library that attempts to resolve github issues with the AI agent OpenHands.

- Setup: `poetry install --with test --with dev`
- Testing: `poetry run pytest tests/test_*.py`
- Testing: `poetry run pytest tests/test_*.py`

@@ -1,4 +1,4 @@
This is a node repo for an RSS parser.
- Setup: `yes | npm install`
- Testing: `SKIP_BROWSER_TESTS=1 npm test`
- Writing Tests: Add to the `test` directory.
- Writing Tests: Add to the `test` directory.

@@ -14,4 +14,4 @@ For all changes to actual application code (e.g. in Python or Javascript), add a
Run the tests, and if they pass you are done!
You do NOT need to write new tests if there are only changes to documentation or configuration files.

When you think you have fixed the issue through code changes, please call the finish action to end the interaction.
When you think you have fixed the issue through code changes, please call the finish action to end the interaction.

@@ -10,4 +10,4 @@ You SHOULD INCLUDE PROPER INDENTATION in your edit commands.{% if repo_instructi
Some basic information about this repository:
{{ repo_instruction }}{% endif %}

When you think you have fixed the issue through code changes, please finish the interaction.
When you think you have fixed the issue through code changes, please finish the interaction.
@@ -38,7 +38,7 @@ def get_remote_startup_command(
            '-20', # Highest priority
            'sh',
            '-c',
            f'echo -1000 > /proc/self/oom_score_adj && exec {cmd_str}'
            f'echo -1000 > /proc/self/oom_score_adj && exec {cmd_str}',
        ]
    else:
        # If not root, run with normal priority
@ -1,22 +1,22 @@
|
||||
from openhands.resolver.issue_definitions import IssueHandler
|
||||
from openhands.resolver.github_issue import GithubIssue
|
||||
from openhands.events.action.message import MessageAction
|
||||
from openhands.core.config import LLMConfig
|
||||
from openhands.events.action.message import MessageAction
|
||||
from openhands.resolver.github_issue import GithubIssue
|
||||
from openhands.resolver.issue_definitions import IssueHandler
|
||||
|
||||
|
||||
def test_guess_success_multiline_explanation():
|
||||
# Mock data
|
||||
issue = GithubIssue(
|
||||
owner="test",
|
||||
repo="test",
|
||||
owner='test',
|
||||
repo='test',
|
||||
number=1,
|
||||
title="Test Issue",
|
||||
body="Test body",
|
||||
title='Test Issue',
|
||||
body='Test body',
|
||||
thread_comments=None,
|
||||
review_comments=None,
|
||||
)
|
||||
history = [MessageAction(content="Test message")]
|
||||
llm_config = LLMConfig(model="test", api_key="test")
|
||||
history = [MessageAction(content='Test message')]
|
||||
llm_config = LLMConfig(model='test', api_key='test')
|
||||
|
||||
# Create a mock response with multi-line explanation
|
||||
mock_response = """--- success
|
||||
@ -31,7 +31,7 @@ The PR successfully addressed the issue by:
|
||||
Automatic fix generated by OpenHands 🙌"""
|
||||
|
||||
# Create a handler instance
|
||||
handler = IssueHandler("test", "test", "test")
|
||||
handler = IssueHandler('test', 'test', 'test')
|
||||
|
||||
# Mock the litellm.completion call
|
||||
def mock_completion(*args, **kwargs):
|
||||
@ -61,11 +61,11 @@ Automatic fix generated by OpenHands 🙌"""
|
||||
|
||||
# Verify the results
|
||||
assert success is True
|
||||
assert "The PR successfully addressed the issue by:" in explanation
|
||||
assert "Fixed bug A" in explanation
|
||||
assert "Added test B" in explanation
|
||||
assert "Updated documentation C" in explanation
|
||||
assert "Automatic fix generated by OpenHands" in explanation
|
||||
assert 'The PR successfully addressed the issue by:' in explanation
|
||||
assert 'Fixed bug A' in explanation
|
||||
assert 'Added test B' in explanation
|
||||
assert 'Updated documentation C' in explanation
|
||||
assert 'Automatic fix generated by OpenHands' in explanation
|
||||
finally:
|
||||
# Restore the original function
|
||||
litellm.completion = original_completion
|
||||
|
||||
@ -1,94 +1,97 @@
|
||||
import pytest
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
import requests
|
||||
from unittest.mock import patch, MagicMock
|
||||
|
||||
from openhands.resolver.issue_definitions import PRHandler
|
||||
from openhands.resolver.github_issue import ReviewThread
|
||||
|
||||
|
||||
def test_handle_nonexistent_issue_reference():
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Mock the requests.get to simulate a 404 error
|
||||
mock_response = MagicMock()
|
||||
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError("404 Client Error: Not Found")
|
||||
|
||||
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
|
||||
'404 Client Error: Not Found'
|
||||
)
|
||||
|
||||
with patch('requests.get', return_value=mock_response):
|
||||
# Call the method with a non-existent issue reference
|
||||
result = handler._PRHandler__get_context_from_external_issues_references(
|
||||
closing_issues=[],
|
||||
closing_issue_numbers=[],
|
||||
issue_body="This references #999999", # Non-existent issue
|
||||
issue_body='This references #999999', # Non-existent issue
|
||||
review_comments=[],
|
||||
review_threads=[],
|
||||
thread_comments=None
|
||||
thread_comments=None,
|
||||
)
|
||||
|
||||
|
||||
# The method should return an empty list since the referenced issue couldn't be fetched
|
||||
assert result == []
|
||||
|
||||
|
||||
def test_handle_rate_limit_error():
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Mock the requests.get to simulate a rate limit error
|
||||
mock_response = MagicMock()
|
||||
mock_response.raise_for_status.side_effect = requests.exceptions.HTTPError(
|
||||
"403 Client Error: Rate Limit Exceeded"
|
||||
'403 Client Error: Rate Limit Exceeded'
|
||||
)
|
||||
|
||||
|
||||
with patch('requests.get', return_value=mock_response):
|
||||
# Call the method with an issue reference
|
||||
result = handler._PRHandler__get_context_from_external_issues_references(
|
||||
closing_issues=[],
|
||||
closing_issue_numbers=[],
|
||||
issue_body="This references #123",
|
||||
issue_body='This references #123',
|
||||
review_comments=[],
|
||||
review_threads=[],
|
||||
thread_comments=None
|
||||
thread_comments=None,
|
||||
)
|
||||
|
||||
|
||||
# The method should return an empty list since the request was rate limited
|
||||
assert result == []
|
||||
|
||||
|
||||
def test_handle_network_error():
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Mock the requests.get to simulate a network error
|
||||
with patch('requests.get', side_effect=requests.exceptions.ConnectionError("Network Error")):
|
||||
with patch(
|
||||
'requests.get', side_effect=requests.exceptions.ConnectionError('Network Error')
|
||||
):
|
||||
# Call the method with an issue reference
|
||||
result = handler._PRHandler__get_context_from_external_issues_references(
|
||||
closing_issues=[],
|
||||
closing_issue_numbers=[],
|
||||
issue_body="This references #123",
|
||||
issue_body='This references #123',
|
||||
review_comments=[],
|
||||
review_threads=[],
|
||||
thread_comments=None
|
||||
thread_comments=None,
|
||||
)
|
||||
|
||||
|
||||
# The method should return an empty list since the network request failed
|
||||
assert result == []
|
||||
|
||||
|
||||
def test_successful_issue_reference():
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Mock a successful response
|
||||
mock_response = MagicMock()
|
||||
mock_response.raise_for_status.return_value = None
|
||||
mock_response.json.return_value = {"body": "This is the referenced issue body"}
|
||||
|
||||
mock_response.json.return_value = {'body': 'This is the referenced issue body'}
|
||||
|
||||
with patch('requests.get', return_value=mock_response):
|
||||
# Call the method with an issue reference
|
||||
result = handler._PRHandler__get_context_from_external_issues_references(
|
||||
closing_issues=[],
|
||||
closing_issue_numbers=[],
|
||||
issue_body="This references #123",
|
||||
issue_body='This references #123',
|
||||
review_comments=[],
|
||||
review_threads=[],
|
||||
thread_comments=None
|
||||
thread_comments=None,
|
||||
)
|
||||
|
||||
|
||||
# The method should return a list with the referenced issue body
|
||||
assert result == ["This is the referenced issue body"]
|
||||
assert result == ['This is the referenced issue body']
|
||||
|
||||
@ -2,13 +2,13 @@ from openhands.resolver.issue_definitions import IssueHandler
|
||||
|
||||
|
||||
def test_extract_issue_references():
|
||||
handler = IssueHandler("test-owner", "test-repo", "test-token")
|
||||
handler = IssueHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Test basic issue reference
|
||||
assert handler._extract_issue_references("Fixes #123") == [123]
|
||||
assert handler._extract_issue_references('Fixes #123') == [123]
|
||||
|
||||
# Test multiple issue references
|
||||
assert handler._extract_issue_references("Fixes #123, #456") == [123, 456]
|
||||
assert handler._extract_issue_references('Fixes #123, #456') == [123, 456]
|
||||
|
||||
# Test issue references in code blocks should be ignored
|
||||
assert handler._extract_issue_references("""
|
||||
@ -22,13 +22,21 @@ def test_extract_issue_references():
|
||||
""") == [789]
|
||||
|
||||
# Test issue references in inline code should be ignored
|
||||
assert handler._extract_issue_references("This `#123` should be ignored but #456 should be extracted") == [456]
|
||||
assert handler._extract_issue_references(
|
||||
'This `#123` should be ignored but #456 should be extracted'
|
||||
) == [456]
|
||||
|
||||
# Test issue references in URLs should be ignored
|
||||
assert handler._extract_issue_references("Check http://example.com/#123 but #456 should be extracted") == [456]
|
||||
assert handler._extract_issue_references(
|
||||
'Check http://example.com/#123 but #456 should be extracted'
|
||||
) == [456]
|
||||
|
||||
# Test issue references in markdown links should be extracted
|
||||
assert handler._extract_issue_references("[Link to #123](http://example.com) and #456") == [123, 456]
|
||||
assert handler._extract_issue_references(
|
||||
'[Link to #123](http://example.com) and #456'
|
||||
) == [123, 456]
|
||||
|
||||
# Test issue references with text around them
|
||||
assert handler._extract_issue_references("Issue #123 is fixed and #456 is pending") == [123, 456]
|
||||
assert handler._extract_issue_references(
|
||||
'Issue #123 is fixed and #456 is pending'
|
||||
) == [123, 456]
|
||||
|
||||
@ -1,39 +1,39 @@
|
||||
import json
|
||||
from unittest.mock import patch, MagicMock
|
||||
from unittest.mock import MagicMock, patch
|
||||
|
||||
from openhands.resolver.issue_definitions import PRHandler
|
||||
from openhands.resolver.github_issue import GithubIssue, ReviewThread
|
||||
from openhands.events.action.message import MessageAction
|
||||
from openhands.core.config import LLMConfig
|
||||
from openhands.events.action.message import MessageAction
|
||||
from openhands.resolver.github_issue import GithubIssue, ReviewThread
|
||||
from openhands.resolver.issue_definitions import PRHandler
|
||||
|
||||
|
||||
def test_guess_success_review_threads_litellm_call():
|
||||
"""Test that the litellm.completion() call for review threads contains the expected content."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create a mock issue with review threads
|
||||
issue = GithubIssue(
|
||||
owner="test-owner",
|
||||
repo="test-repo",
|
||||
owner='test-owner',
|
||||
repo='test-repo',
|
||||
number=1,
|
||||
title="Test PR",
|
||||
body="Test Body",
|
||||
title='Test PR',
|
||||
body='Test Body',
|
||||
thread_comments=None,
|
||||
closing_issues=["Issue 1 description", "Issue 2 description"],
|
||||
closing_issues=['Issue 1 description', 'Issue 2 description'],
|
||||
review_comments=None,
|
||||
review_threads=[
|
||||
ReviewThread(
|
||||
comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
|
||||
files=["/src/file1.py", "/src/file2.py"],
|
||||
comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
|
||||
files=['/src/file1.py', '/src/file2.py'],
|
||||
),
|
||||
ReviewThread(
|
||||
comment="Add more tests\n---\nlatest feedback:\nAdd test cases",
|
||||
files=["/tests/test_file.py"],
|
||||
comment='Add more tests\n---\nlatest feedback:\nAdd test cases',
|
||||
files=['/tests/test_file.py'],
|
||||
),
|
||||
],
|
||||
thread_ids=["1", "2"],
|
||||
head_branch="test-branch",
|
||||
thread_ids=['1', '2'],
|
||||
head_branch='test-branch',
|
||||
)
|
||||
|
||||
# Create mock history with a detailed response
|
||||
@ -47,7 +47,7 @@ def test_guess_success_review_threads_litellm_call():
|
||||
]
|
||||
|
||||
# Create mock LLM config
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -64,7 +64,7 @@ The changes successfully address the feedback."""
|
||||
]
|
||||
|
||||
# Test the guess_success method
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, success_list, explanation = handler.guess_success(
|
||||
issue, history, llm_config
|
||||
@ -75,63 +75,63 @@ The changes successfully address the feedback."""
|
||||
|
||||
# Check first call
|
||||
first_call = mock_completion.call_args_list[0]
|
||||
first_prompt = first_call[1]["messages"][0]["content"]
|
||||
first_prompt = first_call[1]['messages'][0]['content']
|
||||
assert (
|
||||
"Issue descriptions:\n"
|
||||
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
|
||||
'Issue descriptions:\n'
|
||||
+ json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
|
||||
in first_prompt
|
||||
)
|
||||
assert (
|
||||
"Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings"
|
||||
'Feedback:\nPlease fix the formatting\n---\nlatest feedback:\nAdd docstrings'
|
||||
in first_prompt
|
||||
)
|
||||
assert (
|
||||
"Files locations:\n"
|
||||
+ json.dumps(["/src/file1.py", "/src/file2.py"], indent=4)
|
||||
'Files locations:\n'
|
||||
+ json.dumps(['/src/file1.py', '/src/file2.py'], indent=4)
|
||||
in first_prompt
|
||||
)
|
||||
assert "Last message from AI agent:\n" + history[0].content in first_prompt
|
||||
assert 'Last message from AI agent:\n' + history[0].content in first_prompt
|
||||
|
||||
# Check second call
|
||||
second_call = mock_completion.call_args_list[1]
|
||||
second_prompt = second_call[1]["messages"][0]["content"]
|
||||
second_prompt = second_call[1]['messages'][0]['content']
|
||||
assert (
|
||||
"Issue descriptions:\n"
|
||||
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
|
||||
'Issue descriptions:\n'
|
||||
+ json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
|
||||
in second_prompt
|
||||
)
|
||||
assert (
|
||||
"Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases"
|
||||
'Feedback:\nAdd more tests\n---\nlatest feedback:\nAdd test cases'
|
||||
in second_prompt
|
||||
)
|
||||
assert (
|
||||
"Files locations:\n" + json.dumps(["/tests/test_file.py"], indent=4)
|
||||
'Files locations:\n' + json.dumps(['/tests/test_file.py'], indent=4)
|
||||
in second_prompt
|
||||
)
|
||||
assert "Last message from AI agent:\n" + history[0].content in second_prompt
|
||||
assert 'Last message from AI agent:\n' + history[0].content in second_prompt
|
||||
|
||||
|
||||
def test_guess_success_thread_comments_litellm_call():
|
||||
"""Test that the litellm.completion() call for thread comments contains the expected content."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create a mock issue with thread comments
|
||||
issue = GithubIssue(
|
||||
owner="test-owner",
|
||||
repo="test-repo",
|
||||
owner='test-owner',
|
||||
repo='test-repo',
|
||||
number=1,
|
||||
title="Test PR",
|
||||
body="Test Body",
|
||||
title='Test PR',
|
||||
body='Test Body',
|
||||
thread_comments=[
|
||||
"Please improve error handling",
|
||||
"Add input validation",
|
||||
"latest feedback:\nHandle edge cases",
|
||||
'Please improve error handling',
|
||||
'Add input validation',
|
||||
'latest feedback:\nHandle edge cases',
|
||||
],
|
||||
closing_issues=["Issue 1 description", "Issue 2 description"],
|
||||
closing_issues=['Issue 1 description', 'Issue 2 description'],
|
||||
review_comments=None,
|
||||
thread_ids=None,
|
||||
head_branch="test-branch",
|
||||
head_branch='test-branch',
|
||||
)
|
||||
|
||||
# Create mock history with a detailed response
|
||||
@ -145,7 +145,7 @@ def test_guess_success_thread_comments_litellm_call():
|
||||
]
|
||||
|
||||
# Create mock LLM config
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -162,7 +162,7 @@ The changes successfully address the feedback."""
|
||||
]
|
||||
|
||||
# Test the guess_success method
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, success_list, explanation = handler.guess_success(
|
||||
issue, history, llm_config
|
||||
@ -171,77 +171,77 @@ The changes successfully address the feedback."""
|
||||
# Verify the litellm.completion() call
|
||||
mock_completion.assert_called_once()
|
||||
call_args = mock_completion.call_args
|
||||
prompt = call_args[1]["messages"][0]["content"]
|
||||
prompt = call_args[1]['messages'][0]['content']
|
||||
|
||||
# Check prompt content
|
||||
assert (
|
||||
"Issue descriptions:\n"
|
||||
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
|
||||
'Issue descriptions:\n'
|
||||
+ json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
|
||||
in prompt
|
||||
)
|
||||
assert "PR Thread Comments:\n" + "\n---\n".join(issue.thread_comments) in prompt
|
||||
assert "Last message from AI agent:\n" + history[0].content in prompt
|
||||
assert 'PR Thread Comments:\n' + '\n---\n'.join(issue.thread_comments) in prompt
|
||||
assert 'Last message from AI agent:\n' + history[0].content in prompt
|
||||
|
||||
|
||||
def test_check_feedback_with_llm():
|
||||
"""Test the _check_feedback_with_llm helper function."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create mock LLM config
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Test cases for different LLM responses
|
||||
test_cases = [
|
||||
{
|
||||
"response": "--- success\ntrue\n--- explanation\nChanges look good",
|
||||
"expected": (True, "Changes look good"),
|
||||
'response': '--- success\ntrue\n--- explanation\nChanges look good',
|
||||
'expected': (True, 'Changes look good'),
|
||||
},
|
||||
{
|
||||
"response": "--- success\nfalse\n--- explanation\nNot all issues fixed",
|
||||
"expected": (False, "Not all issues fixed"),
|
||||
'response': '--- success\nfalse\n--- explanation\nNot all issues fixed',
|
||||
'expected': (False, 'Not all issues fixed'),
|
||||
},
|
||||
{
|
||||
"response": "Invalid response format",
|
||||
"expected": (
|
||||
'response': 'Invalid response format',
|
||||
'expected': (
|
||||
False,
|
||||
"Failed to decode answer from LLM response: Invalid response format",
|
||||
'Failed to decode answer from LLM response: Invalid response format',
|
||||
),
|
||||
},
|
||||
{
|
||||
"response": "--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere",
|
||||
"expected": (True, "Multiline\nexplanation\nhere"),
|
||||
'response': '--- success\ntrue\n--- explanation\nMultiline\nexplanation\nhere',
|
||||
'expected': (True, 'Multiline\nexplanation\nhere'),
|
||||
},
|
||||
]
|
||||
|
||||
for case in test_cases:
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
mock_response.choices = [MagicMock(message=MagicMock(content=case["response"]))]
|
||||
mock_response.choices = [MagicMock(message=MagicMock(content=case['response']))]
|
||||
|
||||
# Test the function
|
||||
with patch("litellm.completion", return_value=mock_response):
|
||||
with patch('litellm.completion', return_value=mock_response):
|
||||
success, explanation = handler._check_feedback_with_llm(
|
||||
"test prompt", llm_config
|
||||
'test prompt', llm_config
|
||||
)
|
||||
assert (success, explanation) == case["expected"]
|
||||
assert (success, explanation) == case['expected']
|
||||
|
||||
|
||||
def test_check_review_thread():
|
||||
"""Test the _check_review_thread helper function."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create test data
|
||||
review_thread = ReviewThread(
|
||||
comment="Please fix the formatting\n---\nlatest feedback:\nAdd docstrings",
|
||||
files=["/src/file1.py", "/src/file2.py"],
|
||||
comment='Please fix the formatting\n---\nlatest feedback:\nAdd docstrings',
|
||||
files=['/src/file1.py', '/src/file2.py'],
|
||||
)
|
||||
issues_context = json.dumps(
|
||||
["Issue 1 description", "Issue 2 description"], indent=4
|
||||
['Issue 1 description', 'Issue 2 description'], indent=4
|
||||
)
|
||||
last_message = "I have fixed the formatting and added docstrings"
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
last_message = 'I have fixed the formatting and added docstrings'
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -258,7 +258,7 @@ Changes look good"""
|
||||
]
|
||||
|
||||
# Test the function
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, explanation = handler._check_review_thread(
|
||||
review_thread, issues_context, last_message, llm_config
|
||||
@ -267,37 +267,37 @@ Changes look good"""
|
||||
# Verify the litellm.completion() call
|
||||
mock_completion.assert_called_once()
|
||||
call_args = mock_completion.call_args
|
||||
prompt = call_args[1]["messages"][0]["content"]
|
||||
prompt = call_args[1]['messages'][0]['content']
|
||||
|
||||
# Check prompt content
|
||||
assert "Issue descriptions:\n" + issues_context in prompt
|
||||
assert "Feedback:\n" + review_thread.comment in prompt
|
||||
assert 'Issue descriptions:\n' + issues_context in prompt
|
||||
assert 'Feedback:\n' + review_thread.comment in prompt
|
||||
assert (
|
||||
"Files locations:\n" + json.dumps(review_thread.files, indent=4) in prompt
|
||||
'Files locations:\n' + json.dumps(review_thread.files, indent=4) in prompt
|
||||
)
|
||||
assert "Last message from AI agent:\n" + last_message in prompt
|
||||
assert 'Last message from AI agent:\n' + last_message in prompt
|
||||
|
||||
# Check result
|
||||
assert success is True
|
||||
assert explanation == "Changes look good"
|
||||
assert explanation == 'Changes look good'
|
||||
|
||||
|
||||
def test_check_thread_comments():
|
||||
"""Test the _check_thread_comments helper function."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create test data
|
||||
thread_comments = [
|
||||
"Please improve error handling",
|
||||
"Add input validation",
|
||||
"latest feedback:\nHandle edge cases",
|
||||
'Please improve error handling',
|
||||
'Add input validation',
|
||||
'latest feedback:\nHandle edge cases',
|
||||
]
|
||||
issues_context = json.dumps(
|
||||
["Issue 1 description", "Issue 2 description"], indent=4
|
||||
['Issue 1 description', 'Issue 2 description'], indent=4
|
||||
)
|
||||
last_message = "I have added error handling and input validation"
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
last_message = 'I have added error handling and input validation'
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -314,7 +314,7 @@ Changes look good"""
|
||||
]
|
||||
|
||||
# Test the function
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, explanation = handler._check_thread_comments(
|
||||
thread_comments, issues_context, last_message, llm_config
|
||||
@ -323,34 +323,34 @@ Changes look good"""
|
||||
# Verify the litellm.completion() call
|
||||
mock_completion.assert_called_once()
|
||||
call_args = mock_completion.call_args
|
||||
prompt = call_args[1]["messages"][0]["content"]
|
||||
prompt = call_args[1]['messages'][0]['content']
|
||||
|
||||
# Check prompt content
|
||||
assert "Issue descriptions:\n" + issues_context in prompt
|
||||
assert "PR Thread Comments:\n" + "\n---\n".join(thread_comments) in prompt
|
||||
assert "Last message from AI agent:\n" + last_message in prompt
|
||||
assert 'Issue descriptions:\n' + issues_context in prompt
|
||||
assert 'PR Thread Comments:\n' + '\n---\n'.join(thread_comments) in prompt
|
||||
assert 'Last message from AI agent:\n' + last_message in prompt
|
||||
|
||||
# Check result
|
||||
assert success is True
|
||||
assert explanation == "Changes look good"
|
||||
assert explanation == 'Changes look good'
|
||||
|
||||
|
||||
def test_check_review_comments():
|
||||
"""Test the _check_review_comments helper function."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create test data
|
||||
review_comments = [
|
||||
"Please improve code readability",
|
||||
"Add comments to complex functions",
|
||||
"Follow PEP 8 style guide",
|
||||
'Please improve code readability',
|
||||
'Add comments to complex functions',
|
||||
'Follow PEP 8 style guide',
|
||||
]
|
||||
issues_context = json.dumps(
|
||||
["Issue 1 description", "Issue 2 description"], indent=4
|
||||
['Issue 1 description', 'Issue 2 description'], indent=4
|
||||
)
|
||||
last_message = "I have improved code readability and added comments"
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
last_message = 'I have improved code readability and added comments'
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -367,7 +367,7 @@ Changes look good"""
|
||||
]
|
||||
|
||||
# Test the function
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, explanation = handler._check_review_comments(
|
||||
review_comments, issues_context, last_message, llm_config
|
||||
@ -376,39 +376,39 @@ Changes look good"""
|
||||
# Verify the litellm.completion() call
|
||||
mock_completion.assert_called_once()
|
||||
call_args = mock_completion.call_args
|
||||
prompt = call_args[1]["messages"][0]["content"]
|
||||
prompt = call_args[1]['messages'][0]['content']
|
||||
|
||||
# Check prompt content
|
||||
assert "Issue descriptions:\n" + issues_context in prompt
|
||||
assert "PR Review Comments:\n" + "\n---\n".join(review_comments) in prompt
|
||||
assert "Last message from AI agent:\n" + last_message in prompt
|
||||
assert 'Issue descriptions:\n' + issues_context in prompt
|
||||
assert 'PR Review Comments:\n' + '\n---\n'.join(review_comments) in prompt
|
||||
assert 'Last message from AI agent:\n' + last_message in prompt
|
||||
|
||||
# Check result
|
||||
assert success is True
|
||||
assert explanation == "Changes look good"
|
||||
assert explanation == 'Changes look good'
|
||||
|
||||
|
||||
def test_guess_success_review_comments_litellm_call():
|
||||
"""Test that the litellm.completion() call for review comments contains the expected content."""
|
||||
# Create a PR handler instance
|
||||
handler = PRHandler("test-owner", "test-repo", "test-token")
|
||||
handler = PRHandler('test-owner', 'test-repo', 'test-token')
|
||||
|
||||
# Create a mock issue with review comments
|
||||
issue = GithubIssue(
|
||||
owner="test-owner",
|
||||
repo="test-repo",
|
||||
owner='test-owner',
|
||||
repo='test-repo',
|
||||
number=1,
|
||||
title="Test PR",
|
||||
body="Test Body",
|
||||
title='Test PR',
|
||||
body='Test Body',
|
||||
thread_comments=None,
|
||||
closing_issues=["Issue 1 description", "Issue 2 description"],
|
||||
closing_issues=['Issue 1 description', 'Issue 2 description'],
|
||||
review_comments=[
|
||||
"Please improve code readability",
|
||||
"Add comments to complex functions",
|
||||
"Follow PEP 8 style guide",
|
||||
'Please improve code readability',
|
||||
'Add comments to complex functions',
|
||||
'Follow PEP 8 style guide',
|
||||
],
|
||||
thread_ids=None,
|
||||
head_branch="test-branch",
|
||||
head_branch='test-branch',
|
||||
)
|
||||
|
||||
# Create mock history with a detailed response
|
||||
@ -422,7 +422,7 @@ def test_guess_success_review_comments_litellm_call():
|
||||
]
|
||||
|
||||
# Create mock LLM config
|
||||
llm_config = LLMConfig(model="test-model", api_key="test-key")
|
||||
llm_config = LLMConfig(model='test-model', api_key='test-key')
|
||||
|
||||
# Mock the LLM response
|
||||
mock_response = MagicMock()
|
||||
@ -439,7 +439,7 @@ The changes successfully address the feedback."""
|
||||
]
|
||||
|
||||
# Test the guess_success method
|
||||
with patch("litellm.completion") as mock_completion:
|
||||
with patch('litellm.completion') as mock_completion:
|
||||
mock_completion.return_value = mock_response
|
||||
success, success_list, explanation = handler.guess_success(
|
||||
issue, history, llm_config
|
||||
@ -448,13 +448,13 @@ The changes successfully address the feedback."""
|
||||
# Verify the litellm.completion() call
|
||||
mock_completion.assert_called_once()
|
||||
call_args = mock_completion.call_args
|
||||
prompt = call_args[1]["messages"][0]["content"]
|
||||
prompt = call_args[1]['messages'][0]['content']
|
||||
|
||||
# Check prompt content
|
||||
assert (
|
||||
"Issue descriptions:\n"
|
||||
+ json.dumps(["Issue 1 description", "Issue 2 description"], indent=4)
|
||||
'Issue descriptions:\n'
|
||||
+ json.dumps(['Issue 1 description', 'Issue 2 description'], indent=4)
|
||||
in prompt
|
||||
)
|
||||
assert "PR Review Comments:\n" + "\n---\n".join(issue.review_comments) in prompt
|
||||
assert "Last message from AI agent:\n" + history[0].content in prompt
|
||||
assert 'PR Review Comments:\n' + '\n---\n'.join(issue.review_comments) in prompt
|
||||
assert 'Last message from AI agent:\n' + history[0].content in prompt
|
||||
|
||||
@ -1,45 +1,46 @@
import os
import subprocess
import tempfile

from openhands.resolver.github_issue import GithubIssue
from openhands.resolver.send_pull_request import make_commit
import os
import tempfile
import subprocess


def test_commit_message_with_quotes():
# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
subprocess.run(["git", "init", temp_dir], check=True)
subprocess.run(['git', 'init', temp_dir], check=True)

# Create a test file and add it to git
test_file = os.path.join(temp_dir, "test.txt")
with open(test_file, "w") as f:
f.write("test content")
test_file = os.path.join(temp_dir, 'test.txt')
with open(test_file, 'w') as f:
f.write('test content')

subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)

# Create a test issue with problematic title
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
owner='test-owner',
repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>",
body="Test body",
body='Test body',
labels=[],
assignees=[],
state="open",
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
state='open',
created_at='2024-01-01T00:00:00Z',
updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)

# Make the commit
make_commit(temp_dir, issue, "issue")
make_commit(temp_dir, issue, 'issue')

# Get the commit message
result = subprocess.run(
["git", "-C", temp_dir, "log", "-1", "--pretty=%B"],
['git', '-C', temp_dir, 'log', '-1', '--pretty=%B'],
capture_output=True,
text=True,
check=True,
@ -48,7 +49,7 @@ def test_commit_message_with_quotes():

# The commit message should contain the quotes without excessive escaping
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>"
assert commit_msg == expected, f"Expected: {expected}\nGot: {commit_msg}"
assert commit_msg == expected, f'Expected: {expected}\nGot: {commit_msg}'


def test_pr_title_with_quotes(monkeypatch):
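The test above relies on the commit title surviving make_commit without extra escaping. The usual way to get that behaviour (shown here as an illustrative sketch, not the repository's actual make_commit implementation) is to pass the message as its own argv element to subprocess.run, so no shell quoting is ever involved:

import subprocess

def commit_with_title(repo_dir: str, issue_number: int, title: str) -> None:
    # Hypothetical helper: the message is a separate list element, so quotes
    # and angle brackets in the title need no escaping at all.
    message = f'Fix issue #{issue_number}: {title}'
    subprocess.run(['git', '-C', repo_dir, 'commit', '-m', message], check=True)

With that approach, git log -1 --pretty=%B returns the title essentially unchanged, which is what the assertion above checks for.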
@ -56,39 +57,39 @@ def test_pr_title_with_quotes(monkeypatch):
class MockResponse:
def __init__(self, status_code=201):
self.status_code = status_code
self.text = ""
self.text = ''

def json(self):
return {"html_url": "https://github.com/test/test/pull/1"}
return {'html_url': 'https://github.com/test/test/pull/1'}

def raise_for_status(self):
pass

def mock_post(*args, **kwargs):
# Verify that the PR title is not over-escaped
data = kwargs.get("json", {})
title = data.get("title", "")
data = kwargs.get('json', {})
title = data.get('title', '')
expected = "Fix issue #123: Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>"
assert (
title == expected
), f"PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}"
), f'PR title was incorrectly escaped.\nExpected: {expected}\nGot: {title}'
return MockResponse()

class MockGetResponse:
def __init__(self, status_code=200):
self.status_code = status_code
self.text = ""
self.text = ''

def json(self):
return {"default_branch": "main"}
return {'default_branch': 'main'}

def raise_for_status(self):
pass

monkeypatch.setattr("requests.post", mock_post)
monkeypatch.setattr("requests.get", lambda *args, **kwargs: MockGetResponse())
monkeypatch.setattr('requests.post', mock_post)
monkeypatch.setattr('requests.get', lambda *args, **kwargs: MockGetResponse())
monkeypatch.setattr(
"openhands.resolver.send_pull_request.branch_exists",
'openhands.resolver.send_pull_request.branch_exists',
lambda *args, **kwargs: False,
)

@ -97,69 +98,69 @@ def test_pr_title_with_quotes(monkeypatch):

def mock_run(*args, **kwargs):
print(f"Running command: {args[0] if args else kwargs.get('args', [])}")
if isinstance(args[0], list) and args[0][0] == "git":
if "push" in args[0]:
if isinstance(args[0], list) and args[0][0] == 'git':
if 'push' in args[0]:
return subprocess.CompletedProcess(
args[0], returncode=0, stdout="", stderr=""
args[0], returncode=0, stdout='', stderr=''
)
return original_run(*args, **kwargs)
return original_run(*args, **kwargs)

monkeypatch.setattr("subprocess.run", mock_run)
monkeypatch.setattr('subprocess.run', mock_run)

# Create a temporary directory and initialize git repo
with tempfile.TemporaryDirectory() as temp_dir:
print("Initializing git repo...")
subprocess.run(["git", "init", temp_dir], check=True)
print('Initializing git repo...')
subprocess.run(['git', 'init', temp_dir], check=True)

# Add these lines to configure git
subprocess.run(
["git", "-C", temp_dir, "config", "user.name", "Test User"], check=True
['git', '-C', temp_dir, 'config', 'user.name', 'Test User'], check=True
)
subprocess.run(
["git", "-C", temp_dir, "config", "user.email", "test@example.com"],
['git', '-C', temp_dir, 'config', 'user.email', 'test@example.com'],
check=True,
)

# Create a test file and add it to git
test_file = os.path.join(temp_dir, "test.txt")
with open(test_file, "w") as f:
f.write("test content")
test_file = os.path.join(temp_dir, 'test.txt')
with open(test_file, 'w') as f:
f.write('test content')

print("Adding and committing test file...")
subprocess.run(["git", "-C", temp_dir, "add", "test.txt"], check=True)
print('Adding and committing test file...')
subprocess.run(['git', '-C', temp_dir, 'add', 'test.txt'], check=True)
subprocess.run(
["git", "-C", temp_dir, "commit", "-m", "Initial commit"], check=True
['git', '-C', temp_dir, 'commit', '-m', 'Initial commit'], check=True
)

# Create a test issue with problematic title
print("Creating test issue...")
print('Creating test issue...')
issue = GithubIssue(
owner="test-owner",
repo="test-repo",
owner='test-owner',
repo='test-repo',
number=123,
title="Issue with 'quotes' and \"double quotes\" and <class 'ValueError'>",
body="Test body",
body='Test body',
labels=[],
assignees=[],
state="open",
created_at="2024-01-01T00:00:00Z",
updated_at="2024-01-01T00:00:00Z",
state='open',
created_at='2024-01-01T00:00:00Z',
updated_at='2024-01-01T00:00:00Z',
closed_at=None,
head_branch=None,
thread_ids=None,
)

# Try to send a PR - this will fail if the title is incorrectly escaped
print("Sending PR...")
from openhands.resolver.send_pull_request import send_pull_request
print('Sending PR...')
from openhands.core.config import LLMConfig
from openhands.resolver.send_pull_request import send_pull_request

send_pull_request(
github_issue=issue,
github_token="dummy-token",
github_username="test-user",
github_token='dummy-token',
github_username='test-user',
patch_dir=temp_dir,
llm_config=LLMConfig(model="test-model", api_key="test-key"),
pr_type="ready",
llm_config=LLMConfig(model='test-model', api_key='test-key'),
pr_type='ready',
)

@ -551,6 +551,7 @@ This is a Python repo for openhands-resolver, a library that attempts to resolve
- Setup: `poetry install --with test --with dev`
- Testing: `poetry run pytest tests/test_*.py`


When you think you have fixed the issue through code changes, please finish the interaction."""
assert instruction == expected_instruction
assert issue_handler.issue_type == 'issue'