diff --git a/Development.md b/Development.md
index babb42ab8f..c30fd629d5 100644
--- a/Development.md
+++ b/Development.md
@@ -45,6 +45,7 @@ To configure the LM of your choice, follow these steps:
    make setup-config
    ```
    This command will prompt you to enter the LLM API key, model name, and other variables ensuring that OpenDevin is tailored to your specific needs. Note that the model name will apply only when you run headless. If you use the UI, please set the model in the UI.
+   Set `persist_sandbox` to false if you want to use a clean sandbox for each task. If `persist_sandbox` is set to true, you will need to set the `ssh_password` as well.
 
 **Note on Alternative Models:**
 Some alternative models may prove more challenging to tame than others. Fear not, brave adventurer! We shall soon unveil LLM-specific documentation to guide you on your quest. And if you've already mastered the art of wielding a model other than OpenAI's GPT, we encourage you to [share your setup instructions with us](https://github.com/OpenDevin/OpenDevin/issues/417).
diff --git a/Makefile b/Makefile
index 49ad4af557..93e99b62e8 100644
--- a/Makefile
+++ b/Makefile
@@ -226,6 +226,15 @@ setup-config-prompts:
 		workspace_dir=$${workspace_dir:-$(DEFAULT_WORKSPACE_DIR)}; \
 		echo "workspace_base=\"$$workspace_dir\"" >> $(CONFIG_FILE).tmp
 
+	@read -p "Do you want to persist the sandbox container? [true/false] [default: true]: " persist_sandbox; \
+		persist_sandbox=$${persist_sandbox:-true}; \
+		if [ "$$persist_sandbox" = "true" ]; then \
+			read -p "Enter a password for the sandbox container: " ssh_password; \
+			echo "ssh_password=\"$$ssh_password\"" >> $(CONFIG_FILE).tmp; \
+		else \
+			echo "persist_sandbox=\"$$persist_sandbox\"" >> $(CONFIG_FILE).tmp; \
+		fi
+
 	@echo "" >> $(CONFIG_FILE).tmp
 
 	@echo "[llm]" >> $(CONFIG_FILE).tmp
diff --git a/README.md b/README.md
index f89ec14689..12e57a0231 100644
--- a/README.md
+++ b/README.md
@@ -64,6 +64,7 @@ export WORKSPACE_BASE=$(pwd)/workspace;
 docker run -it \
     --pull=always \
     -e SANDBOX_USER_ID=$(id -u) \
+    -e PERSIST_SANDBOX="true" \
     -e SSH_PASSWORD="make something up here" \
     -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
     -v $WORKSPACE_BASE:/opt/workspace_base \
diff --git a/docs/modules/usage/intro.mdx b/docs/modules/usage/intro.mdx
index f794da81f8..8da98906bf 100644
--- a/docs/modules/usage/intro.mdx
+++ b/docs/modules/usage/intro.mdx
@@ -76,6 +76,7 @@ OpenDevin runs bash commands within a Docker sandbox, so it should not affect yo
 docker run -it \
     --pull=always \
     -e SANDBOX_USER_ID=$(id -u) \
+    -e PERSIST_SANDBOX="true" \
     -e SSH_PASSWORD="make something up here" \
     -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
     -v $WORKSPACE_BASE:/opt/workspace_base \
diff --git a/opendevin/core/config.py b/opendevin/core/config.py
index b46003d99c..f4cdf02277 100644
--- a/opendevin/core/config.py
+++ b/opendevin/core/config.py
@@ -179,7 +179,7 @@ class AppConfig(metaclass=Singleton):
     disable_color: bool = False
     sandbox_user_id: int = os.getuid() if hasattr(os, 'getuid') else 1000
     sandbox_timeout: int = 120
-    persist_sandbox: bool = True
+    persist_sandbox: bool = False
     ssh_port: int = 63710
     ssh_password: str | None = None
     github_token: str | None = None
diff --git a/opendevin/runtime/docker/ssh_box.py b/opendevin/runtime/docker/ssh_box.py
index 2b11e312ba..40218450f3 100644
--- a/opendevin/runtime/docker/ssh_box.py
+++ b/opendevin/runtime/docker/ssh_box.py
@@ -228,7 +228,9 @@ class DockerSSHBox(Sandbox):
         # set up random user password
         if config.persist_sandbox:
             if not config.ssh_password:
-                raise Exception('Password must be set for persistent sandbox')
+                raise Exception(
+                    'Please add ssh_password to your config.toml or add -e SSH_PASSWORD to your docker run command'
+                )
             self._ssh_password = config.ssh_password
             self._ssh_port = config.ssh_port
         else:
diff --git a/tests/integration/README.md b/tests/integration/README.md
index 02174d7cb6..a94d876fd4 100644
--- a/tests/integration/README.md
+++ b/tests/integration/README.md
@@ -42,6 +42,8 @@ where `conftest.py` defines the infrastructure needed to load real-world LLM pro
 and responses for mocking purpose. Prompts and responses generated during real
 runs of agents with real LLMs are stored under `mock/AgentName/TestName` folders.
 
+**Note:** Set PERSIST_SANDBOX=false to use a clean sandbox for each test.
+
 ## Run Integration Tests
 
 Take a look at `run-integration-tests.yml` to learn how integration tests are
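
For reference, a minimal sketch of a `docker run` invocation with sandbox persistence disabled, assuming the `PERSIST_SANDBOX` variable introduced above. With `PERSIST_SANDBOX=false` no `SSH_PASSWORD` needs to be supplied; the image tag, port mapping, and extra mounts below are illustrative placeholders, not values taken from this patch.

```bash
export WORKSPACE_BASE=$(pwd)/workspace

# PERSIST_SANDBOX=false requests a fresh sandbox container for each task,
# so SSH_PASSWORD can be omitted. The image name, port, and docker socket
# mount here are placeholders; use the values from the project README.
docker run -it \
    --pull=always \
    -e SANDBOX_USER_ID=$(id -u) \
    -e PERSIST_SANDBOX="false" \
    -e WORKSPACE_MOUNT_PATH=$WORKSPACE_BASE \
    -v $WORKSPACE_BASE:/opt/workspace_base \
    -v /var/run/docker.sock:/var/run/docker.sock \
    -p 3000:3000 \
    ghcr.io/opendevin/opendevin:main
```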