From 516c9bf1e0ed6648fbf56b2c3384dd4526516f21 Mon Sep 17 00:00:00 2001 From: Robert Brennan Date: Tue, 16 Apr 2024 01:10:38 +0200 Subject: [PATCH] Revamp docker build process (#1121) * refactor docker building * change to buildx * disable branch filter * disable tags * matrix for building * fix branch filter * rename workflow * sanitize ref name * fix sanitization * fix source command * fix source command * add push arg * enable for all branches * logs * empty commit * try freeing disk space * try disk clean again * try alpine * Update ghcr.yml * Update ghcr.yml * move checkout * ignore .git * add disk space debug * add df h to build script * remove pull * try another failure bypass * remove maximize build space step * remove df -h debug * add no-root * multi-stage python build * add ssh * update readme * remove references to config.toml --- .dockerignore | 1 + .github/ISSUE_TEMPLATE/bug_report.md | 12 ++--- .github/workflows/ghcr.yml | 37 +++---------- README.md | 7 +-- config.toml.template | 4 -- container/Dockerfile | 34 ------------ container/Makefile | 31 ----------- containers/app/Dockerfile | 54 +++++++++++++++++++ containers/app/config.sh | 2 + containers/build.sh | 48 +++++++++++++++++ .../evaluation}/Dockerfile | 2 +- containers/evaluation/config.sh | 2 + {opendevin => containers}/sandbox/Dockerfile | 0 containers/sandbox/config.sh | 2 + docs/documentation/AZURE_LLM_GUIDE.md | 21 ++------ docs/documentation/LOCAL_LLM_GUIDE.md | 53 +++++------------- evaluation/SWE-bench/Makefile | 31 ----------- opendevin/sandbox/Makefile | 31 ----------- 18 files changed, 143 insertions(+), 229 deletions(-) delete mode 100644 config.toml.template delete mode 100644 container/Dockerfile delete mode 100644 container/Makefile create mode 100644 containers/app/Dockerfile create mode 100644 containers/app/config.sh create mode 100755 containers/build.sh rename {evaluation/SWE-bench => containers/evaluation}/Dockerfile (97%) create mode 100644 containers/evaluation/config.sh rename {opendevin => containers}/sandbox/Dockerfile (100%) create mode 100644 containers/sandbox/config.sh delete mode 100644 evaluation/SWE-bench/Makefile delete mode 100644 opendevin/sandbox/Makefile diff --git a/.dockerignore b/.dockerignore index 55424b848c..9b30efb1db 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,3 +2,4 @@ frontend/node_modules config.toml .envrc .env +.git diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index a4a218bee8..000e2b464d 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -16,13 +16,12 @@ assignees: '' ```bash ``` -* Operating System: - - -**My config.toml and environment vars** (be sure to redact API keys): -```toml -``` +**My operating system**: + +**My environment vars and other configuration** (be sure to redact API keys): +```bash +``` **My model and agent** (you can see these settings in the UI): * Model: @@ -40,4 +39,3 @@ assignees: '' **Logs, error messages, and screenshots**: #### Additional Context - diff --git a/.github/workflows/ghcr.yml b/.github/workflows/ghcr.yml index 75129e75fb..f7ff953ea8 100644 --- a/.github/workflows/ghcr.yml +++ b/.github/workflows/ghcr.yml @@ -1,8 +1,8 @@ -name: Build and publish multi-arch container images +name: Publish Docker Image on: push: - branches: [ main ] + branches: [ '**' ] workflow_dispatch: inputs: reason: @@ -14,6 +14,9 @@ jobs: ghcr_build_and_push: runs-on: ubuntu-latest if: github.event_name == 'push' || github.event.inputs.reason != '' + strategy: + 
matrix: + image: ["app", "evaluation", "sandbox"] steps: - name: checkout @@ -29,31 +32,5 @@ jobs: - name: Log-in to ghcr.io run: echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin - - name: Build and push multi-arch container images - run: | - # set env for fork repo - DOCKER_BUILD_ORG=$(echo "${{ github.repository }}" | tr '[A-Z]' '[a-z]' | cut -d '/' -f 1) - # Find directories containing Dockerfile but not containing .dockerfileignore - while IFS= read -r dockerfile_dir; do - - # Check if .dockerfileignore exists in the directory - if [ -f "$dockerfile_dir/.dockerfileignore" ]; then - echo "$dockerfile_dir/.dockerfileignore exists, skipping build and push" - continue - fi - - # Check if image was already exist in ghcr.io - pushd "$dockerfile_dir" > /dev/null - FULL_IMAGE=$(make get-full-image DOCKER_BUILD_ORG=$DOCKER_BUILD_ORG) - popd > /dev/null - EXISTS=$(docker manifest inspect "$FULL_IMAGE" > /dev/null 2>&1 && echo "true" || echo "false") - if [ "$EXISTS" == "true" ]; then - echo "Image $FULL_IMAGE already exists in ghcr.io, skipping build and push" - continue - fi - - # Build and push the image to ghcr.io - pushd "$dockerfile_dir" > /dev/null - make all DOCKER_BUILD_ORG=$DOCKER_BUILD_ORG - popd > /dev/null - done < <(find . -type f -name Dockerfile -exec dirname {} \; | sort -u) + - name: Build and push ${{ matrix.image }} + run: ./containers/build.sh ${{ matrix.image }} --push diff --git a/README.md b/README.md index e2330db51e..342b681493 100644 --- a/README.md +++ b/README.md @@ -130,15 +130,13 @@ export LLM_API_KEY="sk-..." # The directory you want OpenDevin to modify. MUST be an absolute path! export WORKSPACE_DIR=$(pwd)/workspace -docker build -t opendevin-app -f container/Dockerfile . - docker run \ -e LLM_API_KEY \ -e WORKSPACE_MOUNT_PATH=$WORKSPACE_DIR \ -v $WORKSPACE_DIR:/opt/workspace_base \ -v /var/run/docker.sock:/var/run/docker.sock \ -p 3000:3000 \ - opendevin-app + ghcr.io/opendevin/opendevin:latest ``` Replace `$(pwd)/workspace` with the path to the code you want OpenDevin to work with. @@ -151,6 +149,9 @@ OpenDevin can work with any LLM backend. For a full list of the LM providers and models available, please consult the [litellm documentation](https://docs.litellm.ai/docs/providers). +The `LLM_MODEL` environment variable controls which model is used in programmatic interactions, +but choosing a model in the OpenDevin UI will override this setting. + The following environment variables might be necessary for some LLMs: * `LLM_API_KEY` * `LLM_BASE_URL` diff --git a/config.toml.template b/config.toml.template deleted file mode 100644 index 06310520e4..0000000000 --- a/config.toml.template +++ /dev/null @@ -1,4 +0,0 @@ -# This is a template. Run `cp config.toml.template config.toml` to use it. 
- -LLM_API_KEY="" -WORKSPACE_BASE="./workspace" diff --git a/container/Dockerfile b/container/Dockerfile deleted file mode 100644 index b0a7a762ae..0000000000 --- a/container/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -FROM node:21.7.2-bookworm-slim as frontend-builder - -WORKDIR /app - -COPY ./frontend/package.json frontend/package-lock.json ./ -RUN npm install - -COPY ./frontend ./ -RUN npm run make-i18n && npm run build - -FROM python:3.12-slim as runtime - -WORKDIR /app -ENV PYTHONPATH '/app' -ENV RUN_AS_DEVIN=false -ENV USE_HOST_NETWORK=false -ENV SSH_HOSTNAME=host.docker.internal -ENV WORKSPACE_BASE=/opt/workspace_base -RUN mkdir -p $WORKSPACE_BASE - -RUN apt-get update -y \ - && apt-get install -y curl make git build-essential \ - && python3 -m pip install poetry --break-system-packages - -COPY ./pyproject.toml ./poetry.lock ./ -RUN poetry install --without evaluation - -COPY ./opendevin ./opendevin -COPY ./agenthub ./agenthub -RUN poetry run python opendevin/download.py # No-op to download assets - -COPY --from=frontend-builder /app/dist ./frontend/dist - -CMD ["poetry", "run", "uvicorn", "opendevin.server.listen:app", "--host", "0.0.0.0", "--port", "3000"] diff --git a/container/Makefile b/container/Makefile deleted file mode 100644 index 0dd616d2de..0000000000 --- a/container/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -DOCKER_BUILD_REGISTRY=ghcr.io -DOCKER_BUILD_ORG=opendevin -DOCKER_BUILD_REPO=opendevin -DOCKER_BUILD_TAG=v0.2 -FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(DOCKER_BUILD_TAG) - -LATEST_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):latest - -MAJOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1) -MAJOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MAJOR_VERSION) -MINOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1,2) -MINOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MINOR_VERSION) - -# normally, for local build testing or development. use cross platform build for sharing images to others. -build: - docker build -f Dockerfile -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} .. - -push: - docker push ${FULL_IMAGE} ${LATEST_FULL_IMAGE} - -test: - docker buildx build --platform linux/amd64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} --load -f Dockerfile .. - -# cross platform build, you may need to manually stop the buildx(buildkit) container -all: - docker buildx build --platform linux/amd64,linux/arm64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} -t ${MINOR_FULL_IMAGE} --push -f Dockerfile .. 
- -get-full-image: - @echo ${FULL_IMAGE} diff --git a/containers/app/Dockerfile b/containers/app/Dockerfile new file mode 100644 index 0000000000..6ee55b375e --- /dev/null +++ b/containers/app/Dockerfile @@ -0,0 +1,54 @@ +FROM node:21.7.2-bookworm-slim as frontend-builder + +WORKDIR /app + +COPY ./frontend/package.json frontend/package-lock.json ./ +RUN npm install + +COPY ./frontend ./ +RUN npm run make-i18n && npm run build + +FROM python:3.12-slim as backend-builder + +WORKDIR /app +ENV PYTHONPATH '/app' + +ENV POETRY_NO_INTERACTION=1 \ + POETRY_VIRTUALENVS_IN_PROJECT=1 \ + POETRY_VIRTUALENVS_CREATE=1 \ + POETRY_CACHE_DIR=/tmp/poetry_cache + +RUN apt-get update -y \ + && apt-get install -y curl make git build-essential \ + && python3 -m pip install poetry==1.8.2 --break-system-packages + +COPY ./pyproject.toml ./poetry.lock ./ +RUN touch README.md +RUN poetry install --without evaluation --no-root && rm -rf $POETRY_CACHE_DIR + +FROM python:3.12-slim as runtime + +WORKDIR /app + +ENV RUN_AS_DEVIN=false +ENV USE_HOST_NETWORK=false +ENV SSH_HOSTNAME=host.docker.internal +ENV WORKSPACE_BASE=/opt/workspace_base +RUN mkdir -p $WORKSPACE_BASE + +RUN apt-get update -y \ + && apt-get install -y curl ssh + +ENV VIRTUAL_ENV=/app/.venv \ + PATH="/app/.venv/bin:$PATH" \ + PYTHONPATH='/app' + +COPY --from=backend-builder ${VIRTUAL_ENV} ${VIRTUAL_ENV} + +COPY ./opendevin ./opendevin +COPY ./agenthub ./agenthub +RUN python opendevin/download.py # No-op to download assets + +COPY --from=frontend-builder /app/dist ./frontend/dist + +CMD ["uvicorn", "opendevin.server.listen:app", "--host", "0.0.0.0", "--port", "3000"] diff --git a/containers/app/config.sh b/containers/app/config.sh new file mode 100644 index 0000000000..2082324826 --- /dev/null +++ b/containers/app/config.sh @@ -0,0 +1,2 @@ +DOCKER_REPOSITORY=ghcr.io/opendevin/opendevin +DOCKER_BASE_DIR="." diff --git a/containers/build.sh b/containers/build.sh new file mode 100755 index 0000000000..b20395e4c7 --- /dev/null +++ b/containers/build.sh @@ -0,0 +1,48 @@ +#!/bin/bash +set -eo pipefail + +image_name=$1 +push=0 +if [[ $2 == "--push" ]]; then + push=1 +fi + +echo -e "Building: $image_name" +tags=(latest) +if [[ -n $GITHUB_REF_NAME ]]; then + # check if ref name is a version number + if [[ $GITHUB_REF_NAME =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then + major_version=$(echo $GITHUB_REF_NAME | cut -d. -f1) + minor_version=$(echo $GITHUB_REF_NAME | cut -d. -f1,2) + tags+=($major_version $minor_version) + fi + sanitized=$(echo $GITHUB_REF_NAME | sed 's/[^a-zA-Z0-9.-]\+/-/g') + tags+=($sanitized) +fi +echo "Tags: ${tags[@]}" + +dir=./containers/$image_name +if [ ! -f $dir/Dockerfile ]; then + echo "No Dockerfile found" + exit 1 +fi +if [ ! 
-f $dir/config.sh ]; then + echo "No config.sh found for Dockerfile" + exit 1 +fi +source $dir/config.sh +echo "Repo: $DOCKER_REPOSITORY" +echo "Base dir: $DOCKER_BASE_DIR" +#docker pull $DOCKER_REPOSITORY:main || true # try to get any cached layers +args="" +for tag in ${tags[@]}; do + args+=" -t $DOCKER_REPOSITORY:$tag" +done +if [[ $push -eq 1 ]]; then + args+=" --push" +fi + +docker buildx build \ + $args \ + --platform linux/amd64,linux/arm64 \ + -f $dir/Dockerfile $DOCKER_BASE_DIR diff --git a/evaluation/SWE-bench/Dockerfile b/containers/evaluation/Dockerfile similarity index 97% rename from evaluation/SWE-bench/Dockerfile rename to containers/evaluation/Dockerfile index 8a235b08ca..9101eca6d3 100644 --- a/evaluation/SWE-bench/Dockerfile +++ b/containers/evaluation/Dockerfile @@ -31,7 +31,7 @@ RUN conda env create -f environment.yml # Add commands COPY ./commands.sh . -RUN source commands.sh +RUN . ./commands.sh # Some missing packages RUN pip install datasets python-dotenv gitpython diff --git a/containers/evaluation/config.sh b/containers/evaluation/config.sh new file mode 100644 index 0000000000..421fe371a5 --- /dev/null +++ b/containers/evaluation/config.sh @@ -0,0 +1,2 @@ +DOCKER_REPOSITORY=ghcr.io/opendevin/eval-swe-bench +DOCKER_BASE_DIR=evaluation/SWE-bench diff --git a/opendevin/sandbox/Dockerfile b/containers/sandbox/Dockerfile similarity index 100% rename from opendevin/sandbox/Dockerfile rename to containers/sandbox/Dockerfile diff --git a/containers/sandbox/config.sh b/containers/sandbox/config.sh new file mode 100644 index 0000000000..2a14a21e29 --- /dev/null +++ b/containers/sandbox/config.sh @@ -0,0 +1,2 @@ +DOCKER_REPOSITORY=ghcr.io/opendevin/sandbox +DOCKER_BASE_DIR="." diff --git a/docs/documentation/AZURE_LLM_GUIDE.md b/docs/documentation/AZURE_LLM_GUIDE.md index e762b9ed4d..5b0fb127d4 100644 --- a/docs/documentation/AZURE_LLM_GUIDE.md +++ b/docs/documentation/AZURE_LLM_GUIDE.md @@ -6,37 +6,26 @@ OpenDevin uses LiteLLM for completion calls. You can find their documentation on ## azure openai configs -During installation of OpenDevin, you can set up the following parameters: +When running the OpenDevin Docker image, you'll need to set the following environment variables using `-e`: ``` LLM_BASE_URL="" # e.g. "https://openai-gpt-4-test-v-1.openai.azure.com/" LLM_API_KEY="" LLM_MODEL="azure/" +AZURE_API_VERSION = "" # e.g. "2024-02-15-preview" ``` -They will be saved in the `config.toml` file in the `OpenDevin` directory. You can add or edit them manually in the file after installation. - -In addition, you need to set the following environment variable, which is used by the LiteLLM library to make requests to the Azure API: - -`AZURE_API_VERSION = "" # e.g. "2024-02-15-preview"` - -You can set the environment variable in your terminal or in an `.env` file in the `OpenDevin` directory. - -Alternatively, you can add all these in .env, however in that case make sure to check the LiteLLM documentation for the correct variables. - # 2. Embeddings OpenDevin uses llama-index for embeddings. You can find their documentation on Azure [here](https://docs.llamaindex.ai/en/stable/api_reference/embeddings/azure_openai/) ## azure openai configs -The model used for Azure OpenAI embeddings is "text-embedding-ada-002". You need the correct deployment name for this model in your Azure account. 
- -During installation of OpenDevin, you can set the following parameters used for embeddings, when prompted by the makefile: +The model used for Azure OpenAI embeddings is "text-embedding-ada-002". +You need the correct deployment name for this model in your Azure account. +When running OpenDevin in Docker, set the following environment variables using `-e`: ``` LLM_EMBEDDING_MODEL="azureopenai" DEPLOYMENT_NAME = "" # e.g. "TextEmbedding..." LLM_API_VERSION = "" # e.g. "2024-02-15-preview" ``` - -You can re-run ```make setup-config``` anytime, or add or edit them manually in the file afterwards. diff --git a/docs/documentation/LOCAL_LLM_GUIDE.md b/docs/documentation/LOCAL_LLM_GUIDE.md index 14b9818821..c70526327a 100644 --- a/docs/documentation/LOCAL_LLM_GUIDE.md +++ b/docs/documentation/LOCAL_LLM_GUIDE.md @@ -7,7 +7,7 @@ Linux: ``` curl -fsSL https://ollama.com/install.sh | sh ``` -Windows or macOS: +Windows or macOS: - Download from [here](https://ollama.com/download/) @@ -60,30 +60,10 @@ sudo systemctl stop ollama For more info go [here](https://github.com/ollama/ollama/blob/main/docs/faq.md) -## 3. Follow the default installation of OpenDevin: -``` -git clone git@github.com:OpenDevin/OpenDevin.git -``` -or -``` -git clone git@github.com:/OpenDevin.git -``` +## 3. Start OpenDevin -then -``` -cd OpenDevin -``` - -## 4. Run setup commands: -``` -make build -make setup-config -``` - -## 5. Modify config file: - -- After running `make setup-config` you will see a generated file `OpenDevin/config.toml`. -- Open this file and modify it to your needs based on this template: +Use the instructions in [README.md](/README.md) to start OpenDevin using Docker. +When running `docker run`, add the following environment variables using `-e`: ``` LLM_API_KEY="ollama" @@ -92,34 +72,25 @@ LLM_EMBEDDING_MODEL="local" LLM_BASE_URL="http://localhost:" WORKSPACE_DIR="./workspace" ``` -Notes: -- The API key should be set to `"ollama"` -- The base url needs to be `localhost` +Notes: +- The API key should be set to `"ollama"` +- The base url needs to be `localhost` - By default ollama port is `11434` unless you set it - `model_name` needs to be the entire model name - Example: `LLM_MODEL="ollama/llama2:13b-chat-q4_K_M"` -## 6. Start OpenDevin: - -At this point everything should be set up and working properly. -1. Start by running the ollama server using the method outlined above -2. Run `make build` in your terminal `~/OpenDevin/` -3. Run `make run` in your terminal -4. If that fails try running the server and front end in sepparate terminals: - - In the first terminal `make start-backend` - - In the second terminal `make start-frontend` -5. you should now be able to connect to `http://localhost:3001/` with your local model running! +You should now be able to connect to `http://localhost:3001/` with your local model running! ## Additional Notes for WSL2 Users: -1. If you encounter the following error during setup: `Exception: Failed to create opendevin user in sandbox: b'useradd: UID 0 is not unique\n'` -You can resolve it by running: +1. If you encounter the following error during setup: `Exception: Failed to create opendevin user in sandbox: b'useradd: UID 0 is not unique\n'` +You can resolve it by running: ``` export SANDBOX_USER_ID=1000 ``` -2. If you face issues running Poetry even after installing it during the build process, you may need to add its binary path to your environment: +2. 
If you face issues running Poetry even after installing it during the build process, you may need to add its binary path to your environment: ``` export PATH="$HOME/.local/bin:$PATH" ``` @@ -134,4 +105,4 @@ You can resolve it by running: ``` - Save the `.wslconfig` file. - Restart WSL2 completely by exiting any running WSL2 instances and executing the command `wsl --shutdown` in your command prompt or terminal. - - After restarting WSL, attempt to execute `make run` again. The networking issue should be resolved. \ No newline at end of file + - After restarting WSL, attempt to execute `make run` again. The networking issue should be resolved. diff --git a/evaluation/SWE-bench/Makefile b/evaluation/SWE-bench/Makefile deleted file mode 100644 index 1d72f8f46f..0000000000 --- a/evaluation/SWE-bench/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -DOCKER_BUILD_REGISTRY=ghcr.io -DOCKER_BUILD_ORG=opendevin -DOCKER_BUILD_REPO=eval-swe-bench -DOCKER_BUILD_TAG=v0.1.0 -FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(DOCKER_BUILD_TAG) - -LATEST_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):latest - -MAJOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1) -MAJOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MAJOR_VERSION) -MINOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1,2) -MINOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MINOR_VERSION) - -# normally, for local build testing or development. use cross platform build for sharing images to others. -build: - docker build -f Dockerfile -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} . - -push: - docker push ${FULL_IMAGE} ${LATEST_FULL_IMAGE} - -test: - docker buildx build --platform linux/amd64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} --load -f Dockerfile . - -# cross platform build, you may need to manually stop the buildx(buildkit) container -all: - docker buildx build --platform linux/amd64,linux/arm64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} -t ${MINOR_FULL_IMAGE} --push -f Dockerfile . - -get-full-image: - @echo ${FULL_IMAGE} diff --git a/opendevin/sandbox/Makefile b/opendevin/sandbox/Makefile deleted file mode 100644 index e5b0a2d487..0000000000 --- a/opendevin/sandbox/Makefile +++ /dev/null @@ -1,31 +0,0 @@ -DOCKER_BUILD_REGISTRY=ghcr.io -DOCKER_BUILD_ORG=opendevin -DOCKER_BUILD_REPO=sandbox -DOCKER_BUILD_TAG=v0.2 -FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(DOCKER_BUILD_TAG) - -LATEST_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):latest - -MAJOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1) -MAJOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MAJOR_VERSION) -MINOR_VERSION=$(shell echo $(DOCKER_BUILD_TAG) | cut -d. -f1,2) -MINOR_FULL_IMAGE=$(DOCKER_BUILD_REGISTRY)/$(DOCKER_BUILD_ORG)/$(DOCKER_BUILD_REPO):$(MINOR_VERSION) - -# normally, for local build testing or development. use cross platform build for sharing images to others. -build: - docker build -f Dockerfile -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} . - -push: - docker push ${FULL_IMAGE} ${LATEST_FULL_IMAGE} - -test: - docker buildx build --platform linux/amd64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} --load -f Dockerfile . 
- -# cross platform build, you may need to manually stop the buildx(buildkit) container -all: - docker buildx build --platform linux/amd64,linux/arm64 \ - -t ${FULL_IMAGE} -t ${LATEST_FULL_IMAGE} -t ${MINOR_FULL_IMAGE} --push -f Dockerfile . - -get-full-image: - @echo ${FULL_IMAGE}
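
The per-directory Makefiles removed above are superseded by the single `containers/build.sh` entry point, which the CI matrix calls once per image. For local testing it can also be run by hand; the following is a minimal sketch of that usage, not part of the patch itself, and assumes Docker Buildx with a multi-platform builder is configured and that you have already run `docker login ghcr.io` before pushing.

```bash
# Build the sandbox image for both architectures. Without --push, buildx keeps the
# multi-arch result in the build cache and only the "latest" tag is applied.
./containers/build.sh sandbox

# Mimic the CI job: GITHUB_REF_NAME drives the extra tags (a sanitized ref name, plus
# major/minor tags when the ref looks like vX.Y.Z), and --push publishes the manifest.
GITHUB_REF_NAME=v0.2.0 ./containers/build.sh app --push
```

Each argument names a directory under `containers/` that must contain a `Dockerfile` and a `config.sh` defining `DOCKER_REPOSITORY` and `DOCKER_BASE_DIR`; the script exits with an error if either file is missing.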