diff --git a/.gitea/workflows/t.yaml b/.gitea/workflows/t.yaml
index 27f3f31..65d0c1a 100644
--- a/.gitea/workflows/t.yaml
+++ b/.gitea/workflows/t.yaml
@@ -40,10 +40,12 @@ jobs:
run: make down
working-directory: ./autogits/integration
- name: Start images
- run: make up
+ run: |
+ make up
+ make wait_healthy
working-directory: ./autogits/integration
- name: Run tests
- run: py.test-3.11 -v tests
+ run: make pytest
working-directory: ./autogits/integration
- name: Make sure the pod is down
if: always()
diff --git a/common/config.go b/common/config.go
index 7a4a53f..6492333 100644
--- a/common/config.go
+++ b/common/config.go
@@ -23,7 +23,6 @@ import (
"errors"
"fmt"
"io"
- "log"
"os"
"slices"
"strings"
@@ -205,16 +204,21 @@ func ReadWorkflowConfig(gitea GiteaFileContentAndRepoFetcher, git_project string
func ResolveWorkflowConfigs(gitea GiteaFileContentAndRepoFetcher, config *ConfigFile) (AutogitConfigs, error) {
configs := make([]*AutogitConfig, 0, len(config.GitProjectNames))
+ var errs []error
for _, git_project := range config.GitProjectNames {
c, err := ReadWorkflowConfig(gitea, git_project)
if err != nil {
// can't sync, so ignore for now
- log.Println(err)
+ errs = append(errs, err)
} else {
configs = append(configs, c)
}
}
+ if len(errs) > 0 {
+ return configs, errors.Join(errs...)
+ }
+
return configs, nil
}
diff --git a/group-review/main.go b/group-review/main.go
index 07d8f3b..9156d23 100644
--- a/group-review/main.go
+++ b/group-review/main.go
@@ -327,6 +327,7 @@ func main() {
interval := flag.Int64("interval", 10, "Notification polling interval in minutes (min 1 min)")
configFile := flag.String("config", "", "PrjGit listing config file")
logging := flag.String("logging", "info", "Logging level: [none, error, info, debug]")
+ exitOnConfigError := flag.Bool("exit-on-config-error", false, "Exit if any repository in configuration cannot be resolved")
flag.BoolVar(&common.IsDryRun, "dry", false, "Dry run, no effect. For debugging")
flag.Parse()
@@ -382,8 +383,10 @@ func main() {
giteaTransport := common.AllocateGiteaTransport(*giteaUrl)
configs, err := common.ResolveWorkflowConfigs(giteaTransport, configData)
if err != nil {
- common.LogError("Cannot parse workflow configs:", err)
- return
+ common.LogError("Failed to resolve some configuration repositories:", err)
+ if *exitOnConfigError {
+ return
+ }
}
reviewer, err := giteaTransport.GetCurrentUser()
diff --git a/integration/Dockerfile b/integration/Dockerfile.tester
similarity index 71%
rename from integration/Dockerfile
rename to integration/Dockerfile.tester
index 19552f6..f1d631d 100644
--- a/integration/Dockerfile
+++ b/integration/Dockerfile.tester
@@ -4,7 +4,7 @@ ENV container=podman
ENV LANG=en_US.UTF-8
-RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency
+RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency python3-pytest-httpserver
COPY . /opt/project/
diff --git a/integration/Makefile b/integration/Makefile
index f2d381f..5ce9bce 100644
--- a/integration/Makefile
+++ b/integration/Makefile
@@ -1,51 +1,19 @@
-# We want to be able to test in two **modes**:
-# A. bots are used from official packages as defined in */Dockerfile.package
-# B. bots are just picked up from binaries that are placed in corresponding parent directory.
-
# The topology is defined in podman-compose file and can be spawned in two ways:
-# 1. Privileged container (needs no additional dependancies)
-# 2. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)
-
+# 1. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)
+# 2. pytest in a dedicated container (recommended)
# Typical workflow:
-# A1: - run 'make test_package'
-# B1: - run 'make test_local' (make sure that the go binaries in parent folder are built)
-# A2:
-# 1. 'make build_package' - prepares images (recommended, otherwise there might be surprises if image fails to build during `make up`)
-# 2. 'make up' - spawns podman-compose
-# 3. 'pytest -v tests/*' - run tests
-# 4. 'make down' - once the containers are not needed
-# B2: (make sure the go binaries in the parent folder are built)
-# 1. 'make build_local' - prepared images (recommended, otherwise there might be surprises if image fails to build during `make up`)
-# 2. 'make up' - spawns podman-compose
-# 3. 'pytest -v tests/*' - run tests
-# 4. 'make down' - once the containers are not needed
-
+# 1. 'make build' - prepares images
+# 2. 'make up' - spawns podman-compose
+# 3. 'make pytest' - run tests inside the tester container
+# 4. 'make down' - once the containers are not needed
+#
+# OR just run 'make test' to do it all at once.
AUTO_DETECT_MODE := $(shell if test -e ../workflow-pr/workflow-pr; then echo .local; else echo .package; fi)
-# try to detect mode B1, otherwise mode A1
-test: GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE)
-test: build_container test_container
-
-# mode A1
-test_package: GIWTF_IMAGE_SUFFIX=.package
-test_package: build_container test_container
-
-# mode B1
-test_local: GIWTF_IMAGE_SUFFIX=.local
-test_local: build_container test_container
-
-MODULES := gitea-events-rabbitmq-publisher obs-staging-bot workflow-pr
-
-# Prepare topology 1
-build_container:
- podman build ../ -f integration/Dockerfile -t autogits_integration
-
-# Run tests in topology 1
-test_container:
- podman run --rm --privileged -t -e GIWTF_IMAGE_SUFFIX=$(GIWTF_IMAGE_SUFFIX) autogits_integration /usr/bin/bash -c "make build && make up && sleep 25 && pytest -v tests/*"
-
+# Default test target
+test: test_b
build_local: AUTO_DETECT_MODE=.local
build_local: build
@@ -53,16 +21,66 @@ build_local: build
build_package: AUTO_DETECT_MODE=.package
build_package: build
-# parse all service images from podman-compose and build them (topology 2)
+# parse all service images from podman-compose and build them
+# mode B with pytest in container
+test_b: AUTO_DETECT_MODE=.local
+test_b: build up wait_healthy pytest
+
+# Complete cycle for CI
+test-ci: test_b down
+
+wait_healthy:
+ @echo "Waiting for services to be healthy..."
+ @echo "Waiting for gitea (max 2m)..."
+ @start_time=$$(date +%s); \
+ until podman exec gitea-test curl -f -s http://localhost:3000/api/v1/version >/dev/null 2>&1; do \
+ current_time=$$(date +%s); \
+ elapsed=$$((current_time - start_time)); \
+ if [ $$elapsed -gt 120 ]; then \
+ echo "ERROR: Gitea failed to start within 2 minutes."; \
+ echo "--- Troubleshooting Info ---"; \
+ echo "Diagnostics output (curl):"; \
+ podman exec gitea-test curl -v http://localhost:3000/api/v1/version || true; \
+ echo "--- Container Logs ---"; \
+ podman logs gitea-test --tail 20; \
+ echo "--- Container Status ---"; \
+ podman inspect gitea-test --format '{{.State.Status}}'; \
+ exit 1; \
+ fi; \
+ sleep 2; \
+ done
+ @echo "Waiting for rabbitmq (max 2m)..."
+ @start_time=$$(date +%s); \
+ until podman exec rabbitmq-test rabbitmq-diagnostics check_running -q >/dev/null 2>&1; do \
+ current_time=$$(date +%s); \
+ elapsed=$$((current_time - start_time)); \
+ if [ $$elapsed -gt 120 ]; then \
+ echo "ERROR: RabbitMQ failed to start within 2 minutes."; \
+ echo "--- Troubleshooting Info ---"; \
+ echo "Diagnostics output:"; \
+ podman exec rabbitmq-test rabbitmq-diagnostics check_running || true; \
+ echo "--- Container Logs ---"; \
+ podman logs rabbitmq-test --tail 20; \
+ echo "--- Container Status ---"; \
+ podman inspect rabbitmq-test --format '{{.State.Status}}'; \
+ exit 1; \
+ fi; \
+ sleep 2; \
+ done
+ @echo "All services are healthy!"
+
+pytest:
+ podman-compose exec tester pytest -v tests/*
+
build:
podman pull docker.io/library/rabbitmq:3.13.7-management
for i in $$(grep -A 1000 services: podman-compose.yml | grep -oE '^ [^: ]+'); do GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE) podman-compose build $$i || exit 1; done
-# this will spawn prebuilt containers (topology 2)
+# this will spawn prebuilt containers
up:
podman-compose up -d
-# tear down (topology 2)
+# tear down
down:
podman-compose down
@@ -73,4 +91,3 @@ up-bots-package:
# mode B
up-bots-local:
GIWTF_IMAGE_SUFFIX=.local podman-compose up -d
-
diff --git a/integration/Makefile.md b/integration/Makefile.md
new file mode 100644
index 0000000..b4d1644
--- /dev/null
+++ b/integration/Makefile.md
@@ -0,0 +1,52 @@
+# Makefile Targets
+
+This document describes the targets available in the `integration/Makefile`.
+
+## Primary Workflow
+
+### `test` (or `test_b`)
+- **Action**: Performs a complete build-and-test cycle.
+- **Steps**:
+ 1. `build`: Prepares all container images.
+ 2. `up`: Starts all services via `podman-compose`.
+ 3. `wait_healthy`: Polls Gitea and RabbitMQ until they are ready.
+ 4. `pytest`: Executes the test suite inside the `tester` container.
+- **Outcome**: The environment remains active for fast iteration.
+
+### `test-ci`
+- **Action**: Performs the full `test` cycle followed by teardown.
+- **Steps**: `test_b` -> `down`
+- **Purpose**: Ideal for CI environments where a clean state is required after testing.
+
+---
+
+## Individual Targets
+
+### `build`
+- **Action**: Pulls external images (RabbitMQ) and builds all local service images defined in `podman-compose.yml`.
+- **Note**: Use `build_local` or `build_package` to specify bot source mode.
+
+### `up`
+- **Action**: Starts the container topology in detached mode.
+
+### `wait_healthy`
+- **Action**: Polls the health status of `gitea-test` and `rabbitmq-test` containers.
+- **Purpose**: Ensures infrastructure is stable before test execution.
+
+### `pytest`
+- **Action**: Runs `pytest -v tests/*` inside the running `tester` container.
+- **Requirement**: The environment must already be started via `up`.
+
+### `down`
+- **Action**: Stops and removes all containers and networks defined in the compose file.
+
+---
+
+## Configuration Modes
+
+The Makefile supports two deployment modes via `GIWTF_IMAGE_SUFFIX`:
+
+- **.local** (Default): Uses binaries built from the local source (requires `make build` in project root).
+- **.package**: Uses official pre-built packages for the bots.
+
+Targets like `build_local`, `build_package`, `up-bots-local`, and `up-bots-package` allow for explicit mode selection.
diff --git a/integration/Makefile.txt b/integration/Makefile.txt
deleted file mode 100644
index 13bfb44..0000000
--- a/integration/Makefile.txt
+++ /dev/null
@@ -1,57 +0,0 @@
-+-------------------------------------------------------------------------------------------------+
-| Makefile Targets |
-+-------------------------------------------------------------------------------------------------+
-| |
-| [Default Test Workflow] |
-| test (Auto-detects mode: .local or .package) |
-| └─> build_container |
-| └─> test_container |
-| |
-| [Specific Test Workflows - Topology 1: Privileged Container] |
-| test_package (Mode A1: Bots from official packages) |
-| └─> build_container |
-| └─> test_container |
-| |
-| test_local (Mode B1: Bots from local binaries) |
-| └─> build_container |
-| └─> test_container |
-| |
-| build_container |
-| - Action: Builds the `autogits_integration` privileged container image. |
-| - Purpose: Prepares an environment for running tests within a single container. |
-| |
-| test_container |
-| - Action: Runs `autogits_integration` container, executes `make build`, `make up`, and |
-| `pytest -v tests/*` inside it. |
-| - Purpose: Executes the full test suite in Topology 1 (privileged container). |
-| |
-| [Build & Orchestration Workflows - Topology 2: podman-compose] |
-| |
-| build_package (Mode A: Builds service images from official packages) |
-| └─> build |
-| |
-| build_local (Mode B: Builds service images from local binaries) |
-| └─> build |
-| |
-| build |
-| - Action: Pulls `rabbitmq` image and iterates through `podman-compose.yml` services |
-| to build each one. |
-| - Purpose: Prepares all necessary service images for Topology 2 deployment. |
-| |
-| up |
-| - Action: Starts all services defined in `podman-compose.yml` in detached mode. |
-| - Purpose: Deploys the application topology (containers) for testing or development. |
-| |
-| down |
-| - Action: Stops and removes all services started by `up`. |
-| - Purpose: Cleans up the deployed application topology. |
-| |
-| up-bots-package (Mode A: Spawns Topology 2 with official package bots) |
-| - Action: Calls `podman-compose up -d` with `GIWTF_IMAGE_SUFFIX=.package`. |
-| - Purpose: Specifically brings up the environment using official package bots. |
-| |
-| up-bots-local (Mode B: Spawns Topology 2 with local binaries) |
-| - Action: Calls `podman-compose up -d` with `GIWTF_IMAGE_SUFFIX=.local`. |
-| - Purpose: Specifically brings up the environment using local binaries. |
-| |
-+-------------------------------------------------------------------------------------------------+
diff --git a/integration/mock-obs/Dockerfile b/integration/mock-obs/Dockerfile
deleted file mode 100644
index 5e9416a..0000000
--- a/integration/mock-obs/Dockerfile
+++ /dev/null
@@ -1,14 +0,0 @@
-# Use a base Python image
-FROM registry.suse.com/bci/python:3.11
-
-# Set the working directory
-WORKDIR /app
-
-# Copy the server script
-COPY server.py .
-
-# Expose the port the server will run on
-EXPOSE 8080
-
-# Command to run the server
-CMD ["python3", "-u", "server.py"]
diff --git a/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0:PullRequest__meta b/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0:PullRequest__meta
deleted file mode 100644
index 1dfee8c..0000000
--- a/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0:PullRequest__meta
+++ /dev/null
@@ -1,18 +0,0 @@
-
- Leap 16.0 PullRequest area
- Base project to define the pull request builds
-
-
-
-
-
-
-
-
- x86_64
- i586
- aarch64
- ppc64le
- s390x
-
-
diff --git a/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0__meta b/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0__meta
deleted file mode 100644
index dbbf926..0000000
--- a/integration/mock-obs/responses/GET_source_openSUSE:Leap:16.0__meta
+++ /dev/null
@@ -1,59 +0,0 @@
-
- openSUSE Leap 16.0 based on SLFO
- Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)
-
- http://gitea-test:3000/myproducts/mySLFO#staging-main
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- local
- i586
- x86_64
- aarch64
- ppc64le
- s390x
-
-
-
-
-
-
-
-
- local
- i586
- x86_64
- aarch64
- ppc64le
- s390x
-
-
- armv7l
-
-
-
-
-
-
- i586
- x86_64
- aarch64
- ppc64le
- s390x
-
-
diff --git a/integration/mock-obs/responses/PUT_source_openSUSE:Leap:16.0:PullRequest:2__meta b/integration/mock-obs/responses/PUT_source_openSUSE:Leap:16.0:PullRequest:2__meta
deleted file mode 100644
index 473a0f4..0000000
diff --git a/integration/mock-obs/server.py b/integration/mock-obs/server.py
deleted file mode 100644
index 316b276..0000000
--- a/integration/mock-obs/server.py
+++ /dev/null
@@ -1,140 +0,0 @@
-import http.server
-import socketserver
-import os
-import logging
-import signal
-import sys
-import threading
-import fnmatch
-
-PORT = 8080
-RESPONSE_DIR = "/app/responses"
-STATE_DIR = "/tmp/mock_obs_state"
-
-class MockOBSHandler(http.server.SimpleHTTPRequestHandler):
- def do_GET(self):
- logging.info(f"GET request for: {self.path}")
- path_without_query = self.path.split('?')[0]
-
- # Check for state stored by a PUT request first
- sanitized_put_path = 'PUT' + path_without_query.replace('/', '_')
- state_file_path = os.path.join(STATE_DIR, sanitized_put_path)
- if os.path.exists(state_file_path):
- logging.info(f"Found stored PUT state for {self.path} at {state_file_path}")
- self.send_response(200)
- self.send_header("Content-type", "application/xml")
- file_size = os.path.getsize(state_file_path)
- self.send_header("Content-Length", str(file_size))
- self.end_headers()
- with open(state_file_path, 'rb') as f:
- self.wfile.write(f.read())
- return
-
- # If no PUT state file, fall back to the glob/exact match logic
- self.handle_request('GET')
-
- def do_PUT(self):
- logging.info(f"PUT request for: {self.path}")
- logging.info(f"Headers: {self.headers}")
- path_without_query = self.path.split('?')[0]
-
- body = b''
- if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
- logging.info("Chunked transfer encoding detected")
- while True:
- line = self.rfile.readline().strip()
- if not line:
- break
- chunk_length = int(line, 16)
- if chunk_length == 0:
- self.rfile.readline()
- break
- body += self.rfile.read(chunk_length)
- self.rfile.read(2) # Read the trailing CRLF
- else:
- content_length = int(self.headers.get('Content-Length', 0))
- body = self.rfile.read(content_length)
-
- logging.info(f"Body: {body.decode('utf-8')}")
- sanitized_path = 'PUT' + path_without_query.replace('/', '_')
- state_file_path = os.path.join(STATE_DIR, sanitized_path)
-
- logging.info(f"Saving state for {self.path} to {state_file_path}")
- os.makedirs(os.path.dirname(state_file_path), exist_ok=True)
- with open(state_file_path, 'wb') as f:
- f.write(body)
-
- self.send_response(200)
- self.send_header("Content-type", "text/plain")
- response_body = b"OK"
- self.send_header("Content-Length", str(len(response_body)))
- self.end_headers()
- self.wfile.write(response_body)
-
- def do_POST(self):
- logging.info(f"POST request for: {self.path}")
- self.handle_request('POST')
-
- def do_DELETE(self):
- logging.info(f"DELETE request for: {self.path}")
- self.handle_request('DELETE')
-
- def handle_request(self, method):
- path_without_query = self.path.split('?')[0]
- sanitized_request_path = method + path_without_query.replace('/', '_')
- logging.info(f"Handling request, looking for match for: {sanitized_request_path}")
-
- response_file = None
- # Check for glob match first
- if os.path.exists(RESPONSE_DIR):
- for filename in os.listdir(RESPONSE_DIR):
- if fnmatch.fnmatch(sanitized_request_path, filename):
- response_file = os.path.join(RESPONSE_DIR, filename)
- logging.info(f"Found matching response file (glob): {response_file}")
- break
-
- # Fallback to exact match if no glob match
- if response_file is None:
- exact_file = os.path.join(RESPONSE_DIR, sanitized_request_path)
- if os.path.exists(exact_file):
- response_file = exact_file
- logging.info(f"Found matching response file (exact): {response_file}")
-
- if response_file:
- logging.info(f"Serving content from {response_file}")
- self.send_response(200)
- self.send_header("Content-type", "application/xml")
- file_size = os.path.getsize(response_file)
- self.send_header("Content-Length", str(file_size))
- self.end_headers()
- with open(response_file, 'rb') as f:
- self.wfile.write(f.read())
- else:
- logging.info(f"Response file not found for {sanitized_request_path}. Sending 404.")
- self.send_response(404)
- self.send_header("Content-type", "text/plain")
- body = f"Mock response not found for {sanitized_request_path}".encode('utf-8')
- self.send_header("Content-Length", str(len(body)))
- self.end_headers()
- self.wfile.write(body)
-
-if __name__ == "__main__":
- logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
-
- if not os.path.exists(STATE_DIR):
- logging.info(f"Creating state directory: {STATE_DIR}")
- os.makedirs(STATE_DIR)
- if not os.path.exists(RESPONSE_DIR):
- os.makedirs(RESPONSE_DIR)
-
- with socketserver.TCPServer(("", PORT), MockOBSHandler) as httpd:
- logging.info(f"Serving mock OBS API on port {PORT}")
-
- def graceful_shutdown(sig, frame):
- logging.info("Received SIGTERM, shutting down gracefully...")
- threading.Thread(target=httpd.shutdown).start()
-
- signal.signal(signal.SIGTERM, graceful_shutdown)
-
- httpd.serve_forever()
- logging.info("Server has shut down.")
\ No newline at end of file
diff --git a/integration/podman-compose.md b/integration/podman-compose.md
new file mode 100644
index 0000000..6ee6b0f
--- /dev/null
+++ b/integration/podman-compose.md
@@ -0,0 +1,64 @@
+# Podman-Compose Services Architecture
+
+This document describes the services defined in `podman-compose.yml` used for integration testing.
+
+## Network
+- **gitea-network**: A bridge network that enables communication between all services.
+
+## Services
+
+### gitea
+- **Description**: Self-hosted Git service, serving as the central hub for repositories.
+- **Container Name**: `gitea-test`
+- **Image**: Built from `./gitea/Dockerfile`
+- **Ports**: `3000` (HTTP), `3022` (SSH)
+- **Volumes**: `./gitea-data` (persistent data), `./gitea-logs` (logs)
+- **Healthcheck**: Monitors the Gitea API version endpoint.
+
+### rabbitmq
+- **Description**: Message broker for asynchronous communication between services.
+- **Container Name**: `rabbitmq-test`
+- **Image**: `rabbitmq:3.13.7-management`
+- **Ports**: `5671` (AMQP with TLS), `15672` (Management UI)
+- **Volumes**: `./rabbitmq-data`, `./rabbitmq-config/certs`, `./rabbitmq-config/rabbitmq.conf`, `./rabbitmq-config/definitions.json`
+- **Healthcheck**: Ensures the broker is running and ready to accept connections.
+
+### gitea-publisher
+- **Description**: Publishes events from Gitea webhooks to the RabbitMQ message queue.
+- **Container Name**: `gitea-publisher`
+- **Dependencies**: `gitea` (started), `rabbitmq` (healthy)
+- **Topic Domain**: `suse`
+
+### workflow-pr
+- **Description**: Manages pull request workflows, synchronizing between ProjectGit and PackageGit.
+- **Container Name**: `workflow-pr`
+- **Dependencies**: `gitea` (started), `rabbitmq` (healthy)
+- **Environment**: Configured via `AUTOGITS_*` variables.
+- **Volumes**: `./gitea-data` (read-only), `./workflow-pr/workflow-pr.json` (config), `./workflow-pr-repos` (working directories)
+
+### tester
+- **Description**: The dedicated test runner container. It hosts the `pytest` suite and provides a mock OBS API using `pytest-httpserver`.
+- **Container Name**: `tester`
+- **Image**: Built from `./Dockerfile.tester`
+- **Mock API**: Listens on port `8080` within the container network to simulate OBS.
+- **Volumes**: Project root mounted at `/opt/project` for source access.
+
+### obs-staging-bot
+- **Description**: Interacts with Gitea and the OBS API (mocked by `tester`) to manage staging projects.
+- **Container Name**: `obs-staging-bot`
+- **Dependencies**: `gitea` (started), `tester` (started)
+- **Environment**:
+ - `AUTOGITS_STAGING_BOT_POLL_INTERVAL`: Set to `2s` for fast integration testing.
+- **Mock Integration**: Points to `http://tester:8080` for both OBS API and Web hosts.
+
+---
+
+## Testing Workflow
+
+1. **Build**: `make build` (root) then `make build` (integration).
+2. **Up**: `make up` starts all services.
+3. **Wait**: `make wait_healthy` ensures infrastructure is ready.
+4. **Test**: `make pytest` runs the suite inside the `tester` container.
+5. **Down**: `make down` stops and removes containers.
+
+Use `make test` to perform steps 1-4 automatically.
diff --git a/integration/podman-compose.txt b/integration/podman-compose.txt
deleted file mode 100644
index 9834cac..0000000
--- a/integration/podman-compose.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-+-------------------------------------------------------------------------------------------------+
-| Podman-Compose Services Diagram |
-+-------------------------------------------------------------------------------------------------+
-| |
-| [Network] |
-| gitea-network (Bridge network for inter-service communication) |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: gitea] |
-| Description: Self-hosted Git service, central hub for repositories and code management. |
-| Container Name: gitea-test |
-| Image: Built from ./gitea Dockerfile |
-| Ports: 3000 (HTTP), 3022 (SSH) |
-| Volumes: ./gitea-data (for persistent data), ./gitea-logs (for logs) |
-| Network: gitea-network |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: rabbitmq] |
-| Description: Message broker for asynchronous communication between services. |
-| Container Name: rabbitmq-test |
-| Image: rabbitmq:3.13.7-management |
-| Ports: 5671 (AMQP), 15672 (Management UI) |
-| Volumes: ./rabbitmq-data (for persistent data), ./rabbitmq-config/certs (TLS certs), |
-| ./rabbitmq-config/rabbitmq.conf (config), ./rabbitmq-config/definitions.json (exchanges)|
-| Healthcheck: Ensures RabbitMQ is running and healthy. |
-| Network: gitea-network |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: gitea-publisher] |
-| Description: Publishes events from Gitea to the RabbitMQ message queue. |
-| Container Name: gitea-publisher |
-| Image: Built from ../gitea-events-rabbitmq-publisher/Dockerfile (local/package) |
-| Dependencies: gitea (started), rabbitmq (healthy) |
-| Environment: RABBITMQ_HOST, RABBITMQ_USERNAME, RABBITMQ_PASSWORD, SSL_CERT_FILE |
-| Command: Listens for Gitea events, publishes to 'suse' topic, debug enabled. |
-| Network: gitea-network |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: workflow-pr] |
-| Description: Manages pull request workflows, likely consuming events from RabbitMQ and |
-| interacting with Gitea. |
-| Container Name: workflow-pr |
-| Image: Built from ../workflow-pr/Dockerfile (local/package) |
-| Dependencies: gitea (started), rabbitmq (healthy) |
-| Environment: AMQP_USERNAME, AMQP_PASSWORD, SSL_CERT_FILE |
-| Volumes: ./gitea-data (read-only), ./workflow-pr/workflow-pr.json (config), |
-| ./workflow-pr-repos (for repositories) |
-| Command: Configures Gitea/RabbitMQ URLs, enables debug, manages repositories. |
-| Network: gitea-network |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: mock-obs] |
-| Description: A mock (simulated) service for the Open Build Service (OBS) for testing. |
-| Container Name: mock-obs |
-| Image: Built from ./mock-obs Dockerfile |
-| Ports: 8080 |
-| Volumes: ./mock-obs/responses (for mock API responses) |
-| Network: gitea-network |
-| |
-|-------------------------------------------------------------------------------------------------|
-| |
-| [Service: obs-staging-bot] |
-| Description: A bot that interacts with Gitea and the mock OBS, likely for staging processes. |
-| Container Name: obs-staging-bot |
-| Image: Built from ../obs-staging-bot/Dockerfile (local/package) |
-| Dependencies: gitea (started), mock-obs (started) |
-| Environment: OBS_USER, OBS_PASSWORD |
-| Volumes: ./gitea-data (read-only) |
-| Command: Configures Gitea/OBS URLs, enables debug. |
-| Network: gitea-network |
-| |
-+-------------------------------------------------------------------------------------------------+
diff --git a/integration/podman-compose.yml b/integration/podman-compose.yml
index 2eed1f9..572626a 100644
--- a/integration/podman-compose.yml
+++ b/integration/podman-compose.yml
@@ -95,7 +95,8 @@ services:
- ./workflow-pr/workflow-pr.json:/etc/workflow-pr.json:ro,z
- ./workflow-pr-repos:/var/lib/workflow-pr/repos:Z
command: [
- "-check-on-start",
+ "-check-on-start",
+ "-exit-on-config-error",
"-debug",
"-gitea-url", "http://gitea-test:3000",
"-url", "amqps://rabbitmq-test:5671",
@@ -104,17 +105,21 @@ services:
]
restart: unless-stopped
- mock-obs:
- build: ./mock-obs
- container_name: mock-obs
+ tester:
+ build:
+ context: .
+ dockerfile: Dockerfile.tester
+ container_name: tester
init: true
+ dns_search: .
networks:
- gitea-network
- ports:
- - "8080:8080"
+ environment:
+ - PYTEST_HTTPSERVER_HOST=0.0.0.0
+ - PYTEST_HTTPSERVER_PORT=8080
volumes:
- - ./mock-obs/responses:/app/responses:z # Use :z for shared SELinux label
- restart: unless-stopped
+ - ..:/opt/project:z
+ command: sleep infinity
obs-staging-bot:
build:
@@ -127,16 +132,17 @@ services:
depends_on:
gitea:
condition: service_started
- mock-obs:
+ tester:
condition: service_started
environment:
- OBS_USER=mock
- OBS_PASSWORD=mock-long-password
+ - AUTOGITS_STAGING_BOT_POLL_INTERVAL=2s
volumes:
- ./gitea-data:/gitea-data:ro,z
command:
- "-debug"
- "-gitea-url=http://gitea-test:3000"
- - "-obs=http://mock-obs:8080"
- - "-obs-web=http://mock-obs:8080"
+ - "-obs=http://tester:8080"
+ - "-obs-web=http://tester:8080"
restart: unless-stopped
diff --git a/integration/tests/conftest.py b/integration/tests/conftest.py
index 80d5104..520d263 100644
--- a/integration/tests/conftest.py
+++ b/integration/tests/conftest.py
@@ -8,7 +8,83 @@ import time
import os
import json
import base64
-from tests.lib.common_test_utils import GiteaAPIClient
+import re
+from tests.lib.common_test_utils import GiteaAPIClient, vprint
+import tests.lib.common_test_utils as common_utils
+
+@pytest.fixture(autouse=True)
+def is_test_run():
+ common_utils.IS_TEST_RUN = True
+ yield
+ common_utils.IS_TEST_RUN = False
+
+if os.environ.get("AUTOGITS_PRINT_FIXTURES") is None:
+ print("--- Fixture messages are suppressed. Set AUTOGITS_PRINT_FIXTURES=1 to enable them. ---")
+
+class ObsMockState:
+ def __init__(self):
+ self.build_results = {} # project -> (package, code)
+ self.project_metas = {} # project -> scmsync
+ self.default_build_result = None
+
+@pytest.fixture
+def obs_mock_state():
+ return ObsMockState()
+
+@pytest.fixture(autouse=True)
+def default_obs_handlers(httpserver, obs_mock_state):
+ """
+ Sets up default handlers for OBS API to avoid 404s.
+ """
+ def project_meta_handler(request):
+ project = request.path.split("/")[2]
+ scmsync = obs_mock_state.project_metas.get(project, "http://gitea-test:3000/myproducts/mySLFO.git")
+        return f'<project name="{project}"><scmsync>{scmsync}</scmsync></project>'
+
+ def build_result_handler(request):
+ project = request.path.split("/")[2]
+ res = obs_mock_state.build_results.get(project) or obs_mock_state.default_build_result
+
+ if not res:
+ return ''
+
+ package_name, code = res
+
+ # We'll use a simple hardcoded XML here to avoid re-parsing template every time
+ # or we can use the template. For simplicity, let's use a basic one.
+        xml_template = f"""<resultlist>
+  <result project="{project}" repository="standard" arch="x86_64" code="published" state="published" scmsync="http://gitea-test:3000/myproducts/mySLFO.git?onlybuild={package_name}#sha">
+    <status package="{package_name}" code="{code}"/>
+  </result>
+</resultlist>
+"""
+ return xml_template
+
+ # Register handlers
+ httpserver.expect_request(re.compile(r"/source/[^/]+/_meta$"), method="GET").respond_with_handler(project_meta_handler)
+ httpserver.expect_request(re.compile(r"/build/[^/]+/_result"), method="GET").respond_with_handler(build_result_handler)
+ httpserver.expect_request(re.compile(r"/source/[^/]+/_meta$"), method="PUT").respond_with_data("OK")
+ httpserver.expect_request(re.compile(r"/source/[^/]+$"), method="DELETE").respond_with_data("OK")
+
+@pytest.fixture
+def mock_build_result(obs_mock_state):
+ """
+ Fixture to set up mock build results.
+ """
+ def _setup_mock(package_name: str, code: str, project: str = None):
+ if project:
+ obs_mock_state.build_results[project] = (package_name, code)
+ else:
+ # If no project specified, we can't easily know which one to set
+ # but usually it's the one the bot will request.
+ # We'll use a special key to signify "all" or we can just wait for the request.
+ # For now, let's assume we want to match openSUSE:Leap:16.0:PullRequest:*
+ # The test will call it with specific project if needed.
+ # In test_pr_workflow, it doesn't know the PR number yet.
+ # So we'll make the handler fallback to this if project not found.
+ obs_mock_state.default_build_result = (package_name, code)
+
+ return _setup_mock
BRANCH_CONFIG_COMMON = {
"workflow.config": {
@@ -96,7 +172,7 @@ _CREATED_USERS = set()
_CREATED_LABELS = set()
_ADDED_COLLABORATORS = set() # format: (org_repo, username)
-def setup_users_from_config(client: GiteaAPIClient, wf: dict, mt: dict):
+def setup_users_from_config(client: GiteaAPIClient, wf: dict, mt: dict, stats: dict = None, handled: dict = None):
"""
Parses workflow.config and _maintainership.json, creates users, and adds them as collaborators.
"""
@@ -116,13 +192,19 @@ def setup_users_from_config(client: GiteaAPIClient, wf: dict, mt: dict):
# Create all users
for username in all_users:
- if username not in _CREATED_USERS:
- client.create_user(username, "password123", f"{username}@example.com")
- _CREATED_USERS.add(username)
+ new_user = client.create_user(username, "password123", f"{username}@example.com")
+ _CREATED_USERS.add(username)
+ if stats and handled and username not in handled["users"]:
+ handled["users"].add(username)
+ if new_user: stats["users"]["new"] += 1
+ else: stats["users"]["reused"] += 1
- if ("myproducts/mySLFO", username) not in _ADDED_COLLABORATORS:
- client.add_collaborator("myproducts", "mySLFO", username, "write")
- _ADDED_COLLABORATORS.add(("myproducts/mySLFO", username))
+ new_coll = client.add_collaborator("myproducts", "mySLFO", username, "write")
+ _ADDED_COLLABORATORS.add(("myproducts/mySLFO", username))
+ if stats and handled and ("myproducts/mySLFO", username) not in handled["collaborators"]:
+ handled["collaborators"].add(("myproducts/mySLFO", username))
+ if new_coll: stats["collaborators"]["new"] += 1
+ else: stats["collaborators"]["reused"] += 1
# Set specific repository permissions based on maintainership
for pkg, users in mt.items():
@@ -130,20 +212,34 @@ def setup_users_from_config(client: GiteaAPIClient, wf: dict, mt: dict):
for username in users:
if not repo_name:
for r in ["pkgA", "pkgB"]:
- if (f"mypool/{r}", username) not in _ADDED_COLLABORATORS:
- client.add_collaborator("mypool", r, username, "write")
- _ADDED_COLLABORATORS.add((f"mypool/{r}", username))
+ new_coll = client.add_collaborator("mypool", r, username, "write")
+ _ADDED_COLLABORATORS.add((f"mypool/{r}", username))
+ if stats and handled and (f"mypool/{r}", username) not in handled["collaborators"]:
+ handled["collaborators"].add((f"mypool/{r}", username))
+ if new_coll: stats["collaborators"]["new"] += 1
+ else: stats["collaborators"]["reused"] += 1
else:
- if (f"mypool/{repo_name}", username) not in _ADDED_COLLABORATORS:
- client.add_collaborator("mypool", repo_name, username, "write")
- _ADDED_COLLABORATORS.add((f"mypool/{repo_name}", username))
+ new_coll = client.add_collaborator("mypool", repo_name, username, "write")
+ _ADDED_COLLABORATORS.add((f"mypool/{repo_name}", username))
+ if stats and handled and (f"mypool/{repo_name}", username) not in handled["collaborators"]:
+ handled["collaborators"].add((f"mypool/{repo_name}", username))
+ if new_coll: stats["collaborators"]["new"] += 1
+ else: stats["collaborators"]["reused"] += 1
-def ensure_config_file(client: GiteaAPIClient, owner: str, repo: str, branch: str, file_name: str, expected_content_dict: dict):
+def ensure_config_file(client: GiteaAPIClient, owner: str, repo: str, branch: str, file_name: str, expected_content_dict: dict, existing_files: list = None):
"""
Checks if a config file exists and has the correct content.
Returns True if a change was made, False otherwise.
"""
- file_info = client.get_file_info(owner, repo, file_name, branch=branch)
+ file_info = None
+ if existing_files is not None:
+ if file_name not in [f["path"] for f in existing_files]:
+ pass # File definitely doesn't exist
+ else:
+ file_info = client.get_file_info(owner, repo, file_name, branch=branch)
+ else:
+ file_info = client.get_file_info(owner, repo, file_name, branch=branch)
+
expected_content = json.dumps(expected_content_dict, indent=4)
if file_info:
@@ -163,8 +259,28 @@ def gitea_env():
"""
Global fixture to set up the Gitea environment for all tests.
"""
- gitea_url = "http://127.0.0.1:3000"
- admin_token_path = "./gitea-data/admin.token"
+ setup_start_time = time.time()
+ stats = {
+ "orgs": {"new": 0, "reused": 0},
+ "repos": {"new": 0, "reused": 0},
+ "users": {"new": 0, "reused": 0},
+ "labels": {"new": 0, "reused": 0},
+ "collaborators": {"new": 0, "reused": 0},
+ "branches": {"new": 0, "reused": 0},
+ "webhooks": {"new": 0, "reused": 0},
+ }
+ handled_in_session = {
+ "orgs": set(),
+ "repos": set(),
+ "users": set(),
+ "labels": set(),
+ "collaborators": set(),
+ "branches": set(),
+ "webhooks": set(),
+ }
+
+ gitea_url = "http://gitea-test:3000"
+ admin_token_path = os.path.join(os.path.dirname(__file__), "..", "gitea-data", "admin.token")
admin_token = None
try:
@@ -174,35 +290,55 @@ def gitea_env():
raise Exception(f"Admin token file not found at {admin_token_path}.")
client = GiteaAPIClient(base_url=gitea_url, token=admin_token)
+ client.use_cache = True
# Wait for Gitea
for i in range(10):
try:
- if client._request("GET", "version").status_code == 200:
+ resp, dur = client._request("GET", "version")
+ if resp.status_code == 200:
+ vprint(f"DEBUG: Gitea connection successful (duration: {dur:.3f}s)")
break
- except:
+ except Exception as e:
+ vprint(f"DEBUG: Gitea connection attempt {i+1} failed: {e}")
pass
time.sleep(1)
- else:
- raise Exception("Gitea not available.")
+ else: raise Exception("Gitea not available.")
- print("--- Starting Gitea Global Setup ---")
+ vprint("--- Starting Gitea Global Setup ---")
for org in ["myproducts", "mypool"]:
- if org not in _CREATED_ORGS:
- client.create_org(org)
- _CREATED_ORGS.add(org)
+ new_org = client.create_org(org)
+ _CREATED_ORGS.add(org)
+ if org not in handled_in_session["orgs"]:
+ handled_in_session["orgs"].add(org)
+ if new_org: stats["orgs"]["new"] += 1
+ else: stats["orgs"]["reused"] += 1
for org, repo in [("myproducts", "mySLFO"), ("mypool", "pkgA"), ("mypool", "pkgB")]:
- if f"{org}/{repo}" not in _CREATED_REPOS:
- client.create_repo(org, repo)
- client.update_repo_settings(org, repo)
- _CREATED_REPOS.add(f"{org}/{repo}")
+ new_repo = client.create_repo(org, repo)
+ client.update_repo_settings(org, repo)
+ repo_full = f"{org}/{repo}"
+ _CREATED_REPOS.add(repo_full)
+ if repo_full not in handled_in_session["repos"]:
+ handled_in_session["repos"].add(repo_full)
+ if new_repo: stats["repos"]["new"] += 1
+ else: stats["repos"]["reused"] += 1
+
+ # Create webhook for publisher
+ new_hook = client.create_webhook(org, repo, "http://gitea-publisher:8002/rabbitmq-forwarder")
+ if repo_full not in handled_in_session["webhooks"]:
+ handled_in_session["webhooks"].add(repo_full)
+ if new_hook: stats["webhooks"]["new"] += 1
+ else: stats["webhooks"]["reused"] += 1
# Create labels
for name, color in [("staging/Backlog", "#0000ff"), ("review/Pending", "#ffff00")]:
- if ("myproducts/mySLFO", name) not in _CREATED_LABELS:
- client.create_label("myproducts", "mySLFO", name, color=color)
- _CREATED_LABELS.add(("myproducts/mySLFO", name))
+ new_label = client.create_label("myproducts", "mySLFO", name, color=color)
+ _CREATED_LABELS.add(("myproducts/mySLFO", name))
+ if ("myproducts/mySLFO", name) not in handled_in_session["labels"]:
+ handled_in_session["labels"].add(("myproducts/mySLFO", name))
+ if new_label: stats["labels"]["new"] += 1
+ else: stats["labels"]["reused"] += 1
# Submodules in mySLFO
client.add_submodules("myproducts", "mySLFO")
@@ -211,24 +347,51 @@ def gitea_env():
("myproducts/mySLFO", "workflow-pr"),
("mypool/pkgA", "workflow-pr"),
("mypool/pkgB", "workflow-pr")]:
- if (repo_full, bot) not in _ADDED_COLLABORATORS:
- org_part, repo_part = repo_full.split("/")
- client.add_collaborator(org_part, repo_part, bot, "write")
- _ADDED_COLLABORATORS.add((repo_full, bot))
+ org_part, repo_part = repo_full.split("/")
+ new_coll = client.add_collaborator(org_part, repo_part, bot, "write")
+ _ADDED_COLLABORATORS.add((repo_full, bot))
+ if (repo_full, bot) not in handled_in_session["collaborators"]:
+ handled_in_session["collaborators"].add((repo_full, bot))
+ if new_coll: stats["collaborators"]["new"] += 1
+ else: stats["collaborators"]["reused"] += 1
+
+ # Collect all users from all configurations first to do setup once
+ all_setup_users_wf = {}
+ all_setup_users_mt = {}
- restart_needed = False
-
# Setup all branches and configs
+ repo_list = [("mypool", "pkgA"), ("mypool", "pkgB"), ("myproducts", "mySLFO")]
+ repo_branches = {}
+ for owner, repo in repo_list:
+ resp, _ = client._request("GET", f"repos/{owner}/{repo}/branches")
+ repo_branches[(owner, repo)] = {b["name"] for b in resp.json()}
+
for branch_name, custom_configs in BRANCH_CONFIG_CUSTOM.items():
# Ensure branch exists in all 3 repos
- for owner, repo in [("myproducts", "mySLFO"), ("mypool", "pkgA"), ("mypool", "pkgB")]:
+ for owner, repo in repo_list:
if branch_name != "main":
- try:
- main_sha = client._request("GET", f"repos/{owner}/{repo}/branches/main").json()["commit"]["id"]
- client.create_branch(owner, repo, branch_name, main_sha)
- except Exception as e:
- if "already exists" not in str(e).lower():
- raise
+ if branch_name not in repo_branches[(owner, repo)]:
+ try:
+ resp, _ = client._request("GET", f"repos/{owner}/{repo}/branches/main")
+ main_sha = resp.json()["commit"]["id"]
+ new_branch = client.create_branch(owner, repo, branch_name, main_sha)
+ repo_branches[(owner, repo)].add(branch_name)
+ if (f"{owner}/{repo}", branch_name) not in handled_in_session["branches"]:
+ handled_in_session["branches"].add((f"{owner}/{repo}", branch_name))
+ if new_branch: stats["branches"]["new"] += 1
+ else: stats["branches"]["reused"] += 1
+ except Exception as e:
+ if "already exists" not in str(e).lower():
+ raise
+ else:
+ if (f"{owner}/{repo}", branch_name) not in handled_in_session["branches"]:
+ handled_in_session["branches"].add((f"{owner}/{repo}", branch_name))
+ stats["branches"]["reused"] += 1
+ else:
+ # main branch always exists, but let's track it as reused if not handled
+ if (f"{owner}/{repo}", "main") not in handled_in_session["branches"]:
+ handled_in_session["branches"].add((f"{owner}/{repo}", "main"))
+ stats["branches"]["reused"] += 1
# Merge configs
merged_configs = {}
@@ -247,19 +410,40 @@ def gitea_env():
else:
merged_configs[file_name] = custom_content
+ # Pre-fetch existing files in this branch to avoid 404s in ensure_config_file
+ try:
+ resp, _ = client._request("GET", f"repos/myproducts/mySLFO/contents?ref={branch_name}")
+ existing_files = resp.json()
+ except:
+ existing_files = []
+
# Ensure config files in myproducts/mySLFO
for file_name, content_dict in merged_configs.items():
- if ensure_config_file(client, "myproducts", "mySLFO", branch_name, file_name, content_dict):
- restart_needed = True
+ ensure_config_file(client, "myproducts", "mySLFO", branch_name, file_name, content_dict, existing_files=existing_files)
- # Setup users (using configs from this branch)
- setup_users_from_config(client, merged_configs.get("workflow.config", {}), merged_configs.get("_maintainership.json", {}))
+ # Collect configs for user setup
+ wf_cfg = merged_configs.get("workflow.config", {})
+ mt_cfg = merged_configs.get("_maintainership.json", {})
+ # Simple merge for user collection
+ if "Reviewers" in wf_cfg:
+ all_setup_users_wf.setdefault("Reviewers", []).extend(wf_cfg["Reviewers"])
+ for k, v in mt_cfg.items():
+ all_setup_users_mt.setdefault(k, []).extend(v)
- if restart_needed:
- client.restart_service("workflow-pr")
- time.sleep(2) # Give it time to pick up changes
+ # Dedup and setup users once
+ if "Reviewers" in all_setup_users_wf:
+ all_setup_users_wf["Reviewers"] = list(set(all_setup_users_wf["Reviewers"]))
+ for k in all_setup_users_mt:
+ all_setup_users_mt[k] = list(set(all_setup_users_mt[k]))
+
+ setup_users_from_config(client, all_setup_users_wf, all_setup_users_mt, stats=stats, handled=handled_in_session)
- print("--- Gitea Global Setup Complete ---")
+ setup_duration = time.time() - setup_start_time
+ print(f"--- Gitea Global Setup Complete (took {setup_duration:.2f}s) ---\n"
+ f"Objects created: {stats['orgs']['new']} orgs, {stats['repos']['new']} repos, {stats['branches']['new']} branches, {stats['webhooks']['new']} webhooks, {stats['users']['new']} users, {stats['labels']['new']} labels, {stats['collaborators']['new']} collaborators\n"
+ f"Objects reused: {stats['orgs']['reused']} orgs, {stats['repos']['reused']} repos, {stats['branches']['reused']} branches, {stats['webhooks']['reused']} webhooks, {stats['users']['reused']} users, {stats['labels']['reused']} labels, {stats['collaborators']['reused']} collaborators")
+
+ client.use_cache = False
yield client
@pytest.fixture(scope="session")
diff --git a/integration/tests/lib/common_test_utils.py b/integration/tests/lib/common_test_utils.py
index 477584a..1e7437e 100644
--- a/integration/tests/lib/common_test_utils.py
+++ b/integration/tests/lib/common_test_utils.py
@@ -7,42 +7,12 @@ import re
import xml.etree.ElementTree as ET
from pathlib import Path
import base64
-import subprocess
-TEST_DATA_DIR = Path(__file__).parent.parent / "data"
-BUILD_RESULT_TEMPLATE = TEST_DATA_DIR / "build_result.xml.template"
-MOCK_RESPONSES_DIR = Path(__file__).parent.parent.parent / "mock-obs" / "responses"
-MOCK_BUILD_RESULT_FILE = (
- MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0:PullRequest:*__result"
-)
-MOCK_BUILD_RESULT_FILE1 = MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0__result"
-
-
-@pytest.fixture
-def mock_build_result():
- """
- Fixture to create a mock build result file from the template.
- Returns a factory function that the test can call with parameters.
- """
-
- def _create_result_file(package_name: str, code: str):
- tree = ET.parse(BUILD_RESULT_TEMPLATE)
- root = tree.getroot()
- for status_tag in root.findall(".//status"):
- status_tag.set("package", package_name)
- status_tag.set("code", code)
-
- MOCK_RESPONSES_DIR.mkdir(exist_ok=True)
- tree.write(MOCK_BUILD_RESULT_FILE)
- tree.write(MOCK_BUILD_RESULT_FILE1)
- return str(MOCK_BUILD_RESULT_FILE)
-
- yield _create_result_file
-
- if MOCK_BUILD_RESULT_FILE.exists():
- MOCK_BUILD_RESULT_FILE.unlink()
- MOCK_BUILD_RESULT_FILE1.unlink()
+IS_TEST_RUN = False
+def vprint(*args, **kwargs):
+ if IS_TEST_RUN or os.environ.get("AUTOGITS_PRINT_FIXTURES") == "1":
+ print(*args, **kwargs)
class GiteaAPIClient:
def __init__(self, base_url, token, sudo=None):
@@ -50,24 +20,45 @@ class GiteaAPIClient:
self.headers = {"Authorization": f"token {token}", "Content-Type": "application/json"}
if sudo:
self.headers["Sudo"] = sudo
+ self._cache = {}
+ self.use_cache = False
def _request(self, method, path, **kwargs):
+ # Very basic cache for GET requests to speed up setup
+ cache_key = (method, path, json.dumps(kwargs, sort_keys=True))
+ if self.use_cache and method == "GET" and cache_key in self._cache:
+ return self._cache[cache_key], 0.0
+
url = f"{self.base_url}/api/v1/{path}"
- response = requests.request(method, url, headers=self.headers, **kwargs)
+ start_time = time.time()
try:
+ response = requests.request(method, url, headers=self.headers, **kwargs)
+ duration = time.time() - start_time
response.raise_for_status()
+
+ if self.use_cache:
+ if method == "GET":
+ self._cache[cache_key] = response
+ else:
+ self._cache.clear()
+
+ return response, duration
except requests.exceptions.HTTPError as e:
- print(f"HTTPError in _request: {e}")
- print(f"Response Content: {e.response.text}")
+ duration = time.time() - start_time
+ vprint(f"[{duration:.3f}s] HTTPError in _request: {e}")
+ vprint(f"Response Content: {e.response.text}")
+ raise
+ except requests.exceptions.RequestException as e:
+ duration = time.time() - start_time
+ vprint(f"[{duration:.3f}s] Request failed: {e}")
raise
- return response
def get_file_info(self, owner: str, repo: str, file_path: str, branch: str = "main"):
url = f"repos/{owner}/{repo}/contents/{file_path}"
if branch and branch != "main":
url += f"?ref={branch}"
try:
- response = self._request("GET", url)
+ response, duration = self._request("GET", url)
return response.json()
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
@@ -75,7 +66,7 @@ class GiteaAPIClient:
raise
def create_user(self, username, password, email):
- print(f"--- Creating user: {username} ---")
+ vprint(f"--- Creating user: {username} ---")
data = {
"username": username,
"password": password,
@@ -84,18 +75,20 @@ class GiteaAPIClient:
"send_notify": False
}
try:
- self._request("POST", "admin/users", json=data)
- print(f"User '{username}' created.")
+ response, duration = self._request("POST", "admin/users", json=data)
+ vprint(f"[{duration:.3f}s] User '{username}' created.")
+ return True
except requests.exceptions.HTTPError as e:
if e.response.status_code == 422: # Already exists
- print(f"User '{username}' already exists. Updating password...")
+ vprint(f"User '{username}' already exists. Updating password...")
# Update password to be sure it matches our expectation
- self._request("PATCH", f"admin/users/{username}", json={"password": password, "login_name": username})
+ response, duration = self._request("PATCH", f"admin/users/{username}", json={"password": password, "login_name": username})
+ return False
else:
raise
def get_user_token(self, username, password, token_name="test-token"):
- print(f"--- Getting token for user: {username} ---")
+ vprint(f"--- Getting token for user: {username} ---")
url = f"{self.base_url}/api/v1/users/{username}/tokens"
# Create new token using Basic Auth
@@ -105,39 +98,30 @@ class GiteaAPIClient:
response.raise_for_status()
def create_org(self, org_name):
- print(f"--- Checking organization: {org_name} ---")
+ vprint(f"--- Checking organization: {org_name} ---")
try:
- self._request("GET", f"orgs/{org_name}")
- print(f"Organization '{org_name}' already exists.")
+ response, duration = self._request("GET", f"orgs/{org_name}")
+ vprint(f"[{duration:.3f}s] Organization '{org_name}' already exists.")
+ return False
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
- print(f"Creating organization '{org_name}'...")
+ vprint(f"Creating organization '{org_name}'...")
data = {"username": org_name, "full_name": org_name}
- self._request("POST", "orgs", json=data)
- print(f"Organization '{org_name}' created.")
- else:
- raise
- print(f"--- Checking organization: {org_name} ---")
- try:
- self._request("GET", f"orgs/{org_name}")
- print(f"Organization '{org_name}' already exists.")
- except requests.exceptions.HTTPError as e:
- if e.response.status_code == 404:
- print(f"Creating organization '{org_name}'...")
- data = {"username": org_name, "full_name": org_name}
- self._request("POST", "orgs", json=data)
- print(f"Organization '{org_name}' created.")
+ response, duration = self._request("POST", "orgs", json=data)
+ vprint(f"[{duration:.3f}s] Organization '{org_name}' created.")
+ return True
else:
raise
def create_repo(self, org_name, repo_name):
- print(f"--- Checking repository: {org_name}/{repo_name} ---")
+ vprint(f"--- Checking repository: {org_name}/{repo_name} ---")
try:
- self._request("GET", f"repos/{org_name}/{repo_name}")
- print(f"Repository '{org_name}/{repo_name}' already exists.")
+ response, duration = self._request("GET", f"repos/{org_name}/{repo_name}")
+ vprint(f"[{duration:.3f}s] Repository '{org_name}/{repo_name}' already exists.")
+ return False
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
- print(f"Creating repository '{org_name}/{repo_name}'...")
+ vprint(f"Creating repository '{org_name}/{repo_name}'...")
data = {
"name": repo_name,
"auto_init": True,
@@ -147,34 +131,48 @@ class GiteaAPIClient:
"private": False,
"readme": "Default"
}
- self._request("POST", f"orgs/{org_name}/repos", json=data)
- print(f"Repository '{org_name}/{repo_name}' created with a README.")
+ response, duration = self._request("POST", f"orgs/{org_name}/repos", json=data)
+ vprint(f"[{duration:.3f}s] Repository '{org_name}/{repo_name}' created with a README.")
time.sleep(0.1) # Added delay to allow Git operations to become available
+ return True
else:
raise
def add_collaborator(self, org_name, repo_name, collaborator_name, permission="write"):
- print(f"--- Adding {collaborator_name} as a collaborator to {org_name}/{repo_name} with '{permission}' permission ---")
+ vprint(f"--- Adding {collaborator_name} as a collaborator to {org_name}/{repo_name} with '{permission}' permission ---")
+
+ # Check if already a collaborator to provide accurate stats
+ try:
+ self._request("GET", f"repos/{org_name}/{repo_name}/collaborators/{collaborator_name}")
+ vprint(f"{collaborator_name} is already a collaborator of {org_name}/{repo_name}.")
+ return False
+ except requests.exceptions.HTTPError as e:
+ if e.response.status_code != 404:
+ raise
+
data = {"permission": permission}
# Gitea API returns 204 No Content on success and doesn't fail if already present.
- self._request("PUT", f"repos/{org_name}/{repo_name}/collaborators/{collaborator_name}", json=data)
- print(f"Attempted to add {collaborator_name} to {org_name}/{repo_name}.")
+ response, duration = self._request("PUT", f"repos/{org_name}/{repo_name}/collaborators/{collaborator_name}", json=data)
+ vprint(f"[{duration:.3f}s] Added {collaborator_name} to {org_name}/{repo_name}.")
+ return True
def add_submodules(self, org_name, repo_name):
- print(f"--- Adding submodules to {org_name}/{repo_name} using diffpatch ---")
+ vprint(f"--- Adding submodules to {org_name}/{repo_name} using diffpatch ---")
parent_repo_path = f"repos/{org_name}/{repo_name}"
try:
- self._request("GET", f"{parent_repo_path}/contents/.gitmodules")
- print("Submodules appear to be already added. Skipping.")
+ response, duration = self._request("GET", f"{parent_repo_path}/contents/.gitmodules")
+ vprint(f"[{duration:.3f}s] Submodules appear to be already added. Skipping.")
return
except requests.exceptions.HTTPError as e:
if e.response.status_code != 404:
raise
# Get latest commit SHAs for the submodules
- pkg_a_sha = self._request("GET", "repos/mypool/pkgA/branches/main").json()["commit"]["id"]
- pkg_b_sha = self._request("GET", "repos/mypool/pkgB/branches/main").json()["commit"]["id"]
+ response_a, duration_a = self._request("GET", "repos/mypool/pkgA/branches/main")
+ pkg_a_sha = response_a.json()["commit"]["id"]
+ response_b, duration_b = self._request("GET", "repos/mypool/pkgB/branches/main")
+ pkg_b_sha = response_b.json()["commit"]["id"]
if not pkg_a_sha or not pkg_b_sha:
raise Exception("Error: Could not get submodule commit SHAs. Cannot apply patch.")
@@ -212,34 +210,81 @@ index 0000000..{pkg_b_sha}
"content": diff_content,
"message": message
}
- print(f"Applying submodule patch to {org_name}/{repo_name}...")
- self._request("POST", f"{parent_repo_path}/diffpatch", json=data)
- print("Submodule patch applied.")
+ vprint(f"Applying submodule patch to {org_name}/{repo_name}...")
+ response, duration = self._request("POST", f"{parent_repo_path}/diffpatch", json=data)
+ vprint(f"[{duration:.3f}s] Submodule patch applied.")
def update_repo_settings(self, org_name, repo_name):
- print(f"--- Updating repository settings for: {org_name}/{repo_name} ---")
- repo_data = self._request("GET", f"repos/{org_name}/{repo_name}").json()
+ vprint(f"--- Updating repository settings for: {org_name}/{repo_name} ---")
+ response, duration = self._request("GET", f"repos/{org_name}/{repo_name}")
+ repo_data = response.json()
# Ensure these are boolean values, not string
repo_data["allow_manual_merge"] = True
repo_data["autodetect_manual_merge"] = True
- self._request("PATCH", f"repos/{org_name}/{repo_name}", json=repo_data)
- print(f"Repository settings for '{org_name}/{repo_name}' updated.")
+ response, duration = self._request("PATCH", f"repos/{org_name}/{repo_name}", json=repo_data)
+ vprint(f"[{duration:.3f}s] Repository settings for '{org_name}/{repo_name}' updated.")
+
+ def create_webhook(self, owner: str, repo: str, target_url: str):
+ vprint(f"--- Checking webhook for {owner}/{repo} -> {target_url} ---")
+ url = f"repos/{owner}/{repo}/hooks"
+
+ try:
+ response, duration = self._request("GET", url)
+ hooks = response.json()
+ for hook in hooks:
+ if hook["config"]["url"] == target_url:
+ vprint(f"Webhook for {owner}/{repo} already exists with correct URL.")
+ return False
+ elif "gitea-publisher" in hook["config"]["url"] or "10.89.0." in hook["config"]["url"]:
+ vprint(f"Found old webhook {hook['id']} with URL {hook['config']['url']}. Deleting...")
+ self._request("DELETE", f"{url}/{hook['id']}")
+ except requests.exceptions.HTTPError:
+ pass
+
+ vprint(f"--- Creating webhook for {owner}/{repo} -> {target_url} ---")
+ data = {
+ "type": "gitea",
+ "config": {
+ "url": target_url,
+ "content_type": "json"
+ },
+ "events": ["push", "pull_request", "pull_request_review", "issue_comment"],
+ "active": True
+ }
+ response, duration = self._request("POST", url, json=data)
+ vprint(f"[{duration:.3f}s] Webhook created for {owner}/{repo}.")
+ return True
def create_label(self, owner: str, repo: str, name: str, color: str = "#abcdef"):
- print(f"--- Creating label '{name}' in {owner}/{repo} ---")
+ vprint(f"--- Checking label '{name}' in {owner}/{repo} ---")
url = f"repos/{owner}/{repo}/labels"
+
+ # Check if label exists first
+ try:
+ response, duration = self._request("GET", url)
+ labels = response.json()
+ for label in labels:
+ if label["name"] == name:
+ vprint(f"Label '{name}' already exists in {owner}/{repo}.")
+ return False
+ except requests.exceptions.HTTPError:
+ pass
+
+ vprint(f"--- Creating label '{name}' in {owner}/{repo} ---")
data = {
"name": name,
"color": color
}
try:
- self._request("POST", url, json=data)
- print(f"Label '{name}' created.")
+ response, duration = self._request("POST", url, json=data)
+ vprint(f"[{duration:.3f}s] Label '{name}' created.")
+ return True
except requests.exceptions.HTTPError as e:
- if e.response.status_code == 422: # Already exists
- print(f"Label '{name}' already exists.")
+ if e.response.status_code == 422: # Already exists (race condition or other reason)
+ vprint(f"Label '{name}' already exists.")
+ return False
else:
raise
@@ -253,7 +298,7 @@ index 0000000..{pkg_b_sha}
}
if file_info:
- print(f"--- Updating file {file_path} in {owner}/{repo} ---")
+ vprint(f"--- Updating file {file_path} in {owner}/{repo} ---")
# Re-fetch file_info to get the latest SHA right before update
latest_file_info = self.get_file_info(owner, repo, file_path, branch=branch)
if not latest_file_info:
@@ -262,12 +307,12 @@ index 0000000..{pkg_b_sha}
data["message"] = f"Update {file_path}"
method = "PUT"
else:
- print(f"--- Creating file {file_path} in {owner}/{repo} ---")
+ vprint(f"--- Creating file {file_path} in {owner}/{repo} ---")
method = "POST"
url = f"repos/{owner}/{repo}/contents/{file_path}"
- self._request(method, url, json=data)
- print(f"File {file_path} {'updated' if file_info else 'created'} in {owner}/{repo}.")
+ response, duration = self._request(method, url, json=data)
+ vprint(f"[{duration:.3f}s] File {file_path} {'updated' if file_info else 'created'} in {owner}/{repo}.")
def create_gitea_pr(self, repo_full_name: str, diff_content: str, title: str, use_fork: bool, base_branch: str = "main", body: str = ""):
owner, repo = repo_full_name.split("/")
@@ -280,20 +325,20 @@ index 0000000..{pkg_b_sha}
head_owner = sudo_user
head_repo = repo
- print(f"--- Forking {repo_full_name} ---")
+ vprint(f"--- Forking {repo_full_name} ---")
try:
- self._request("POST", f"repos/{owner}/{repo}/forks", json={})
- print(f"--- Forked to {head_owner}/{head_repo} ---")
+ response, duration = self._request("POST", f"repos/{owner}/{repo}/forks", json={})
+ vprint(f"[{duration:.3f}s] --- Forked to {head_owner}/{head_repo} ---")
time.sleep(0.5) # Give more time for fork to be ready
except requests.exceptions.HTTPError as e:
if e.response.status_code == 409: # Already forked
- print(f"--- Already forked to {head_owner}/{head_repo} ---")
+ vprint(f"--- Already forked to {head_owner}/{head_repo} ---")
else:
raise
# Apply the diff using diffpatch and create the new branch automatically
- print(f"--- Applying diff to {head_owner}/{head_repo} from {base_branch} to new branch {new_branch_name} ---")
- self._request("POST", f"repos/{head_owner}/{head_repo}/diffpatch", json={
+ vprint(f"--- Applying diff to {head_owner}/{head_repo} from {base_branch} to new branch {new_branch_name} ---")
+ response, duration = self._request("POST", f"repos/{head_owner}/{head_repo}/diffpatch", json={
"branch": base_branch,
"new_branch": new_branch_name,
"content": diff_content,
@@ -308,59 +353,59 @@ index 0000000..{pkg_b_sha}
"body": body,
"allow_maintainer_edit": True
}
- print(f"--- Creating PR in {repo_full_name} from {data['head']} ---")
- response = self._request("POST", f"repos/{owner}/{repo}/pulls", json=data)
+ vprint(f"--- Creating PR in {repo_full_name} from {data['head']} ---")
+ response, duration = self._request("POST", f"repos/{owner}/{repo}/pulls", json=data)
return response.json()
def create_branch(self, owner: str, repo: str, new_branch_name: str, old_ref: str):
- print(f"--- Checking branch '{new_branch_name}' in {owner}/{repo} ---")
+ vprint(f"--- Checking branch '{new_branch_name}' in {owner}/{repo} ---")
try:
- self._request("GET", f"repos/{owner}/{repo}/branches/{new_branch_name}")
- print(f"Branch '{new_branch_name}' already exists.")
- return
+ response, duration = self._request("GET", f"repos/{owner}/{repo}/branches/{new_branch_name}")
+ vprint(f"[{duration:.3f}s] Branch '{new_branch_name}' already exists.")
+ return False
except requests.exceptions.HTTPError as e:
if e.response.status_code != 404:
raise # Re-raise other HTTP errors
- print(f"--- Creating branch '{new_branch_name}' in {owner}/{repo} from {old_ref} ---")
+ vprint(f"--- Creating branch '{new_branch_name}' in {owner}/{repo} from {old_ref} ---")
url = f"repos/{owner}/{repo}/branches"
data = {
"new_branch_name": new_branch_name,
"old_ref": old_ref
}
- self._request("POST", url, json=data)
- print(f"Branch '{new_branch_name}' created in {owner}/{repo}.")
+ response, duration = self._request("POST", url, json=data)
+ vprint(f"[{duration:.3f}s] Branch '{new_branch_name}' created in {owner}/{repo}.")
+ return True
def ensure_branch_exists(self, owner: str, repo: str, branch: str = "main", timeout: int = 10):
- print(f"--- Ensuring branch '{branch}' exists in {owner}/{repo} ---")
+ vprint(f"--- Ensuring branch '{branch}' exists in {owner}/{repo} ---")
start_time = time.time()
while time.time() - start_time < timeout:
try:
- self._request("GET", f"repos/{owner}/{repo}/branches/{branch}")
- print(f"Branch '{branch}' confirmed in {owner}/{repo}.")
+ response, duration = self._request("GET", f"repos/{owner}/{repo}/branches/{branch}")
+ vprint(f"[{duration:.3f}s] Branch '{branch}' confirmed in {owner}/{repo}.")
return
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
- print(f"Branch '{branch}' not found yet in {owner}/{repo}. Retrying...")
+ vprint(f"Branch '{branch}' not found yet in {owner}/{repo}. Retrying...")
time.sleep(1)
continue
raise
raise Exception(f"Timeout waiting for branch {branch} in {owner}/{repo}")
-
-
def modify_gitea_pr(self, repo_full_name: str, pr_number: int, diff_content: str, message: str):
owner, repo = repo_full_name.split("/")
# Get PR details to find the head branch AND head repo
- pr_details = self._request("GET", f"repos/{owner}/{repo}/pulls/{pr_number}").json()
+ response, duration = self._request("GET", f"repos/{owner}/{repo}/pulls/{pr_number}")
+ pr_details = response.json()
head_branch = pr_details["head"]["ref"]
head_repo_owner = pr_details["head"]["repo"]["owner"]["login"]
head_repo_name = pr_details["head"]["repo"]["name"]
# Apply the diff using diffpatch
- print(f"--- Modifying PR #{pr_number} in {head_repo_owner}/{head_repo_name} branch {head_branch} ---")
- self._request("POST", f"repos/{head_repo_owner}/{head_repo_name}/diffpatch", json={
+ vprint(f"--- Modifying PR #{pr_number} in {head_repo_owner}/{head_repo_name} branch {head_branch} ---")
+ response, duration = self._request("POST", f"repos/{head_repo_owner}/{head_repo_name}/diffpatch", json={
"branch": head_branch,
"content": diff_content,
"message": message
@@ -369,15 +414,15 @@ index 0000000..{pkg_b_sha}
def update_gitea_pr_properties(self, repo_full_name: str, pr_number: int, **kwargs):
owner, repo = repo_full_name.split("/")
url = f"repos/{owner}/{repo}/pulls/{pr_number}"
- response = self._request("PATCH", url, json=kwargs)
+ response, duration = self._request("PATCH", url, json=kwargs)
return response.json()
def create_issue_comment(self, repo_full_name: str, issue_number: int, body: str):
owner, repo = repo_full_name.split("/")
url = f"repos/{owner}/{repo}/issues/{issue_number}/comments"
data = {"body": body}
- print(f"--- Creating comment on {repo_full_name} issue #{issue_number} ---")
- response = self._request("POST", url, json=data)
+ vprint(f"--- Creating comment on {repo_full_name} issue #{issue_number} ---")
+ response, duration = self._request("POST", url, json=data)
return response.json()
def get_timeline_events(self, repo_full_name: str, pr_number: int):
@@ -387,15 +432,15 @@ index 0000000..{pkg_b_sha}
# Retry logic for timeline events
for i in range(10): # Try up to 10 times
try:
- response = self._request("GET", url)
+ response, duration = self._request("GET", url)
timeline_events = response.json()
if timeline_events: # Check if timeline_events list is not empty
return timeline_events
- print(f"Attempt {i+1}: Timeline for PR {pr_number} is empty. Retrying in 1 seconds...")
+            vprint(f"Attempt {i+1}: Timeline for PR {pr_number} is empty. Retrying in 1 second...")
time.sleep(1)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
- print(f"Attempt {i+1}: Timeline for PR {pr_number} not found yet. Retrying in 1 seconds...")
+                    vprint(f"Attempt {i+1}: Timeline for PR {pr_number} not found yet. Retrying in 1 second...")
time.sleep(1)
else:
raise # Re-raise other HTTP errors
@@ -408,16 +453,16 @@ index 0000000..{pkg_b_sha}
# Retry logic for comments
for i in range(10): # Try up to 10 times
try:
- response = self._request("GET", url)
+ response, duration = self._request("GET", url)
comments = response.json()
- print(f"Attempt {i+1}: Comments for PR {pr_number} received: {comments}") # Added debug print
+ vprint(f"[{duration:.3f}s] Attempt {i+1}: Comments for PR {pr_number} received: {comments}") # Added debug print
if comments: # Check if comments list is not empty
return comments
- print(f"Attempt {i+1}: Comments for PR {pr_number} are empty. Retrying in 1 seconds...")
+                vprint(f"Attempt {i+1}: Comments for PR {pr_number} are empty. Retrying in 1 second...")
time.sleep(1)
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
- print(f"Attempt {i+1}: Comments for PR {pr_number} not found yet. Retrying in 1 seconds...")
+                    vprint(f"Attempt {i+1}: Comments for PR {pr_number} not found yet. Retrying in 1 second...")
time.sleep(1)
else:
raise # Re-raise other HTTP errors
@@ -426,7 +471,7 @@ index 0000000..{pkg_b_sha}
def get_pr_details(self, repo_full_name: str, pr_number: int):
owner, repo = repo_full_name.split("/")
url = f"repos/{owner}/{repo}/pulls/{pr_number}"
- response = self._request("GET", url)
+ response, duration = self._request("GET", url)
return response.json()
def create_review(self, repo_full_name: str, pr_number: int, event: str = "APPROVED", body: str = "LGTM"):
@@ -437,7 +482,7 @@ index 0000000..{pkg_b_sha}
existing_reviews = self.list_reviews(repo_full_name, pr_number)
for r in existing_reviews:
if r["user"]["login"] == current_user and r["state"] == "APPROVED" and event == "APPROVED":
- print(f"User {current_user} already has an APPROVED review for {repo_full_name} PR #{pr_number}")
+ vprint(f"User {current_user} already has an APPROVED review for {repo_full_name} PR #{pr_number}")
return r
url = f"repos/{owner}/{repo}/pulls/{pr_number}/reviews"
@@ -445,13 +490,13 @@ index 0000000..{pkg_b_sha}
"event": event,
"body": body
}
- print(f"--- Creating and submitting review ({event}) for {repo_full_name} PR #{pr_number} as {current_user} ---")
+ vprint(f"--- Creating and submitting review ({event}) for {repo_full_name} PR #{pr_number} as {current_user} ---")
try:
- response = self._request("POST", url, json=data)
+ response, duration = self._request("POST", url, json=data)
review = response.json()
except requests.exceptions.HTTPError as e:
# If it fails with 422, it might be because a review is already pending or something else
- print(f"Failed to create review: {e.response.text}")
+ vprint(f"Failed to create review: {e.response.text}")
# Try to find a pending review to submit
existing_reviews = self.list_reviews(repo_full_name, pr_number)
pending_review = next((r for r in existing_reviews if r["user"]["login"] == current_user and r["state"] == "PENDING"), None)
@@ -469,11 +514,11 @@ index 0000000..{pkg_b_sha}
"body": body
}
try:
- self._request("POST", submit_url, json=submit_data)
- print(f"--- Review {review_id} submitted ---")
+ response, duration = self._request("POST", submit_url, json=submit_data)
+ vprint(f"[{duration:.3f}s] --- Review {review_id} submitted ---")
except requests.exceptions.HTTPError as e:
if "already" in e.response.text.lower() or "stay pending" in e.response.text.lower():
- print(f"Review {review_id} could not be submitted further: {e.response.text}")
+ vprint(f"Review {review_id} could not be submitted further: {e.response.text}")
else:
raise
@@ -482,39 +527,29 @@ index 0000000..{pkg_b_sha}
def list_reviews(self, repo_full_name: str, pr_number: int):
owner, repo = repo_full_name.split("/")
url = f"repos/{owner}/{repo}/pulls/{pr_number}/reviews"
- response = self._request("GET", url)
+ response, duration = self._request("GET", url)
return response.json()
def approve_requested_reviews(self, repo_full_name: str, pr_number: int):
- print(f"--- Checking for REQUEST_REVIEW state in {repo_full_name} PR #{pr_number} ---")
+ vprint(f"--- Checking for REQUEST_REVIEW state in {repo_full_name} PR #{pr_number} ---")
reviews = self.list_reviews(repo_full_name, pr_number)
requested_reviews = [r for r in reviews if r["state"] == "REQUEST_REVIEW"]
if not requested_reviews:
- print(f"No reviews in REQUEST_REVIEW state found for {repo_full_name} PR #{pr_number}")
+ vprint(f"No reviews in REQUEST_REVIEW state found for {repo_full_name} PR #{pr_number}")
return
admin_token = self.headers["Authorization"].split(" ")[1]
for r in requested_reviews:
reviewer_username = r["user"]["login"]
- print(f"Reacting on REQUEST_REVIEW for user {reviewer_username} by approving...")
+ vprint(f"Reacting on REQUEST_REVIEW for user {reviewer_username} by approving...")
reviewer_client = GiteaAPIClient(base_url=self.base_url, token=admin_token, sudo=reviewer_username)
time.sleep(1) # give a chance to avoid possible concurrency issues with reviews request/approval
reviewer_client.create_review(repo_full_name, pr_number, event="APPROVED", body="Approving requested review")
- def restart_service(self, service_name: str):
- print(f"--- Restarting service: {service_name} ---")
- try:
- # Assumes podman-compose.yml is in the parent directory of tests/lib
- subprocess.run(["podman-compose", "restart", service_name], check=True, cwd=os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
- print(f"Service {service_name} restarted successfully.")
- except subprocess.CalledProcessError as e:
- print(f"Error restarting service {service_name}: {e}")
- raise
-
def wait_for_project_pr(self, package_pr_repo, package_pr_number, project_pr_repo="myproducts/mySLFO", timeout=60):
- print(f"Polling {package_pr_repo} PR #{package_pr_number} timeline for forwarded PR event in {project_pr_repo}...")
+ vprint(f"Polling {package_pr_repo} PR #{package_pr_number} timeline for forwarded PR event in {project_pr_repo}...")
for _ in range(timeout):
time.sleep(1)
timeline_events = self.get_timeline_events(package_pr_repo, package_pr_number)
@@ -529,7 +564,7 @@ index 0000000..{pkg_b_sha}
return None
def approve_and_wait_merge(self, package_pr_repo, package_pr_number, project_pr_number, project_pr_repo="myproducts/mySLFO", timeout=30):
- print(f"Approving reviews and verifying both PRs are merged ({package_pr_repo}#{package_pr_number} and {project_pr_repo}#{project_pr_number})...")
+ vprint(f"Approving reviews and verifying both PRs are merged ({package_pr_repo}#{package_pr_number} and {project_pr_repo}#{project_pr_number})...")
package_merged = False
project_merged = False
@@ -541,17 +576,16 @@ index 0000000..{pkg_b_sha}
pkg_details = self.get_pr_details(package_pr_repo, package_pr_number)
if pkg_details.get("merged"):
package_merged = True
- print(f"Package PR {package_pr_repo}#{package_pr_number} merged.")
+ vprint(f"Package PR {package_pr_repo}#{package_pr_number} merged.")
if not project_merged:
prj_details = self.get_pr_details(project_pr_repo, project_pr_number)
if prj_details.get("merged"):
project_merged = True
- print(f"Project PR {project_pr_repo}#{project_pr_number} merged.")
+ vprint(f"Project PR {project_pr_repo}#{project_pr_number} merged.")
if package_merged and project_merged:
return True, True
time.sleep(1)
return package_merged, project_merged
-
diff --git a/integration/tests/test_pr_workflow.py b/integration/tests/test_pr_workflow.py
index 9264866..d67dd6b 100755
--- a/integration/tests/test_pr_workflow.py
+++ b/integration/tests/test_pr_workflow.py
@@ -1,12 +1,9 @@
import pytest
import re
import time
-import subprocess
import requests
-from pathlib import Path
from tests.lib.common_test_utils import (
GiteaAPIClient,
- mock_build_result,
)
# =============================================================================
@@ -21,8 +18,6 @@ def test_pr_workflow_succeeded(staging_main_env, mock_build_result):
pr = gitea_env.create_gitea_pr("mypool/pkgA", diff, "Test PR - should succeed", False, base_branch=merge_branch_name)
initial_pr_number = pr["number"]
- compose_dir = Path(__file__).parent.parent
-
forwarded_pr_number = gitea_env.wait_for_project_pr("mypool/pkgA", initial_pr_number)
assert (
forwarded_pr_number is not None
@@ -43,17 +38,10 @@ def test_pr_workflow_succeeded(staging_main_env, mock_build_result):
assert reviewer_added, "Staging bot was not added as a reviewer."
print("Staging bot has been added as a reviewer.")
- mock_build_result(package_name="pkgA", code="succeeded")
-
- print("Restarting obs-staging-bot...")
- subprocess.run(
- ["podman-compose", "restart", "obs-staging-bot"],
- cwd=compose_dir,
- check=True,
- capture_output=True,
- )
+ mock_build_result(package_name="pkgA", code="succeeded")
print(f"Polling myproducts/mySLFO PR #{forwarded_pr_number} for final status...")
+
status_comment_found = False
for _ in range(20):
time.sleep(1)
@@ -75,8 +63,6 @@ def test_pr_workflow_failed(staging_main_env, mock_build_result):
pr = gitea_env.create_gitea_pr("mypool/pkgA", diff, "Test PR - should fail", False, base_branch=merge_branch_name)
initial_pr_number = pr["number"]
- compose_dir = Path(__file__).parent.parent
-
forwarded_pr_number = gitea_env.wait_for_project_pr("mypool/pkgA", initial_pr_number)
assert (
forwarded_pr_number is not None
@@ -99,14 +85,6 @@ def test_pr_workflow_failed(staging_main_env, mock_build_result):
mock_build_result(package_name="pkgA", code="failed")
- print("Restarting obs-staging-bot...")
- subprocess.run(
- ["podman-compose", "restart", "obs-staging-bot"],
- cwd=compose_dir,
- check=True,
- capture_output=True,
- )
-
print(f"Polling myproducts/mySLFO PR #{forwarded_pr_number} for final status...")
status_comment_found = False
for _ in range(20):
diff --git a/integration/tests/workflow_pr_merge_test.py b/integration/tests/workflow_pr_merge_test.py
index e8d0f49..13bbfb4 100644
--- a/integration/tests/workflow_pr_merge_test.py
+++ b/integration/tests/workflow_pr_merge_test.py
@@ -36,7 +36,7 @@ index 0000000..e69de29
print("Both PRs merged successfully.")
@pytest.mark.t002
-def test_002_manual_merge(manual_merge_env, test_user_client, usera_client, staging_bot_client):
+def test_002_manual_merge(manual_merge_env, test_user_client, usera_client, staging_bot_client, ownerA_client):
"""
Test scenario TC-MERGE-002:
1. Create a PackageGit PR with ManualMergeOnly set to true.
@@ -52,7 +52,7 @@ new file mode 100644
index 0000000..e69de29
"""
print(f"--- Creating package PR in mypool/pkgA on branch {merge_branch_name} ---")
- package_pr = test_user_client.create_gitea_pr("mypool/pkgA", diff, "Test Manual Merge Fixture", False, base_branch=merge_branch_name)
+ package_pr = gitea_env.create_gitea_pr("mypool/pkgA", diff, "Test Manual Merge Fixture", False, base_branch=merge_branch_name)
package_pr_number = package_pr["number"]
print(f"Created package PR mypool/pkgA#{package_pr_number}")
@@ -62,13 +62,14 @@ index 0000000..e69de29
print(f"Found project PR: myproducts/mySLFO#{project_pr_number}")
# 3. Approve reviews and verify NOT merged
- print("Waiting for all expected review requests and approving them...")
+ print("Waiting for required review requests and approving them...")
# Expected reviewers based on manual-merge branch config and pkgA maintainership
- expected_reviewers = {"usera", "userb", "ownerA", "ownerX", "ownerY"}
+ mandatory_reviewers = {"usera", "userb"}
+ maintainers = {"ownerA", "ownerX", "ownerY"}
# ManualMergeOnly still requires regular reviews to be satisfied.
- # We poll until all expected reviewers are requested, then approve them.
- all_requested = False
+ # We poll until required reviewers have approved.
+ all_approved = False
for _ in range(30):
# Trigger approvals for whatever is already requested
gitea_env.approve_requested_reviews("mypool/pkgA", package_pr_number)
@@ -80,20 +81,17 @@ index 0000000..e69de29
print("Staging bot has a pending/requested review. Approving...")
staging_bot_client.create_review("myproducts/mySLFO", project_pr_number, event="APPROVED", body="Staging bot approves")
- # Check if all expected reviewers have at least one review record (any state)
+ # Check if mandatory reviewers and at least one maintainer have approved
pkg_reviews = gitea_env.list_reviews("mypool/pkgA", package_pr_number)
- current_reviewers = {r["user"]["login"] for r in pkg_reviews}
+ approved_reviewers = {r["user"]["login"] for r in pkg_reviews if r["state"] == "APPROVED"}
- if expected_reviewers.issubset(current_reviewers):
- # Also ensure they are all approved (not just requested)
- approved_reviewers = {r["user"]["login"] for r in pkg_reviews if r["state"] == "APPROVED"}
- if expected_reviewers.issubset(approved_reviewers):
- # And check project PR for bot approval
- prj_approved = any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] == "APPROVED" for r in prj_reviews)
- if prj_approved:
- all_requested = True
- print(f"All expected reviewers {expected_reviewers} and staging bot have approved.")
- break
+ if mandatory_reviewers.issubset(approved_reviewers) and any(m in approved_reviewers for m in maintainers):
+ # And check project PR for bot approval
+ prj_approved = any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] == "APPROVED" for r in prj_reviews)
+ if prj_approved:
+ all_approved = True
+ print(f"Required reviewers approved: mandatory={mandatory_reviewers}, maintainer={[m for m in maintainers if m in approved_reviewers]}, staging_bot=True")
+ break
pkg_details = gitea_env.get_pr_details("mypool/pkgA", package_pr_number)
prj_details = gitea_env.get_pr_details("myproducts/mySLFO", project_pr_number)
@@ -103,12 +101,12 @@ index 0000000..e69de29
time.sleep(2)
- assert all_requested, f"Timed out waiting for all expected reviewers {expected_reviewers} to approve. Current: {current_reviewers}"
+ assert all_approved, f"Timed out waiting for required approvals. Mandatory: {mandatory_reviewers}, Maintainers: {maintainers}. Current approved: {approved_reviewers}"
print("Both PRs have all required approvals but are not merged (as expected with ManualMergeOnly).")
- # 4. Comment "merge ok" from a requested reviewer (usera)
- print("Commenting 'merge ok' on package PR...")
- usera_client.create_issue_comment("mypool/pkgA", package_pr_number, "merge ok")
+ # 4. Comment "merge ok" from a requested reviewer (ownerA)
+ print("Commenting 'merge ok' on package PR from a maintainer...")
+ ownerA_client.create_issue_comment("mypool/pkgA", package_pr_number, "merge ok")
# 5. Verify both PRs are merged
print("Polling for PR merge status...")
@@ -164,13 +162,14 @@ index 0000000..e69de29
print(f"Found project PR: myproducts/mySLFO#{project_pr_number}")
# 3. Approve reviews and verify NOT merged
- print("Waiting for all expected review requests and approving them...")
+ print("Waiting for required review requests and approving them...")
# Expected reviewers based on manual-merge branch config and pkgA maintainership
- expected_reviewers = {"usera", "userb", "ownerA", "ownerX", "ownerY"}
+ mandatory_reviewers = {"usera", "userb"}
+ maintainers = {"ownerA", "ownerX", "ownerY"}
# ManualMergeOnly still requires regular reviews to be satisfied.
- # We poll until all expected reviewers are requested, then approve them.
- all_requested = False
+ # We poll until required reviewers have approved.
+ all_approved = False
for _ in range(30):
# Trigger approvals for whatever is already requested
gitea_env.approve_requested_reviews("mypool/pkgA", package_pr_number)
@@ -182,20 +181,17 @@ index 0000000..e69de29
print("Staging bot has a pending/requested review. Approving...")
staging_bot_client.create_review("myproducts/mySLFO", project_pr_number, event="APPROVED", body="Staging bot approves")
- # Check if all expected reviewers have at least one review record (any state)
+ # Check if mandatory reviewers and at least one maintainer have approved
pkg_reviews = gitea_env.list_reviews("mypool/pkgA", package_pr_number)
- current_reviewers = {r["user"]["login"] for r in pkg_reviews}
+ approved_reviewers = {r["user"]["login"] for r in pkg_reviews if r["state"] == "APPROVED"}
- if expected_reviewers.issubset(current_reviewers):
- # Also ensure they are all approved (not just requested)
- approved_reviewers = {r["user"]["login"] for r in pkg_reviews if r["state"] == "APPROVED"}
- if expected_reviewers.issubset(approved_reviewers):
- # And check project PR for bot approval
- prj_approved = any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] == "APPROVED" for r in prj_reviews)
- if prj_approved:
- all_requested = True
- print(f"All expected reviewers {expected_reviewers} and staging bot have approved.")
- break
+ if mandatory_reviewers.issubset(approved_reviewers) and any(m in approved_reviewers for m in maintainers):
+ # And check project PR for bot approval
+ prj_approved = any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] == "APPROVED" for r in prj_reviews)
+ if prj_approved:
+ all_approved = True
+ print(f"Required reviewers approved: mandatory={mandatory_reviewers}, maintainer={[m for m in maintainers if m in approved_reviewers]}, staging_bot=True")
+ break
pkg_details = gitea_env.get_pr_details("mypool/pkgA", package_pr_number)
prj_details = gitea_env.get_pr_details("myproducts/mySLFO", project_pr_number)
@@ -205,7 +201,7 @@ index 0000000..e69de29
time.sleep(2)
- assert all_requested, f"Timed out waiting for all expected reviewers {expected_reviewers} to approve. Current: {current_reviewers}"
+ assert all_approved, f"Timed out waiting for required approvals. Mandatory: {mandatory_reviewers}, Maintainers: {maintainers}. Current approved: {approved_reviewers}"
print("Both PRs have all required approvals but are not merged (as expected with ManualMergeOnly).")
# 4. Comment "merge ok" from a requested reviewer (ownerB)
@@ -398,10 +394,12 @@ index 0000000..e69de29
print("Replace merge successful.")
# Verify that the project branch HEAD is a merge commit
- branch_info = gitea_env._request("GET", f"repos/myproducts/mySLFO/branches/{merge_branch_name}").json()
+ resp, _ = gitea_env._request("GET", f"repos/myproducts/mySLFO/branches/{merge_branch_name}")
+ branch_info = resp.json()
new_head_sha = branch_info["commit"]["id"]
- commit_details = gitea_env._request("GET", f"repos/myproducts/mySLFO/git/commits/{new_head_sha}").json()
+ resp, _ = gitea_env._request("GET", f"repos/myproducts/mySLFO/git/commits/{new_head_sha}")
+ commit_details = resp.json()
assert len(commit_details["parents"]) > 1, f"Project branch {merge_branch_name} HEAD should be a merge commit but has {len(commit_details['parents'])} parents"
# Verify that pkgA submodule points to the correct SHA
@@ -441,11 +439,13 @@ index 0000000..e69de29
print("Devel FF merge successful.")
# Verify that the package base branch HEAD is the same as the PR head (FF)
- branch_info = gitea_env._request("GET", f"repos/mypool/pkgA/branches/{merge_branch_name}").json()
+ resp, _ = gitea_env._request("GET", f"repos/mypool/pkgA/branches/{merge_branch_name}")
+ branch_info = resp.json()
new_head_sha = branch_info["commit"]["id"]
assert new_head_sha == pkg_head_sha, f"Package branch {merge_branch_name} HEAD should be {pkg_head_sha} but is {new_head_sha}"
- commit_details = gitea_env._request("GET", f"repos/mypool/pkgA/git/commits/{new_head_sha}").json()
+ resp, _ = gitea_env._request("GET", f"repos/mypool/pkgA/git/commits/{new_head_sha}")
+ commit_details = resp.json()
assert len(commit_details["parents"]) == 1, f"Package branch {merge_branch_name} HEAD should have 1 parent but has {len(commit_details['parents'])}"
@pytest.mark.t013
@@ -481,10 +481,12 @@ index 0000000..e69de29
print("Replace FF merge successful.")
# Verify that the package base branch HEAD is the same as the PR head (FF)
- branch_info = gitea_env._request("GET", f"repos/mypool/pkgA/branches/{merge_branch_name}").json()
+ resp, _ = gitea_env._request("GET", f"repos/mypool/pkgA/branches/{merge_branch_name}")
+ branch_info = resp.json()
new_head_sha = branch_info["commit"]["id"]
assert new_head_sha == pkg_head_sha, f"Package branch {merge_branch_name} HEAD should be {pkg_head_sha} but is {new_head_sha}"
- commit_details = gitea_env._request("GET", f"repos/mypool/pkgA/git/commits/{new_head_sha}").json()
+ resp, _ = gitea_env._request("GET", f"repos/mypool/pkgA/git/commits/{new_head_sha}")
+ commit_details = resp.json()
assert len(commit_details["parents"]) == 1, f"Package branch {merge_branch_name} HEAD should have 1 parent but has {len(commit_details['parents'])}"
diff --git a/integration/workflow-pr/entrypoint.sh b/integration/workflow-pr/entrypoint.sh
index c7a7d1b..16e507b 100644
--- a/integration/workflow-pr/entrypoint.sh
+++ b/integration/workflow-pr/entrypoint.sh
@@ -23,12 +23,19 @@ echo "Waiting for workflow.config in myproducts/mySLFO..."
API_URL="http://gitea-test:3000/api/v1/repos/myproducts/mySLFO/contents/workflow.config"
HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")
+WAITED=false
while [ "$HTTP_STATUS" != "200" ]; do
+ WAITED=true
echo "workflow.config not found yet (HTTP Status: $HTTP_STATUS). Retrying in 5s..."
sleep 5
HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")
done
+if [ "$WAITED" = true ]; then
+ echo "workflow.config found. Sleeping 15s to let other configurations settle..."
+ sleep 15
+fi
+
# Wait for the shared SSH key to be generated by the gitea setup script
echo "Waiting for /var/lib/gitea/ssh-keys/id_ed25519..."
while [ ! -f /var/lib/gitea/ssh-keys/id_ed25519 ]; do
@@ -63,4 +70,5 @@ package=$(rpm -qa | grep autogits-workflow-pr) || :
echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :
+set -x
exec "$exe" "$@"
diff --git a/obs-staging-bot/main.go b/obs-staging-bot/main.go
index 717457b..cb07981 100644
--- a/obs-staging-bot/main.go
+++ b/obs-staging-bot/main.go
@@ -1171,6 +1171,7 @@ var IsDryRun bool
var ProcessPROnly string
var ObsClient common.ObsClientInterface
var BotUser string
+var PollInterval = 5 * time.Minute
func ObsWebHostFromApiHost(apihost string) string {
u, err := url.Parse(apihost)
@@ -1193,9 +1194,18 @@ func main() {
flag.StringVar(&ObsApiHost, "obs", "", "API for OBS instance")
flag.StringVar(&ObsWebHost, "obs-web", "", "Web OBS instance, if not derived from the obs config")
flag.BoolVar(&IsDryRun, "dry", false, "Dry-run, don't actually create any build projects or review changes")
+ pollIntervalStr := flag.String("poll-interval", common.GetEnvOverrideString(os.Getenv("AUTOGITS_STAGING_BOT_POLL_INTERVAL"), ""), "Polling interval for notifications (e.g. 5m, 10s)")
debug := flag.Bool("debug", false, "Turns on debug logging")
flag.Parse()
+ if len(*pollIntervalStr) > 0 {
+ if d, err := time.ParseDuration(*pollIntervalStr); err == nil {
+ PollInterval = d
+ } else {
+ common.LogError("Invalid poll interval:", err)
+ }
+ }
+
if *debug {
common.SetLoggingLevel(common.LogLevelDebug)
} else {
@@ -1264,6 +1274,6 @@ func main() {
for {
PollWorkNotifications(ObsClient, gitea)
common.LogInfo("Poll cycle finished")
- time.Sleep(5 * time.Minute)
+ time.Sleep(PollInterval)
}
}
diff --git a/workflow-direct/main.go b/workflow-direct/main.go
index b6f1dfc..669c49a 100644
--- a/workflow-direct/main.go
+++ b/workflow-direct/main.go
@@ -503,7 +503,10 @@ func updateConfiguration(configFilename string, orgs *[]string) {
os.Exit(4)
}
- configs, _ := common.ResolveWorkflowConfigs(gitea, configFile)
+ configs, err := common.ResolveWorkflowConfigs(gitea, configFile)
+ if err != nil {
+ common.LogError("Failed to resolve some configuration repositories:", err)
+ }
configuredRepos = make(map[string][]*common.AutogitConfig)
*orgs = make([]string, 0, 1)
for _, c := range configs {
diff --git a/workflow-pr/main.go b/workflow-pr/main.go
index facaffc..ef4ce21 100644
--- a/workflow-pr/main.go
+++ b/workflow-pr/main.go
@@ -58,6 +58,7 @@ func main() {
checkOnStart := flag.Bool("check-on-start", common.GetEnvOverrideBool(os.Getenv("AUTOGITS_CHECK_ON_START"), false), "Check all repositories for consistency on start, without delays")
checkIntervalHours := flag.Float64("check-interval", 5, "Check interval (+-random delay) for repositories for consitency, in hours")
flag.BoolVar(&ListPROnly, "list-prs-only", false, "Only lists PRs without acting on them")
+ exitOnConfigError := flag.Bool("exit-on-config-error", false, "Exit if any repository in configuration cannot be resolved")
flag.Int64Var(&PRID, "id", -1, "Process only the specific ID and ignore the rest. Use for debugging")
basePath := flag.String("repo-path", common.GetEnvOverrideString(os.Getenv("AUTOGITS_REPO_PATH"), ""), "Repository path. Default is temporary directory")
pr := flag.String("only-pr", "", "Only specific PR to process. For debugging")
@@ -97,8 +98,10 @@ func main() {
configs, err := common.ResolveWorkflowConfigs(Gitea, config)
if err != nil {
- common.LogError("Cannot resolve config files:", err)
- return
+ common.LogError("Failed to resolve some configuration repositories:", err)
+ if *exitOnConfigError {
+ return
+ }
}
for _, c := range configs {