1 Commits

Author SHA256 Message Date
Andrii Nikitin
6feb04a14d common: fix timeline cache race condition and update logic
All checks were successful
go-generate-check / go-generate-check (pull_request) Successful in 14s
Integration tests / t (pull_request) Successful in 8m21s
The GetTimeline function previously used a strict timestamp comparison
(Created > LastCachedTime) to fetch new events. This caused the bot to
miss events occurring within the same second as the last update.

This change:
- Switches to ID-based deduplication to safely handle same-second events.
- Correctly updates existing timeline items if they are modified.
- Calculates the next 'Since' parameter using the maximum 'Updated'
  timestamp found in the current cache.

This fixes flakiness in integration tests (specifically test_006) where
maintainer rejections were occasionally ignored by the workflow-pr service.
2026-03-04 12:11:35 +01:00
21 changed files with 539 additions and 394 deletions

View File

@@ -40,12 +40,10 @@ jobs:
run: make down
working-directory: ./autogits/integration
- name: Start images
run: |
make up
make wait_healthy
run: make up
working-directory: ./autogits/integration
- name: Run tests
run: make pytest
run: py.test-3.11 -v tests
working-directory: ./autogits/integration
- name: Make sure the pod is down
if: always()

View File

@@ -863,9 +863,10 @@ func (gitea *GiteaTransport) GetTimeline(org, repo string, idx int64) ([]*models
TimelineCache, IsCached := giteaTimelineCache[prID]
var LastCachedTime strfmt.DateTime
if IsCached {
l := len(TimelineCache.data)
if l > 0 {
LastCachedTime = TimelineCache.data[0].Updated
for _, d := range TimelineCache.data {
if time.Time(d.Updated).Compare(time.Time(LastCachedTime)) > 0 {
LastCachedTime = d.Updated
}
}
// cache data for 5 seconds
@@ -894,14 +895,20 @@ func (gitea *GiteaTransport) GetTimeline(org, repo string, idx int64) ([]*models
}
for _, d := range res.Payload {
if d != nil {
if time.Time(d.Created).Compare(time.Time(LastCachedTime)) > 0 {
// created after last check, so we append here
TimelineCache.data = append(TimelineCache.data, d)
} else {
// we need something updated in the timeline, maybe
if d == nil {
continue
}
found := false
for i := range TimelineCache.data {
if TimelineCache.data[i].ID == d.ID {
TimelineCache.data[i] = d
found = true
break
}
}
if !found {
TimelineCache.data = append(TimelineCache.data, d)
}
}
if resCount < 10 {

View File

@@ -4,7 +4,7 @@ ENV container=podman
ENV LANG=en_US.UTF-8
RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency python3-pytest-httpserver
RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency
COPY . /opt/project/

View File

@@ -1,19 +1,51 @@
# We want to be able to test in two **modes**:
# A. bots are used from official packages as defined in */Dockerfile.package
# B. bots are just picked up from binaries that are placed in corresponding parent directory.
# The topology is defined in podman-compose file and can be spawned in two ways:
# 1. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)
# 2. pytest in a dedicated container (recommended)
# 1. Privileged container (needs no additional dependencies)
# 2. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)
# Typical workflow:
# 1. 'make build' - prepares images
# 2. 'make up' - spawns podman-compose
# 3. 'make pytest' - run tests inside the tester container
# 4. 'make down' - once the containers are not needed
#
# OR just run 'make test' to do it all at once.
# A1: - run 'make test_package'
# B1: - run 'make test_local' (make sure that the go binaries in parent folder are built)
# A2:
# 1. 'make build_package' - prepares images (recommended, otherwise there might be surprises if image fails to build during `make up`)
# 2. 'make up' - spawns podman-compose
# 3. 'pytest -v tests/*' - run tests
# 4. 'make down' - once the containers are not needed
# B2: (make sure the go binaries in the parent folder are built)
# 1. 'make build_local' - prepares images (recommended, otherwise there might be surprises if image fails to build during `make up`)
# 2. 'make up' - spawns podman-compose
# 3. 'pytest -v tests/*' - run tests
# 4. 'make down' - once the containers are not needed
AUTO_DETECT_MODE := $(shell if test -e ../workflow-pr/workflow-pr; then echo .local; else echo .package; fi)
# Default test target
test: test_b
# try to detect mode B1, otherwise mode A1
test: GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE)
test: build_container test_container
# mode A1
test_package: GIWTF_IMAGE_SUFFIX=.package
test_package: build_container test_container
# mode B1
test_local: GIWTF_IMAGE_SUFFIX=.local
test_local: build_container test_container
MODULES := gitea-events-rabbitmq-publisher obs-staging-bot workflow-pr
# Prepare topology 1
build_container:
podman build ../ -f integration/Dockerfile -t autogits_integration
# Run tests in topology 1
test_container:
podman run --rm --privileged -t -e GIWTF_IMAGE_SUFFIX=$(GIWTF_IMAGE_SUFFIX) autogits_integration /usr/bin/bash -c "make build && make up && sleep 25 && pytest -v tests/*"
build_local: AUTO_DETECT_MODE=.local
build_local: build
@@ -21,66 +53,16 @@ build_local: build
build_package: AUTO_DETECT_MODE=.package
build_package: build
# parse all service images from podman-compose and build them
# mode B with pytest in container
test_b: AUTO_DETECT_MODE=.local
test_b: build up wait_healthy pytest
# Complete cycle for CI
test-ci: test_b down
wait_healthy:
@echo "Waiting for services to be healthy..."
@echo "Waiting for gitea (max 2m)..."
@start_time=$$(date +%s); \
until podman exec gitea-test curl -f -s http://localhost:3000/api/v1/version >/dev/null 2>&1; do \
current_time=$$(date +%s); \
elapsed=$$((current_time - start_time)); \
if [ $$elapsed -gt 120 ]; then \
echo "ERROR: Gitea failed to start within 2 minutes."; \
echo "--- Troubleshooting Info ---"; \
echo "Diagnostics output (curl):"; \
podman exec gitea-test curl -v http://localhost:3000/api/v1/version || true; \
echo "--- Container Logs ---"; \
podman logs gitea-test --tail 20; \
echo "--- Container Status ---"; \
podman inspect gitea-test --format '{{.State.Status}}'; \
exit 1; \
fi; \
sleep 2; \
done
@echo "Waiting for rabbitmq (max 2m)..."
@start_time=$$(date +%s); \
until podman exec rabbitmq-test rabbitmq-diagnostics check_running -q >/dev/null 2>&1; do \
current_time=$$(date +%s); \
elapsed=$$((current_time - start_time)); \
if [ $$elapsed -gt 120 ]; then \
echo "ERROR: RabbitMQ failed to start within 2 minutes."; \
echo "--- Troubleshooting Info ---"; \
echo "Diagnostics output:"; \
podman exec rabbitmq-test rabbitmq-diagnostics check_running || true; \
echo "--- Container Logs ---"; \
podman logs rabbitmq-test --tail 20; \
echo "--- Container Status ---"; \
podman inspect rabbitmq-test --format '{{.State.Status}}'; \
exit 1; \
fi; \
sleep 2; \
done
@echo "All services are healthy!"
pytest:
podman-compose exec tester pytest -v tests/*
# parse all service images from podman-compose and build them (topology 2)
build:
podman pull docker.io/library/rabbitmq:3.13.7-management
for i in $$(grep -A 1000 services: podman-compose.yml | grep -oE '^ [^: ]+'); do GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE) podman-compose build $$i || exit 1; done
# this will spawn prebuilt containers
# this will spawn prebuilt containers (topology 2)
up:
podman-compose up -d
# tear down
# tear down (topology 2)
down:
podman-compose down
@@ -91,3 +73,4 @@ up-bots-package:
# mode B
up-bots-local:
GIWTF_IMAGE_SUFFIX=.local podman-compose up -d

View File

@@ -1,52 +0,0 @@
# Makefile Targets
This document describes the targets available in the `integration/Makefile`.
## Primary Workflow
### `test` (or `test_b`)
- **Action**: Performs a complete build-and-test cycle.
- **Steps**:
1. `build`: Prepares all container images.
2. `up`: Starts all services via `podman-compose`.
3. `wait_healthy`: Polls Gitea and RabbitMQ until they are ready.
4. `pytest`: Executes the test suite inside the `tester` container.
- **Outcome**: The environment remains active for fast iteration.
### `test-ci`
- **Action**: Performs the full `test` cycle followed by teardown.
- **Steps**: `test_b` -> `down`
- **Purpose**: Ideal for CI environments where a clean state is required after testing.
---
## Individual Targets
### `build`
- **Action**: Pulls external images (RabbitMQ) and builds all local service images defined in `podman-compose.yml`.
- **Note**: Use `build_local` or `build_package` to specify bot source mode.
### `up`
- **Action**: Starts the container topology in detached mode.
### `wait_healthy`
- **Action**: Polls the health status of `gitea-test` and `rabbitmq-test` containers.
- **Purpose**: Ensures infrastructure is stable before test execution.
### `pytest`
- **Action**: Runs `pytest -v tests/*` inside the running `tester` container.
- **Requirement**: The environment must already be started via `up`.
### `down`
- **Action**: Stops and removes all containers and networks defined in the compose file.
---
## Configuration Modes
The Makefile supports two deployment modes via `GIWTF_IMAGE_SUFFIX`:
- **.local** (Default): Uses binaries built from the local source (requires `make build` in project root).
- **.package**: Uses official pre-built packages for the bots.
Targets like `build_local`, `build_package`, `up-bots-local`, and `up-bots-package` allow for explicit mode selection.

57
integration/Makefile.txt Normal file
View File

@@ -0,0 +1,57 @@
+-------------------------------------------------------------------------------------------------+
| Makefile Targets |
+-------------------------------------------------------------------------------------------------+
| |
| [Default Test Workflow] |
| test (Auto-detects mode: .local or .package) |
| > build_container |
| > test_container |
| |
| [Specific Test Workflows - Topology 1: Privileged Container] |
| test_package (Mode A1: Bots from official packages) |
| > build_container |
| > test_container |
| |
| test_local (Mode B1: Bots from local binaries) |
| > build_container |
| > test_container |
| |
| build_container |
| - Action: Builds the `autogits_integration` privileged container image. |
| - Purpose: Prepares an environment for running tests within a single container. |
| |
| test_container |
| - Action: Runs `autogits_integration` container, executes `make build`, `make up`, and |
| `pytest -v tests/*` inside it. |
| - Purpose: Executes the full test suite in Topology 1 (privileged container). |
| |
| [Build & Orchestration Workflows - Topology 2: podman-compose] |
| |
| build_package (Mode A: Builds service images from official packages) |
| > build |
| |
| build_local (Mode B: Builds service images from local binaries) |
| > build |
| |
| build |
| - Action: Pulls `rabbitmq` image and iterates through `podman-compose.yml` services |
| to build each one. |
| - Purpose: Prepares all necessary service images for Topology 2 deployment. |
| |
| up |
| - Action: Starts all services defined in `podman-compose.yml` in detached mode. |
| - Purpose: Deploys the application topology (containers) for testing or development. |
| |
| down |
| - Action: Stops and removes all services started by `up`. |
| - Purpose: Cleans up the deployed application topology. |
| |
| up-bots-package (Mode A: Spawns Topology 2 with official package bots) |
| - Action: Calls `podman-compose up -d` with `GIWTF_IMAGE_SUFFIX=.package`. |
| - Purpose: Specifically brings up the environment using official package bots. |
| |
| up-bots-local (Mode B: Spawns Topology 2 with local binaries) |
| - Action: Calls `podman-compose up -d` with `GIWTF_IMAGE_SUFFIX=.local`. |
| - Purpose: Specifically brings up the environment using local binaries. |
| |
+-------------------------------------------------------------------------------------------------+

View File

@@ -0,0 +1,14 @@
# Use a base Python image
FROM registry.suse.com/bci/python:3.11
# Set the working directory
WORKDIR /app
# Copy the server script
COPY server.py .
# Expose the port the server will run on
EXPOSE 8080
# Command to run the server
CMD ["python3", "-u", "server.py"]

View File

@@ -0,0 +1,18 @@
<project name="openSUSE:Leap:16.0:PullRequest">
<title>Leap 16.0 PullRequest area</title>
<description>Base project to define the pull request builds</description>
<person userid="autogits_obs_staging_bot" role="maintainer"/>
<person userid="maxlin_factory" role="maintainer"/>
<group groupid="maintenance-opensuse.org" role="maintainer"/>
<debuginfo>
<enable/>
</debuginfo>
<repository name="standard">
<path project="openSUSE:Leap:16.0" repository="standard"/>
<arch>x86_64</arch>
<arch>i586</arch>
<arch>aarch64</arch>
<arch>ppc64le</arch>
<arch>s390x</arch>
</repository>
</project>

View File

@@ -0,0 +1,59 @@
<project name="openSUSE:Leap:16.0">
<title>openSUSE Leap 16.0 based on SLFO</title>
<description>Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)</description>
<link project="openSUSE:Backports:SLE-16.0"/>
<scmsync>http://gitea-test:3000/myproducts/mySLFO#staging-main</scmsync>
<person userid="dimstar_suse" role="maintainer"/>
<person userid="lkocman-factory" role="maintainer"/>
<person userid="maxlin_factory" role="maintainer"/>
<person userid="factory-auto" role="reviewer"/>
<person userid="licensedigger" role="reviewer"/>
<group groupid="autobuild-team" role="maintainer"/>
<group groupid="factory-maintainers" role="maintainer"/>
<group groupid="maintenance-opensuse.org" role="maintainer"/>
<group groupid="factory-staging" role="reviewer"/>
<build>
<disable repository="ports"/>
</build>
<debuginfo>
<enable/>
</debuginfo>
<repository name="standard" rebuild="local">
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
<path project="SUSE:SLFO:1.2" repository="standard"/>
<arch>local</arch>
<arch>i586</arch>
<arch>x86_64</arch>
<arch>aarch64</arch>
<arch>ppc64le</arch>
<arch>s390x</arch>
</repository>
<repository name="product">
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="product" trigger="manual"/>
<path project="openSUSE:Leap:16.0:NonFree" repository="standard"/>
<path project="openSUSE:Leap:16.0" repository="images"/>
<path project="openSUSE:Leap:16.0" repository="standard"/>
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
<path project="SUSE:SLFO:1.2" repository="standard"/>
<arch>local</arch>
<arch>i586</arch>
<arch>x86_64</arch>
<arch>aarch64</arch>
<arch>ppc64le</arch>
<arch>s390x</arch>
</repository>
<repository name="ports">
<arch>armv7l</arch>
</repository>
<repository name="images">
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="images" trigger="manual"/>
<path project="openSUSE:Leap:16.0" repository="standard"/>
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
<path project="SUSE:SLFO:1.2" repository="standard"/>
<arch>i586</arch>
<arch>x86_64</arch>
<arch>aarch64</arch>
<arch>ppc64le</arch>
<arch>s390x</arch>
</repository>
</project>

View File

@@ -0,0 +1,140 @@
import http.server
import socketserver
import os
import logging
import signal
import sys
import threading
import fnmatch
PORT = 8080
RESPONSE_DIR = "/app/responses"
STATE_DIR = "/tmp/mock_obs_state"
class MockOBSHandler(http.server.SimpleHTTPRequestHandler):
    """Mock OBS API handler.

    Serves canned XML responses from RESPONSE_DIR (matched by glob or exact
    filename) and persists PUT bodies under STATE_DIR so that a later GET on
    the same path returns what was previously PUT.
    """

    def do_GET(self):
        """Serve previously PUT state for this path if present, otherwise
        fall back to the canned-response lookup."""
        logging.info(f"GET request for: {self.path}")
        path_without_query = self.path.split('?')[0]
        # Check for state stored by a PUT request first: a PUT to the same
        # path takes precedence over static response files.
        sanitized_put_path = 'PUT' + path_without_query.replace('/', '_')
        state_file_path = os.path.join(STATE_DIR, sanitized_put_path)
        if os.path.exists(state_file_path):
            logging.info(f"Found stored PUT state for {self.path} at {state_file_path}")
            self._send_file(state_file_path)
            return
        # If no PUT state file, fall back to the glob/exact match logic
        self.handle_request('GET')

    def do_PUT(self):
        """Read the PUT body (plain or chunked) and persist it to STATE_DIR."""
        logging.info(f"PUT request for: {self.path}")
        logging.info(f"Headers: {self.headers}")
        path_without_query = self.path.split('?')[0]
        body = b''
        if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
            logging.info("Chunked transfer encoding detected")
            while True:
                line = self.rfile.readline().strip()
                if not line:
                    break
                # RFC 7230: the chunk-size line may carry extensions after
                # ';' — strip them before parsing the hex size.  The
                # previous int(line, 16) crashed on such lines.
                chunk_length = int(line.split(b';')[0], 16)
                if chunk_length == 0:
                    self.rfile.readline()  # consume the terminating CRLF
                    break
                body += self.rfile.read(chunk_length)
                self.rfile.read(2)  # Read the trailing CRLF
        else:
            content_length = int(self.headers.get('Content-Length', 0))
            body = self.rfile.read(content_length)
        logging.info(f"Body: {body.decode('utf-8')}")
        sanitized_path = 'PUT' + path_without_query.replace('/', '_')
        state_file_path = os.path.join(STATE_DIR, sanitized_path)
        logging.info(f"Saving state for {self.path} to {state_file_path}")
        os.makedirs(os.path.dirname(state_file_path), exist_ok=True)
        with open(state_file_path, 'wb') as f:
            f.write(body)
        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        response_body = b"OK"
        self.send_header("Content-Length", str(len(response_body)))
        self.end_headers()
        self.wfile.write(response_body)

    def do_POST(self):
        logging.info(f"POST request for: {self.path}")
        self.handle_request('POST')

    def do_DELETE(self):
        logging.info(f"DELETE request for: {self.path}")
        self.handle_request('DELETE')

    def handle_request(self, method):
        """Look up a canned response file for METHOD + path.

        Glob patterns in RESPONSE_DIR are tried first (in sorted order so
        the winner is deterministic — os.listdir order is arbitrary),
        then an exact filename match.  404 when nothing matches.
        """
        path_without_query = self.path.split('?')[0]
        sanitized_request_path = method + path_without_query.replace('/', '_')
        logging.info(f"Handling request, looking for match for: {sanitized_request_path}")
        response_file = None
        # Check for glob match first
        if os.path.exists(RESPONSE_DIR):
            for filename in sorted(os.listdir(RESPONSE_DIR)):
                if fnmatch.fnmatch(sanitized_request_path, filename):
                    response_file = os.path.join(RESPONSE_DIR, filename)
                    logging.info(f"Found matching response file (glob): {response_file}")
                    break
        # Fallback to exact match if no glob match
        if response_file is None:
            exact_file = os.path.join(RESPONSE_DIR, sanitized_request_path)
            if os.path.exists(exact_file):
                response_file = exact_file
                logging.info(f"Found matching response file (exact): {response_file}")
        if response_file:
            logging.info(f"Serving content from {response_file}")
            self._send_file(response_file)
        else:
            logging.info(f"Response file not found for {sanitized_request_path}. Sending 404.")
            self.send_response(404)
            self.send_header("Content-type", "text/plain")
            body = f"Mock response not found for {sanitized_request_path}".encode('utf-8')
            self.send_header("Content-Length", str(len(body)))
            self.end_headers()
            self.wfile.write(body)

    def _send_file(self, file_path):
        """Send FILE_PATH as a 200 application/xml response with Content-Length."""
        self.send_response(200)
        self.send_header("Content-type", "application/xml")
        file_size = os.path.getsize(file_path)
        self.send_header("Content-Length", str(file_size))
        self.end_headers()
        with open(file_path, 'rb') as f:
            self.wfile.write(f.read())
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
    # Make sure both working directories exist before accepting requests.
    if not os.path.exists(STATE_DIR):
        logging.info(f"Creating state directory: {STATE_DIR}")
        os.makedirs(STATE_DIR)
    if not os.path.exists(RESPONSE_DIR):
        os.makedirs(RESPONSE_DIR)
    with socketserver.TCPServer(("", PORT), MockOBSHandler) as server:
        logging.info(f"Serving mock OBS API on port {PORT}")

        def graceful_shutdown(sig, frame):
            # shutdown() blocks until serve_forever() returns, so it must
            # run on a separate thread when called from the signal handler.
            logging.info("Received SIGTERM, shutting down gracefully...")
            threading.Thread(target=server.shutdown).start()

        signal.signal(signal.SIGTERM, graceful_shutdown)
        server.serve_forever()
        logging.info("Server has shut down.")

View File

@@ -1,64 +0,0 @@
# Podman-Compose Services Architecture
This document describes the services defined in `podman-compose.yml` used for integration testing.
## Network
- **gitea-network**: A bridge network that enables communication between all services.
## Services
### gitea
- **Description**: Self-hosted Git service, serving as the central hub for repositories.
- **Container Name**: `gitea-test`
- **Image**: Built from `./gitea/Dockerfile`
- **Ports**: `3000` (HTTP), `3022` (SSH)
- **Volumes**: `./gitea-data` (persistent data), `./gitea-logs` (logs)
- **Healthcheck**: Monitors the Gitea API version endpoint.
### rabbitmq
- **Description**: Message broker for asynchronous communication between services.
- **Container Name**: `rabbitmq-test`
- **Image**: `rabbitmq:3.13.7-management`
- **Ports**: `5671` (AMQP with TLS), `15672` (Management UI)
- **Volumes**: `./rabbitmq-data`, `./rabbitmq-config/certs`, `./rabbitmq-config/rabbitmq.conf`, `./rabbitmq-config/definitions.json`
- **Healthcheck**: Ensures the broker is running and ready to accept connections.
### gitea-publisher
- **Description**: Publishes events from Gitea webhooks to the RabbitMQ message queue.
- **Container Name**: `gitea-publisher`
- **Dependencies**: `gitea` (started), `rabbitmq` (healthy)
- **Topic Domain**: `suse`
### workflow-pr
- **Description**: Manages pull request workflows, synchronizing between ProjectGit and PackageGit.
- **Container Name**: `workflow-pr`
- **Dependencies**: `gitea` (started), `rabbitmq` (healthy)
- **Environment**: Configured via `AUTOGITS_*` variables.
- **Volumes**: `./gitea-data` (read-only), `./workflow-pr/workflow-pr.json` (config), `./workflow-pr-repos` (working directories)
### tester
- **Description**: The dedicated test runner container. It hosts the `pytest` suite and provides a mock OBS API using `pytest-httpserver`.
- **Container Name**: `tester`
- **Image**: Built from `./Dockerfile.tester`
- **Mock API**: Listens on port `8080` within the container network to simulate OBS.
- **Volumes**: Project root mounted at `/opt/project` for source access.
### obs-staging-bot
- **Description**: Interacts with Gitea and the OBS API (mocked by `tester`) to manage staging projects.
- **Container Name**: `obs-staging-bot`
- **Dependencies**: `gitea` (started), `tester` (started)
- **Environment**:
- `AUTOGITS_STAGING_BOT_POLL_INTERVAL`: Set to `2s` for fast integration testing.
- **Mock Integration**: Points to `http://tester:8080` for both OBS API and Web hosts.
---
## Testing Workflow
1. **Build**: `make build` (root) then `make build` (integration).
2. **Up**: `make up` starts all services.
3. **Wait**: `make wait_healthy` ensures infrastructure is ready.
4. **Test**: `make pytest` runs the suite inside the `tester` container.
5. **Down**: `make down` stops and removes containers.
Use `make test` to perform steps 1-4 automatically.

View File

@@ -0,0 +1,77 @@
+-------------------------------------------------------------------------------------------------+
| Podman-Compose Services Diagram |
+-------------------------------------------------------------------------------------------------+
| |
| [Network] |
| gitea-network (Bridge network for inter-service communication) |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: gitea] |
| Description: Self-hosted Git service, central hub for repositories and code management. |
| Container Name: gitea-test |
| Image: Built from ./gitea Dockerfile |
| Ports: 3000 (HTTP), 3022 (SSH) |
| Volumes: ./gitea-data (for persistent data), ./gitea-logs (for logs) |
| Network: gitea-network |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: rabbitmq] |
| Description: Message broker for asynchronous communication between services. |
| Container Name: rabbitmq-test |
| Image: rabbitmq:3.13.7-management |
| Ports: 5671 (AMQP), 15672 (Management UI) |
| Volumes: ./rabbitmq-data (for persistent data), ./rabbitmq-config/certs (TLS certs), |
| ./rabbitmq-config/rabbitmq.conf (config), ./rabbitmq-config/definitions.json (exchanges)|
| Healthcheck: Ensures RabbitMQ is running and healthy. |
| Network: gitea-network |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: gitea-publisher] |
| Description: Publishes events from Gitea to the RabbitMQ message queue. |
| Container Name: gitea-publisher |
| Image: Built from ../gitea-events-rabbitmq-publisher/Dockerfile (local/package) |
| Dependencies: gitea (started), rabbitmq (healthy) |
| Environment: RABBITMQ_HOST, RABBITMQ_USERNAME, RABBITMQ_PASSWORD, SSL_CERT_FILE |
| Command: Listens for Gitea events, publishes to 'suse' topic, debug enabled. |
| Network: gitea-network |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: workflow-pr] |
| Description: Manages pull request workflows, likely consuming events from RabbitMQ and |
| interacting with Gitea. |
| Container Name: workflow-pr |
| Image: Built from ../workflow-pr/Dockerfile (local/package) |
| Dependencies: gitea (started), rabbitmq (healthy) |
| Environment: AMQP_USERNAME, AMQP_PASSWORD, SSL_CERT_FILE |
| Volumes: ./gitea-data (read-only), ./workflow-pr/workflow-pr.json (config), |
| ./workflow-pr-repos (for repositories) |
| Command: Configures Gitea/RabbitMQ URLs, enables debug, manages repositories. |
| Network: gitea-network |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: mock-obs] |
| Description: A mock (simulated) service for the Open Build Service (OBS) for testing. |
| Container Name: mock-obs |
| Image: Built from ./mock-obs Dockerfile |
| Ports: 8080 |
| Volumes: ./mock-obs/responses (for mock API responses) |
| Network: gitea-network |
| |
|-------------------------------------------------------------------------------------------------|
| |
| [Service: obs-staging-bot] |
| Description: A bot that interacts with Gitea and the mock OBS, likely for staging processes. |
| Container Name: obs-staging-bot |
| Image: Built from ../obs-staging-bot/Dockerfile (local/package) |
| Dependencies: gitea (started), mock-obs (started) |
| Environment: OBS_USER, OBS_PASSWORD |
| Volumes: ./gitea-data (read-only) |
| Command: Configures Gitea/OBS URLs, enables debug. |
| Network: gitea-network |
| |
+-------------------------------------------------------------------------------------------------+

View File

@@ -29,6 +29,11 @@ services:
image: rabbitmq:3.13.7-management
container_name: rabbitmq-test
init: true
healthcheck:
test: ["CMD", "rabbitmq-diagnostics", "check_running", "-q"]
interval: 30s
timeout: 30s
retries: 3
networks:
- gitea-network
ports:
@@ -99,21 +104,17 @@ services:
]
restart: unless-stopped
tester:
build:
context: .
dockerfile: Dockerfile.tester
container_name: tester
mock-obs:
build: ./mock-obs
container_name: mock-obs
init: true
dns_search: .
networks:
- gitea-network
environment:
- PYTEST_HTTPSERVER_HOST=0.0.0.0
- PYTEST_HTTPSERVER_PORT=8080
ports:
- "8080:8080"
volumes:
- ..:/opt/project:z
command: sleep infinity
- ./mock-obs/responses:/app/responses:z # Use :z for shared SELinux label
restart: unless-stopped
obs-staging-bot:
build:
@@ -126,17 +127,16 @@ services:
depends_on:
gitea:
condition: service_started
tester:
mock-obs:
condition: service_started
environment:
- OBS_USER=mock
- OBS_PASSWORD=mock-long-password
- AUTOGITS_STAGING_BOT_POLL_INTERVAL=2s
volumes:
- ./gitea-data:/gitea-data:ro,z
command:
- "-debug"
- "-gitea-url=http://gitea-test:3000"
- "-obs=http://tester:8080"
- "-obs-web=http://tester:8080"
- "-obs=http://mock-obs:8080"
- "-obs-web=http://mock-obs:8080"
restart: unless-stopped

View File

@@ -8,74 +8,8 @@ import time
import os
import json
import base64
import re
from tests.lib.common_test_utils import GiteaAPIClient
class ObsMockState:
def __init__(self):
self.build_results = {} # project -> (package, code)
self.project_metas = {} # project -> scmsync
self.default_build_result = None
@pytest.fixture
def obs_mock_state():
return ObsMockState()
@pytest.fixture(autouse=True)
def default_obs_handlers(httpserver, obs_mock_state):
"""
Sets up default handlers for OBS API to avoid 404s.
"""
def project_meta_handler(request):
project = request.path.split("/")[2]
scmsync = obs_mock_state.project_metas.get(project, "http://gitea-test:3000/myproducts/mySLFO.git")
return f'<project name="{project}"><scmsync>{scmsync}</scmsync></project>'
def build_result_handler(request):
project = request.path.split("/")[2]
res = obs_mock_state.build_results.get(project) or obs_mock_state.default_build_result
if not res:
return '<resultlist></resultlist>'
package_name, code = res
# We'll use a simple hardcoded XML here to avoid re-parsing template every time
# or we can use the template. For simplicity, let's use a basic one.
xml_template = f"""<resultlist state="mock">
<result project="{project}" repository="standard" arch="x86_64" code="unpublished" state="unpublished">
<scmsync>http://gitea-test:3000/myproducts/mySLFO.git?onlybuild={package_name}#sha</scmsync>
<status package="{package_name}" code="{code}"/>
</result>
</resultlist>"""
return xml_template
# Register handlers
httpserver.expect_request(re.compile(r"/source/[^/]+/_meta$"), method="GET").respond_with_handler(project_meta_handler)
httpserver.expect_request(re.compile(r"/build/[^/]+/_result"), method="GET").respond_with_handler(build_result_handler)
httpserver.expect_request(re.compile(r"/source/[^/]+/_meta$"), method="PUT").respond_with_data("OK")
httpserver.expect_request(re.compile(r"/source/[^/]+$"), method="DELETE").respond_with_data("OK")
@pytest.fixture
def mock_build_result(obs_mock_state):
"""
Fixture to set up mock build results.
"""
def _setup_mock(package_name: str, code: str, project: str = None):
if project:
obs_mock_state.build_results[project] = (package_name, code)
else:
# If no project specified, we can't easily know which one to set
# but usually it's the one the bot will request.
# We'll use a special key to signify "all" or we can just wait for the request.
# For now, let's assume we want to match openSUSE:Leap:16.0:PullRequest:*
# The test will call it with specific project if needed.
# In test_pr_workflow, it doesn't know the PR number yet.
# So we'll make the handler fallback to this if project not found.
obs_mock_state.default_build_result = (package_name, code)
return _setup_mock
BRANCH_CONFIG_COMMON = {
"workflow.config": {
"Workflows": ["pr"],
@@ -229,8 +163,8 @@ def gitea_env():
"""
Global fixture to set up the Gitea environment for all tests.
"""
gitea_url = "http://gitea-test:3000"
admin_token_path = os.path.join(os.path.dirname(__file__), "..", "gitea-data", "admin.token")
gitea_url = "http://127.0.0.1:3000"
admin_token_path = "./gitea-data/admin.token"
admin_token = None
try:
@@ -321,6 +255,10 @@ def gitea_env():
# Setup users (using configs from this branch)
setup_users_from_config(client, merged_configs.get("workflow.config", {}), merged_configs.get("_maintainership.json", {}))
if restart_needed:
client.restart_service("workflow-pr")
time.sleep(2) # Give it time to pick up changes
print("--- Gitea Global Setup Complete ---")
yield client

View File

@@ -7,6 +7,42 @@ import re
import xml.etree.ElementTree as ET
from pathlib import Path
import base64
import subprocess
TEST_DATA_DIR = Path(__file__).parent.parent / "data"
BUILD_RESULT_TEMPLATE = TEST_DATA_DIR / "build_result.xml.template"
MOCK_RESPONSES_DIR = Path(__file__).parent.parent.parent / "mock-obs" / "responses"
MOCK_BUILD_RESULT_FILE = (
MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0:PullRequest:*__result"
)
MOCK_BUILD_RESULT_FILE1 = MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0__result"
@pytest.fixture
def mock_build_result():
    """
    Fixture to create a mock build result file from the template.
    Returns a factory function that the test can call with parameters.
    """

    def _create_result_file(package_name: str, code: str):
        # Stamp every <status> entry of the template with the requested
        # package name and build result code.
        tree = ET.parse(BUILD_RESULT_TEMPLATE)
        root = tree.getroot()
        for status_tag in root.findall(".//status"):
            status_tag.set("package", package_name)
            status_tag.set("code", code)
        MOCK_RESPONSES_DIR.mkdir(exist_ok=True)
        # Write the same result for both the PullRequest staging glob and
        # the base project, so either lookup by the mock server succeeds.
        tree.write(MOCK_BUILD_RESULT_FILE)
        tree.write(MOCK_BUILD_RESULT_FILE1)
        return str(MOCK_BUILD_RESULT_FILE)

    yield _create_result_file

    # Teardown: remove each file independently.  The previous code only
    # unlinked FILE1 when FILE existed and without a missing-file guard,
    # which raised FileNotFoundError if FILE1 was never written.
    MOCK_BUILD_RESULT_FILE.unlink(missing_ok=True)
    MOCK_BUILD_RESULT_FILE1.unlink(missing_ok=True)
class GiteaAPIClient:
def __init__(self, base_url, token, sudo=None):
@@ -81,6 +117,18 @@ class GiteaAPIClient:
print(f"Organization '{org_name}' created.")
else:
raise
print(f"--- Checking organization: {org_name} ---")
try:
self._request("GET", f"orgs/{org_name}")
print(f"Organization '{org_name}' already exists.")
except requests.exceptions.HTTPError as e:
if e.response.status_code == 404:
print(f"Creating organization '{org_name}'...")
data = {"username": org_name, "full_name": org_name}
self._request("POST", "orgs", json=data)
print(f"Organization '{org_name}' created.")
else:
raise
def create_repo(self, org_name, repo_name):
print(f"--- Checking repository: {org_name}/{repo_name} ---")
@@ -299,6 +347,8 @@ index 0000000..{pkg_b_sha}
raise
raise Exception(f"Timeout waiting for branch {branch} in {owner}/{repo}")
def modify_gitea_pr(self, repo_full_name: str, pr_number: int, diff_content: str, message: str):
owner, repo = repo_full_name.split("/")
@@ -453,6 +503,16 @@ index 0000000..{pkg_b_sha}
time.sleep(1) # give a chance to avoid possible concurrency issues with reviews request/approval
reviewer_client.create_review(repo_full_name, pr_number, event="APPROVED", body="Approving requested review")
def restart_service(self, service_name: str):
print(f"--- Restarting service: {service_name} ---")
try:
# Assumes podman-compose.yml is in the parent directory of tests/lib
subprocess.run(["podman-compose", "restart", service_name], check=True, cwd=os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
print(f"Service {service_name} restarted successfully.")
except subprocess.CalledProcessError as e:
print(f"Error restarting service {service_name}: {e}")
raise
def wait_for_project_pr(self, package_pr_repo, package_pr_number, project_pr_repo="myproducts/mySLFO", timeout=60):
print(f"Polling {package_pr_repo} PR #{package_pr_number} timeline for forwarded PR event in {project_pr_repo}...")
for _ in range(timeout):
@@ -494,3 +554,4 @@ index 0000000..{pkg_b_sha}
time.sleep(1)
return package_merged, project_merged

View File

@@ -1,9 +1,12 @@
import pytest
import re
import time
import subprocess
import requests
from pathlib import Path
from tests.lib.common_test_utils import (
GiteaAPIClient,
mock_build_result,
)
# =============================================================================
@@ -18,6 +21,8 @@ def test_pr_workflow_succeeded(staging_main_env, mock_build_result):
pr = gitea_env.create_gitea_pr("mypool/pkgA", diff, "Test PR - should succeed", False, base_branch=merge_branch_name)
initial_pr_number = pr["number"]
compose_dir = Path(__file__).parent.parent
forwarded_pr_number = gitea_env.wait_for_project_pr("mypool/pkgA", initial_pr_number)
assert (
forwarded_pr_number is not None
@@ -38,10 +43,17 @@ def test_pr_workflow_succeeded(staging_main_env, mock_build_result):
assert reviewer_added, "Staging bot was not added as a reviewer."
print("Staging bot has been added as a reviewer.")
mock_build_result(package_name="pkgA", code="succeeded")
mock_build_result(package_name="pkgA", code="succeeded")
print("Restarting obs-staging-bot...")
subprocess.run(
["podman-compose", "restart", "obs-staging-bot"],
cwd=compose_dir,
check=True,
capture_output=True,
)
print(f"Polling myproducts/mySLFO PR #{forwarded_pr_number} for final status...")
status_comment_found = False
for _ in range(20):
time.sleep(1)
@@ -63,6 +75,8 @@ def test_pr_workflow_failed(staging_main_env, mock_build_result):
pr = gitea_env.create_gitea_pr("mypool/pkgA", diff, "Test PR - should fail", False, base_branch=merge_branch_name)
initial_pr_number = pr["number"]
compose_dir = Path(__file__).parent.parent
forwarded_pr_number = gitea_env.wait_for_project_pr("mypool/pkgA", initial_pr_number)
assert (
forwarded_pr_number is not None
@@ -85,6 +99,14 @@ def test_pr_workflow_failed(staging_main_env, mock_build_result):
mock_build_result(package_name="pkgA", code="failed")
print("Restarting obs-staging-bot...")
subprocess.run(
["podman-compose", "restart", "obs-staging-bot"],
cwd=compose_dir,
check=True,
capture_output=True,
)
print(f"Polling myproducts/mySLFO PR #{forwarded_pr_number} for final status...")
status_comment_found = False
for _ in range(20):

View File

@@ -137,108 +137,6 @@ index 0000000..e69de29
assert project_merged, f"Project PR myproducts/mySLFO#{project_pr_number} was not merged after 'merge ok'."
print("Both PRs merged successfully after 'merge ok'.")
@pytest.mark.t003
def test_003_refuse_manual_merge(manual_merge_env, test_user_client, ownerB_client, staging_bot_client):
    """
    Test scenario TC-MERGE-003:
    1. Create a PackageGit PR with ManualMergeOnly set to true.
    2. Ensure all mandatory reviews are completed on both project and package PRs.
    3. Comment "merge ok" on the package PR from the account of a not requested reviewer.
    4. Verify the PR is not merged.
    """
    gitea_env, test_full_repo_name, merge_branch_name = manual_merge_env
    # 1. Create a package PR
    diff = """diff --git a/manual_merge_test.txt b/manual_merge_test.txt
new file mode 100644
index 0000000..e69de29
"""
    print(f"--- Creating package PR in mypool/pkgA on branch {merge_branch_name} ---")
    package_pr = test_user_client.create_gitea_pr("mypool/pkgA", diff, "Test Manual Merge Fixture", False, base_branch=merge_branch_name)
    package_pr_number = package_pr["number"]
    print(f"Created package PR mypool/pkgA#{package_pr_number}")
    # 2. Make sure the workflow-pr service created related project PR
    project_pr_number = gitea_env.wait_for_project_pr("mypool/pkgA", package_pr_number)
    assert project_pr_number is not None, "Workflow bot did not create a project PR."
    print(f"Found project PR: myproducts/mySLFO#{project_pr_number}")
    # 3. Approve reviews and verify NOT merged
    print("Waiting for all expected review requests and approving them...")
    # Expected reviewers based on manual-merge branch config and pkgA maintainership
    expected_reviewers = {"usera", "userb", "ownerA", "ownerX", "ownerY"}
    # ManualMergeOnly still requires regular reviews to be satisfied.
    # We poll until all expected reviewers are requested, then approve them.
    all_requested = False
    for _ in range(30):
        # Trigger approvals for whatever is already requested
        gitea_env.approve_requested_reviews("mypool/pkgA", package_pr_number)
        gitea_env.approve_requested_reviews("myproducts/mySLFO", project_pr_number)
        # Explicitly handle staging bot if it is requested or pending
        prj_reviews = gitea_env.list_reviews("myproducts/mySLFO", project_pr_number)
        if any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] in ["REQUEST_REVIEW", "PENDING"] for r in prj_reviews):
            print("Staging bot has a pending/requested review. Approving...")
            staging_bot_client.create_review("myproducts/mySLFO", project_pr_number, event="APPROVED", body="Staging bot approves")
        # Check if all expected reviewers have at least one review record (any state)
        pkg_reviews = gitea_env.list_reviews("mypool/pkgA", package_pr_number)
        current_reviewers = {r["user"]["login"] for r in pkg_reviews}
        if expected_reviewers.issubset(current_reviewers):
            # Also ensure they are all approved (not just requested)
            approved_reviewers = {r["user"]["login"] for r in pkg_reviews if r["state"] == "APPROVED"}
            if expected_reviewers.issubset(approved_reviewers):
                # And check project PR for bot approval
                prj_approved = any(r["user"]["login"] == "autogits_obs_staging_bot" and r["state"] == "APPROVED" for r in prj_reviews)
                if prj_approved:
                    all_requested = True
                    print(f"All expected reviewers {expected_reviewers} and staging bot have approved.")
                    break
        # While approvals accumulate, neither PR may merge (ManualMergeOnly)
        pkg_details = gitea_env.get_pr_details("mypool/pkgA", package_pr_number)
        prj_details = gitea_env.get_pr_details("myproducts/mySLFO", project_pr_number)
        assert not pkg_details.get("merged"), "Package PR merged prematurely (ManualMergeOnly ignored?)"
        assert not prj_details.get("merged"), "Project PR merged prematurely (ManualMergeOnly ignored?)"
        time.sleep(2)
    assert all_requested, f"Timed out waiting for all expected reviewers {expected_reviewers} to approve. Current: {current_reviewers}"
    print("Both PRs have all required approvals but are not merged (as expected with ManualMergeOnly).")
    # 4. Comment "merge ok" from ownerB, who is NOT one of the requested
    # reviewers (expected_reviewers above does not include ownerB)
    print("Commenting 'merge ok' on package PR as user ownerB ...")
    ownerB_client.create_issue_comment("mypool/pkgA", package_pr_number, "merge ok")
    # 5. Verify that neither PR gets merged: the "merge ok" came from a
    # non-reviewer, so the bot must ignore it. We poll for a while and then
    # assert both merged flags stayed False.
    print("Polling for PR merge status...")
    package_merged = False
    project_merged = False
    for i in range(20):  # Poll for up to 20 seconds
        if not package_merged:
            pkg_details = gitea_env.get_pr_details("mypool/pkgA", package_pr_number)
            if pkg_details.get("merged"):
                package_merged = True
                print(f"Package PR mypool/pkgA#{package_pr_number} merged.")
        if not project_merged:
            prj_details = gitea_env.get_pr_details("myproducts/mySLFO", project_pr_number)
            if prj_details.get("merged"):
                project_merged = True
                print(f"Project PR myproducts/mySLFO#{project_pr_number} merged.")
        if package_merged and project_merged:
            break
        time.sleep(1)
    assert not package_merged, f"Package PR mypool/pkgA#{package_pr_number} was merged after 'merge ok'."
    assert not project_merged, f"Project PR myproducts/mySLFO#{project_pr_number} was merged after 'merge ok'."
    print("Both PRs merged not after 'merge ok'.")
@pytest.mark.t008
def test_008_merge_mode_ff_only_success(merge_ff_env, test_user_client):
"""

View File

@@ -139,7 +139,6 @@ index 0000000..e69de29
@pytest.mark.t005
# @pytest.mark.xfail(reason="TBD troubleshoot")
def test_005_any_maintainer_approval_sufficient(maintainer_env, ownerA_client, ownerBB_client):
"""
Test scenario:
@@ -201,7 +200,6 @@ index 0000000..e69de29
@pytest.mark.t006
@pytest.mark.xfail(reason="tbd flacky in ci")
def test_006_maintainer_rejection_removes_other_requests(maintainer_env, ownerA_client, ownerBB_client):
"""
Test scenario:

View File

@@ -1171,7 +1171,6 @@ var IsDryRun bool
var ProcessPROnly string
var ObsClient common.ObsClientInterface
var BotUser string
var PollInterval = 5 * time.Minute
func ObsWebHostFromApiHost(apihost string) string {
u, err := url.Parse(apihost)
@@ -1194,18 +1193,9 @@ func main() {
flag.StringVar(&ObsApiHost, "obs", "", "API for OBS instance")
flag.StringVar(&ObsWebHost, "obs-web", "", "Web OBS instance, if not derived from the obs config")
flag.BoolVar(&IsDryRun, "dry", false, "Dry-run, don't actually create any build projects or review changes")
pollIntervalStr := flag.String("poll-interval", common.GetEnvOverrideString(os.Getenv("AUTOGITS_STAGING_BOT_POLL_INTERVAL"), ""), "Polling interval for notifications (e.g. 5m, 10s)")
debug := flag.Bool("debug", false, "Turns on debug logging")
flag.Parse()
if len(*pollIntervalStr) > 0 {
if d, err := time.ParseDuration(*pollIntervalStr); err == nil {
PollInterval = d
} else {
common.LogError("Invalid poll interval:", err)
}
}
if *debug {
common.SetLoggingLevel(common.LogLevelDebug)
} else {
@@ -1274,6 +1264,6 @@ func main() {
for {
PollWorkNotifications(ObsClient, gitea)
common.LogInfo("Poll cycle finished")
time.Sleep(PollInterval)
time.Sleep(5 * time.Minute)
}
}

View File

@@ -54,7 +54,6 @@ This is the ProjectGit config file. For runtime config file, see bottom.
| *GitProjectName* | Repository and branch where the ProjectGit lives. | no | string | **Format**: `org/project_repo#branch` | By default assumes `_ObsPrj` with default branch in the *Organization* |
| *ManualMergeOnly* | Merges are permitted only upon receiving a "merge ok" comment from designated maintainers in the PkgGit PR. | no | bool | true, false | false |
| *ManualMergeProject* | Merges are permitted only upon receiving a "merge ok" comment in the ProjectGit PR from project maintainers. | no | bool | true, false | false |
| *MergeMode* | Type of package merge accepted. See below for details. | no | string | ff-only, replace, devel | ff-only |
| *ReviewRequired* | If submitter is a maintainer, require review from another maintainer if available. | no | bool | true, false | false |
| *NoProjectGitPR* | Do not create PrjGit PR, but still perform other tasks. | no | bool | true, false | false |
| *Reviewers* | PrjGit reviewers. Additional review requests are triggered for associated PkgGit PRs. PrjGit PR is merged only when all reviews are complete. | no | array of strings | | `[]` |
@@ -118,6 +117,8 @@ The following labels are used, when defined in Repo/Org.
| Label Config Entry | Default label | Description
|--------------------|----------------|----------------------------------------
| StagingAuto | staging/Auto | Assigned to Project Git PRs when first staged
| ReviewPending | review/Pending | Assigned to Project Git PR when package reviews are still pending
| ReviewDone | review/Done | Assigned to Project Git PR when reviews are complete on all package PRs
Maintainership