Compare commits
161 Commits
wip_manual
...
mergemodes
| Author | SHA256 | Date | |
|---|---|---|---|
| db70452cbc | |||
| 53eebb75f7 | |||
| 9f9a4660e9 | |||
| cb2f17a287 | |||
| 3125df4d6a | |||
| 06600813b4 | |||
| 3b510182d6 | |||
|
|
d1bcc222ce | ||
|
|
b632952f62 | ||
|
|
1b90299d94
|
||
| 708add1017 | |||
| 712349d638 | |||
| ba5a42dd29 | |||
| 53cf2c8bad | |||
| 868c28cd5a | |||
| 962c4b2562 | |||
| 57cb251dbc | |||
| 75c4fada50 | |||
| 7d13e586ac | |||
| 7729b845b0 | |||
| c662b2fdbf | |||
|
|
4cedb37da4 | ||
|
|
fe519628c8 | ||
|
|
ff18828692 | ||
| 6337ef7e50 | |||
| e9992d2e99 | |||
| aac218fc6d | |||
| 139f40fce3 | |||
| c44d34fdbe | |||
| 23be3df1fb | |||
| 68b67c6975 | |||
| 478a3a140a | |||
| df4da87bfd | |||
| b19d301d95 | |||
| 9532aa897c | |||
| f942909ac7 | |||
| 7f98298b89 | |||
| c6ee055cb4 | |||
| 58e5547a91 | |||
| c2709e1894 | |||
| 7790e5f301 | |||
| 2620aa3ddd | |||
| 59a47cd542 | |||
| a0c51657d4 | |||
| f0b053ca07 | |||
| 844ec8a87b | |||
| 6ee8fcc597 | |||
| 1220799e57 | |||
| 86a176a785 | |||
| bb9e9a08e5 | |||
| edd8c67fc9 | |||
| 877e93c9bf | |||
| 51403713be | |||
| cc69a9348c | |||
| 5b5bb9a5bc | |||
|
|
2f39fc9836 | ||
| f959684540 | |||
| 18f7ed658a | |||
| c05fa236d1 | |||
| c866303696 | |||
| e806d6ad0d | |||
| abf8aa58fc | |||
| 4f132ec154 | |||
| 86a7fd072e | |||
| 5f5e7d98b5 | |||
| e8738c9585 | |||
| 2f18adaa67 | |||
| b7f5c97de1 | |||
| 09001ce01b | |||
| 37c9cc7a57 | |||
| 362e481a09 | |||
| 38f4c44fd0 | |||
| 605d3dee06 | |||
| 6f26bcdccc | |||
| fffdf4fad3 | |||
| f6d2239f4d | |||
| 913fb7c046 | |||
| 79318dc169 | |||
| 377ed1c37f | |||
| 51b0487b29 | |||
| 49e32c0ab1 | |||
| 01e4f5f59e | |||
| 19d9fc5f1e | |||
| c4e184140a | |||
| 56c492ccdf | |||
| 3a6009a5a3 | |||
| 2c4d25a5eb | |||
| 052ab37412 | |||
| 925f546272 | |||
| 71fd32a707 | |||
| 581131bdc8 | |||
| 495ed349ea | |||
| 350a255d6e | |||
| e3087e46c2 | |||
| ae6b638df6 | |||
| 2c73cc683a | |||
| 32adfb1111 | |||
| fe8fcbae96 | |||
| 5756f7ceea | |||
| 2be0f808d2 | |||
| 7a0f651eaf | |||
| 2e47104b17 | |||
| 76bfa612c5 | |||
| 71aa0813ad | |||
| cc675c1b24 | |||
| 44e4941120 | |||
| 86acfa6871 | |||
| 7f09b2d2d3 | |||
| f3a37f1158 | |||
| 9d6db86318 | |||
| e11993c81f | |||
| 4bd259a2a0 | |||
| 162ae11cdd | |||
| 8431b47322 | |||
| 3ed5ecc3f0 | |||
| d08ab3efd6 | |||
| a4f6628e52 | |||
| 25073dd619 | |||
| 4293181b4e | |||
| 551a4ef577 | |||
| 6afb18fc58 | |||
| f310220261 | |||
| ef7c0c1cea | |||
| 27230fa03b | |||
| c52d40b760 | |||
| d3ba579a8b | |||
| 9ef8209622 | |||
| ba66dd868e | |||
| 17755fa2b5 | |||
| f94d3a8942 | |||
| 20e1109602 | |||
| c25d3be44e | |||
| 8db558891a | |||
| 0e06ba5993 | |||
| 736769d630 | |||
| 93c970d0dd | |||
| 5544a65947 | |||
| 918723d57b | |||
| a418b48809 | |||
|
55846562c1
|
|||
|
95c7770cad
|
|||
|
1b900e3202
|
|||
|
d083acfd1c
|
|||
|
244160e20e
|
|||
| ed2847a2c6 | |||
| 1457caa64b | |||
| b9a38c1724 | |||
| 74edad5d3e | |||
|
|
e5cad365ee
|
||
|
|
53851ba10f
|
||
|
|
056e5208c8
|
||
|
|
af142fdb15
|
||
|
|
5ce92beb52
|
||
|
|
ae379ec408
|
||
| 458837b007 | |||
| a3feab6f7e | |||
| fa647ab2d8 | |||
| 19902813b5 | |||
| 23a7f310c5 | |||
| 58d1f2de91 | |||
| d623844411 |
33
.gitea/workflows/go-generate-check.yaml
Normal file
33
.gitea/workflows/go-generate-check.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
name: go-generate-check
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
paths:
|
||||
- '**.go'
|
||||
- '**.mod'
|
||||
- '**.sum'
|
||||
pull_request:
|
||||
paths:
|
||||
- '**.go'
|
||||
- '**.mod'
|
||||
- '**.sum'
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
go-generate-check:
|
||||
name: go-generate-check
|
||||
container:
|
||||
image: registry.opensuse.org/devel/factory/git-workflow/containers/opensuse/bci/golang-extended:latest
|
||||
steps:
|
||||
- run: git clone --no-checkout --depth 1 ${{ gitea.server_url }}/${{ gitea.repository }} .
|
||||
- run: git fetch origin ${{ gitea.ref }}
|
||||
- run: git checkout FETCH_HEAD
|
||||
- run: go generate -C common
|
||||
- run: go generate -C workflow-pr
|
||||
- run: git add -N .; git diff
|
||||
- run: |
|
||||
status=$(git status --short)
|
||||
if [[ -n "$status" ]]; then
|
||||
echo -e "$status"
|
||||
echo "Please commit the differences from running: go generate"
|
||||
false
|
||||
fi
|
||||
24
.gitea/workflows/go-generate-push.yaml
Normal file
24
.gitea/workflows/go-generate-push.yaml
Normal file
@@ -0,0 +1,24 @@
|
||||
name: go-generate-push
|
||||
on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
go-generate-push:
|
||||
name: go-generate-push
|
||||
container:
|
||||
image: registry.opensuse.org/devel/factory/git-workflow/containers/opensuse/bci/golang-extended:latest
|
||||
steps:
|
||||
- run: git clone --no-checkout --depth 1 ${{ gitea.server_url }}/${{ gitea.repository }} .
|
||||
- run: git fetch origin ${{ gitea.ref }}
|
||||
- run: git checkout FETCH_HEAD
|
||||
- run: go generate -C common
|
||||
- run: go generate -C workflow-pr
|
||||
- run: |
|
||||
host=${{ gitea.server_url }}
|
||||
host=${host#https://}
|
||||
echo $host
|
||||
git remote set-url origin "https://x-access-token:${{ secrets.GITEA_TOKEN }}@$host/${{ gitea.repository }}"
|
||||
git config user.name "Gitea Actions"
|
||||
git config user.email "gitea_noreply@opensuse.org"
|
||||
- run: 'git status --short; git status --porcelain=2|grep --quiet -v . || ( git add .;git commit -m "CI run result of: go generate"; git push origin HEAD:${{ gitea.ref }} )'
|
||||
- run: git log -p FETCH_HEAD...HEAD
|
||||
- run: git log --numstat FETCH_HEAD...HEAD
|
||||
33
.gitea/workflows/go-vendor-check.yaml
Normal file
33
.gitea/workflows/go-vendor-check.yaml
Normal file
@@ -0,0 +1,33 @@
|
||||
name: go-vendor-check
|
||||
on:
|
||||
push:
|
||||
branches: ['main']
|
||||
paths:
|
||||
- '**.mod'
|
||||
- '**.sum'
|
||||
pull_request:
|
||||
paths:
|
||||
- '**.mod'
|
||||
- '**.sum'
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
go-generate-check:
|
||||
name: go-vendor-check
|
||||
container:
|
||||
image: registry.opensuse.org/devel/factory/git-workflow/containers/opensuse/bci/golang-extended:latest
|
||||
steps:
|
||||
- run: git clone --no-checkout --depth 1 ${{ gitea.server_url }}/${{ gitea.repository }} .
|
||||
- run: git fetch origin ${{ gitea.ref }}
|
||||
- run: git checkout FETCH_HEAD
|
||||
- run: go mod download
|
||||
- run: go mod vendor
|
||||
- run: go mod verify
|
||||
- run: git add -N .; git diff
|
||||
- run: go mod tidy -diff || true
|
||||
- run: |
|
||||
status=$(git status --short)
|
||||
if [[ -n "$status" ]]; then
|
||||
echo -e "$status"
|
||||
echo "Please commit the differences from running: go generate"
|
||||
false
|
||||
fi
|
||||
26
.gitea/workflows/go-vendor-push.yaml
Normal file
26
.gitea/workflows/go-vendor-push.yaml
Normal file
@@ -0,0 +1,26 @@
|
||||
name: go-generate-push
|
||||
on:
|
||||
workflow_dispatch:
|
||||
jobs:
|
||||
go-generate-push:
|
||||
name: go-generate-push
|
||||
container:
|
||||
image: registry.opensuse.org/devel/factory/git-workflow/containers/opensuse/bci/golang-extended:latest
|
||||
steps:
|
||||
- run: git clone --no-checkout --depth 1 ${{ gitea.server_url }}/${{ gitea.repository }} .
|
||||
- run: git fetch origin ${{ gitea.ref }}
|
||||
- run: git checkout FETCH_HEAD
|
||||
- run: go mod download
|
||||
- run: go mod vendor
|
||||
- run: go mod verify
|
||||
- run: |
|
||||
host=${{ gitea.server_url }}
|
||||
host=${host#https://}
|
||||
echo $host
|
||||
git remote set-url origin "https://x-access-token:${{ secrets.GITEA_TOKEN }}@$host/${{ gitea.repository }}"
|
||||
git config user.name "Gitea Actions"
|
||||
git config user.email "gitea_noreply@opensuse.org"
|
||||
- run: 'git status --short; git status --porcelain=2|grep --quiet -v . || ( git add .;git commit -m "CI run result of: go mod vendor"; git push origin HEAD:${{ gitea.ref }} )'
|
||||
- run: go mod tidy -diff || true
|
||||
- run: git log -p FETCH_HEAD...HEAD
|
||||
- run: git log --numstat FETCH_HEAD...HEAD
|
||||
9
.gitignore
vendored
9
.gitignore
vendored
@@ -1,6 +1,7 @@
|
||||
mock
|
||||
node_modules
|
||||
*.obscpio
|
||||
autogits-tmp.tar.zst
|
||||
*.osc
|
||||
*.conf
|
||||
/integration/gitea-data
|
||||
/integration/gitea-logs
|
||||
/integration/rabbitmq-data
|
||||
/integration/workflow-pr-repos
|
||||
__pycache__/
|
||||
|
||||
4
Makefile
Normal file
4
Makefile
Normal file
@@ -0,0 +1,4 @@
|
||||
MODULES := devel-importer utils/hujson utils/maintainer-update gitea-events-rabbitmq-publisher gitea_status_proxy group-review obs-forward-bot obs-staging-bot obs-status-service workflow-direct workflow-pr
|
||||
|
||||
build:
|
||||
for m in $(MODULES); do go build -C $$m -buildmode=pie || exit 1 ; done
|
||||
12
README.md
12
README.md
@@ -23,18 +23,18 @@ The bots that drive Git Workflow for package management
|
||||
Bugs
|
||||
----
|
||||
|
||||
Report bugs to issue tracker at https://src.opensuse.org/adamm/autogits
|
||||
Report bugs to issue tracker at https://src.opensuse.org/git-workflow/autogits
|
||||
|
||||
|
||||
Build Status
|
||||
------------
|
||||
|
||||
main branch build status:
|
||||
|
||||

|
||||
|
||||
Devel project build status:
|
||||
Devel project build status (`main` branch):
|
||||
|
||||

|
||||
|
||||
`staging` branch build status:
|
||||
|
||||

|
||||
|
||||
|
||||
|
||||
15
_service
15
_service
@@ -1,15 +0,0 @@
|
||||
<services>
|
||||
<!-- workaround, go_modules needs a tar and obs_scm doesn't take file://. -->
|
||||
<service name="roast" mode="manual">
|
||||
<param name="target">.</param>
|
||||
<param name="reproducible">true</param>
|
||||
<param name="outfile">autogits-tmp.tar.zst</param>
|
||||
<param name="exclude">autogits-tmp.tar.zst</param>
|
||||
</service>
|
||||
<service name="go_modules" mode="manual">
|
||||
<param name="basename">./</param>
|
||||
<param name="compression">zst</param>
|
||||
<param name="vendorname">vendor</param>
|
||||
</service>
|
||||
</services>
|
||||
|
||||
146
autogits.spec
146
autogits.spec
@@ -17,11 +17,12 @@
|
||||
|
||||
|
||||
Name: autogits
|
||||
Version: 0
|
||||
Version: 1
|
||||
Release: 0
|
||||
Summary: GitWorkflow utilities
|
||||
License: GPL-2.0-or-later
|
||||
URL: https://src.opensuse.org/adamm/autogits
|
||||
BuildRequires: git
|
||||
BuildRequires: systemd-rpm-macros
|
||||
BuildRequires: go
|
||||
%{?systemd_ordering}
|
||||
@@ -30,13 +31,21 @@ BuildRequires: go
|
||||
Git Workflow tooling and utilities enabling automated handing of OBS projects
|
||||
as git repositories
|
||||
|
||||
%package utils
|
||||
Summary: HuJSON to JSON parser
|
||||
Provides: hujson
|
||||
Provides: /usr/bin/hujson
|
||||
|
||||
%description utils
|
||||
HuJSON to JSON parser, using stdin -> stdout pipe
|
||||
%package devel-importer
|
||||
Summary: Imports devel projects from obs to git
|
||||
|
||||
%description -n autogits-devel-importer
|
||||
Command-line tool to import devel projects from obs to git
|
||||
|
||||
|
||||
%package doc
|
||||
Summary: Common documentation files
|
||||
BuildArch: noarch
|
||||
|
||||
%description -n autogits-doc
|
||||
Common documentation files
|
||||
|
||||
|
||||
%package gitea-events-rabbitmq-publisher
|
||||
Summary: Publishes Gitea webhook data via RabbitMQ
|
||||
@@ -47,12 +56,12 @@ with a topic
|
||||
<scope>.src.$organization.$webhook_type.[$webhook_action_type]
|
||||
|
||||
|
||||
%package doc
|
||||
Summary: Common documentation files
|
||||
|
||||
%description doc
|
||||
Common documentation files
|
||||
%package gitea-status-proxy
|
||||
Summary: Proxy for setting commit status in Gitea
|
||||
|
||||
%description gitea-status-proxy
|
||||
Setting commit status requires code write access token. This proxy
|
||||
is middleware that delegates status setting without access to other APIs
|
||||
|
||||
%package group-review
|
||||
Summary: Reviews of groups defined in ProjectGit
|
||||
@@ -62,6 +71,12 @@ Is used to handle reviews associated with groups defined in the
|
||||
ProjectGit.
|
||||
|
||||
|
||||
%package obs-forward-bot
|
||||
Summary: obs-forward-bot
|
||||
|
||||
%description obs-forward-bot
|
||||
|
||||
|
||||
%package obs-staging-bot
|
||||
Summary: Build a PR against a ProjectGit, if review is requested
|
||||
|
||||
@@ -76,8 +91,19 @@ Summary: Reports build status of OBS service as an easily to produce SVG
|
||||
Reports build status of OBS service as an easily to produce SVG
|
||||
|
||||
|
||||
%package utils
|
||||
Summary: HuJSON to JSON parser
|
||||
Provides: hujson
|
||||
Provides: /usr/bin/hujson
|
||||
|
||||
%description utils
|
||||
HuJSON to JSON parser, using stdin -> stdout pipe
|
||||
|
||||
|
||||
%package workflow-direct
|
||||
Summary: Keep ProjectGit in sync for a devel project
|
||||
Requires: openssh-clients
|
||||
Requires: git-core
|
||||
|
||||
%description workflow-direct
|
||||
Keep ProjectGit in sync with packages in the organization of a devel project
|
||||
@@ -85,6 +111,8 @@ Keep ProjectGit in sync with packages in the organization of a devel project
|
||||
|
||||
%package workflow-pr
|
||||
Summary: Keeps ProjectGit PR in-sync with a PackageGit PR
|
||||
Requires: openssh-clients
|
||||
Requires: git-core
|
||||
|
||||
%description workflow-pr
|
||||
Keeps ProjectGit PR in-sync with a PackageGit PR
|
||||
@@ -95,15 +123,27 @@ Keeps ProjectGit PR in-sync with a PackageGit PR
|
||||
cp -r /home/abuild/rpmbuild/SOURCES/* ./
|
||||
|
||||
%build
|
||||
go build \
|
||||
-C devel-importer \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C utils/hujson \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C utils/maintainer-update \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C gitea-events-rabbitmq-publisher \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C gitea_status_proxy \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C group-review \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C obs-forward-bot \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C obs-staging-bot \
|
||||
-buildmode=pie
|
||||
@@ -117,17 +157,34 @@ go build \
|
||||
-C workflow-pr \
|
||||
-buildmode=pie
|
||||
|
||||
%check
|
||||
go test -C common -v
|
||||
go test -C group-review -v
|
||||
go test -C obs-staging-bot -v
|
||||
go test -C obs-status-service -v
|
||||
go test -C workflow-direct -v
|
||||
go test -C utils/maintainer-update
|
||||
# TODO build fails
|
||||
#go test -C workflow-pr -v
|
||||
|
||||
%install
|
||||
install -D -m0755 devel-importer/devel-importer %{buildroot}%{_bindir}/devel-importer
|
||||
install -D -m0755 gitea-events-rabbitmq-publisher/gitea-events-rabbitmq-publisher %{buildroot}%{_bindir}/gitea-events-rabbitmq-publisher
|
||||
install -D -m0644 systemd/gitea-events-rabbitmq-publisher.service %{buildroot}%{_unitdir}/gitea-events-rabbitmq-publisher.service
|
||||
install -D -m0755 gitea_status_proxy/gitea_status_proxy %{buildroot}%{_bindir}/gitea_status_proxy
|
||||
install -D -m0755 group-review/group-review %{buildroot}%{_bindir}/group-review
|
||||
install -D -m0644 systemd/group-review@.service %{buildroot}%{_unitdir}/group-review@.service
|
||||
install -D -m0755 obs-forward-bot/obs-forward-bot %{buildroot}%{_bindir}/obs-forward-bot
|
||||
install -D -m0755 obs-staging-bot/obs-staging-bot %{buildroot}%{_bindir}/obs-staging-bot
|
||||
install -D -m0644 systemd/obs-staging-bot.service %{buildroot}%{_unitdir}/obs-staging-bot.service
|
||||
install -D -m0755 obs-status-service/obs-status-service %{buildroot}%{_bindir}/obs-status-service
|
||||
install -D -m0644 systemd/obs-status-service.service %{buildroot}%{_unitdir}/obs-status-service.service
|
||||
install -D -m0755 workflow-direct/workflow-direct %{buildroot}%{_bindir}/workflow-direct
|
||||
install -D -m0644 systemd/workflow-direct@.service %{buildroot}%{_unitdir}/workflow-direct@.service
|
||||
install -D -m0755 workflow-pr/workflow-pr %{buildroot}%{_bindir}/workflow-pr
|
||||
install -D -m0644 systemd/workflow-pr@.service %{buildroot}%{_unitdir}/workflow-pr@.service
|
||||
install -D -m0755 utils/hujson/hujson %{buildroot}%{_bindir}/hujson
|
||||
install -D -m0755 utils/maintainer-update/maintainer-update %{buildroot}%{_bindir}/maintainer-update
|
||||
|
||||
%pre gitea-events-rabbitmq-publisher
|
||||
%service_add_pre gitea-events-rabbitmq-publisher.service
|
||||
@@ -141,6 +198,18 @@ install -D -m0755 utils/hujson/hujson
|
||||
%postun gitea-events-rabbitmq-publisher
|
||||
%service_del_postun gitea-events-rabbitmq-publisher.service
|
||||
|
||||
%pre group-review
|
||||
%service_add_pre group-review@.service
|
||||
|
||||
%post group-review
|
||||
%service_add_post group-review@.service
|
||||
|
||||
%preun group-review
|
||||
%service_del_preun group-review@.service
|
||||
|
||||
%postun group-review
|
||||
%service_del_postun group-review@.service
|
||||
|
||||
%pre obs-staging-bot
|
||||
%service_add_pre obs-staging-bot.service
|
||||
|
||||
@@ -165,25 +234,59 @@ install -D -m0755 utils/hujson/hujson
|
||||
%postun obs-status-service
|
||||
%service_del_postun obs-status-service.service
|
||||
|
||||
%files gitea-events-rabbitmq-publisher
|
||||
%pre workflow-direct
|
||||
%service_add_pre workflow-direct.service
|
||||
|
||||
%post workflow-direct
|
||||
%service_add_post workflow-direct.service
|
||||
|
||||
%preun workflow-direct
|
||||
%service_del_preun workflow-direct.service
|
||||
|
||||
%postun workflow-direct
|
||||
%service_del_postun workflow-direct.service
|
||||
|
||||
%pre workflow-pr
|
||||
%service_add_pre workflow-pr.service
|
||||
|
||||
%post workflow-pr
|
||||
%service_add_post workflow-pr.service
|
||||
|
||||
%preun workflow-pr
|
||||
%service_del_preun workflow-pr.service
|
||||
|
||||
%postun workflow-pr
|
||||
%service_del_postun workflow-pr.service
|
||||
|
||||
%files devel-importer
|
||||
%license COPYING
|
||||
%doc gitea-events-rabbitmq-publisher/README.md
|
||||
%{_bindir}/gitea-events-rabbitmq-publisher
|
||||
%{_unitdir}/gitea-events-rabbitmq-publisher.service
|
||||
%doc devel-importer/README.md
|
||||
%{_bindir}/devel-importer
|
||||
|
||||
%files doc
|
||||
%license COPYING
|
||||
%doc doc/README.md
|
||||
%doc doc/workflows.md
|
||||
|
||||
%files gitea-events-rabbitmq-publisher
|
||||
%license COPYING
|
||||
%doc gitea-events-rabbitmq-publisher/README.md
|
||||
%{_bindir}/gitea-events-rabbitmq-publisher
|
||||
%{_unitdir}/gitea-events-rabbitmq-publisher.service
|
||||
|
||||
%files gitea-status-proxy
|
||||
%license COPYING
|
||||
%{_bindir}/gitea_status_proxy
|
||||
|
||||
%files group-review
|
||||
%license COPYING
|
||||
%doc group-review/README.md
|
||||
%{_bindir}/group-review
|
||||
%{_unitdir}/group-review@.service
|
||||
|
||||
%files utils
|
||||
%files obs-forward-bot
|
||||
%license COPYING
|
||||
%{_bindir}/hujson
|
||||
%{_bindir}/obs-forward-bot
|
||||
|
||||
%files obs-staging-bot
|
||||
%license COPYING
|
||||
@@ -197,13 +300,20 @@ install -D -m0755 utils/hujson/hujson
|
||||
%{_bindir}/obs-status-service
|
||||
%{_unitdir}/obs-status-service.service
|
||||
|
||||
%files utils
|
||||
%license COPYING
|
||||
%{_bindir}/hujson
|
||||
%{_bindir}/maintainer-update
|
||||
|
||||
%files workflow-direct
|
||||
%license COPYING
|
||||
%doc workflow-direct/README.md
|
||||
%{_bindir}/workflow-direct
|
||||
%{_unitdir}/workflow-direct@.service
|
||||
|
||||
%files workflow-pr
|
||||
%license COPYING
|
||||
%doc workflow-pr/README.md
|
||||
%{_bindir}/workflow-pr
|
||||
%{_unitdir}/workflow-pr@.service
|
||||
|
||||
|
||||
@@ -39,6 +39,10 @@ const (
|
||||
|
||||
Permission_ForceMerge = "force-merge"
|
||||
Permission_Group = "release-engineering"
|
||||
|
||||
MergeModeFF = "ff-only"
|
||||
MergeModeReplace = "replace"
|
||||
MergeModeDevel = "devel"
|
||||
)
|
||||
|
||||
type ConfigFile struct {
|
||||
@@ -52,8 +56,10 @@ type ReviewGroup struct {
|
||||
}
|
||||
|
||||
type QAConfig struct {
|
||||
Name string
|
||||
Origin string
|
||||
Name string
|
||||
Origin string
|
||||
Label string // requires this gitea lable to be set or skipped
|
||||
BuildDisableRepos []string // which repos to build disable in the new project
|
||||
}
|
||||
|
||||
type Permissions struct {
|
||||
@@ -61,6 +67,20 @@ type Permissions struct {
|
||||
Members []string
|
||||
}
|
||||
|
||||
const (
|
||||
Label_StagingAuto = "staging/Auto"
|
||||
Label_ReviewPending = "review/Pending"
|
||||
Label_ReviewDone = "review/Done"
|
||||
)
|
||||
|
||||
func LabelKey(tag_value string) string {
|
||||
// capitalize first letter and remove /
|
||||
if len(tag_value) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.ToUpper(tag_value[0:1]) + strings.ReplaceAll(tag_value[1:], "/", "")
|
||||
}
|
||||
|
||||
type AutogitConfig struct {
|
||||
Workflows []string // [pr, direct, test]
|
||||
Organization string
|
||||
@@ -72,9 +92,13 @@ type AutogitConfig struct {
|
||||
Committers []string // group in addition to Reviewers and Maintainers that can order the bot around, mostly as helper for factory-maintainers
|
||||
Subdirs []string // list of directories to sort submodules into. Needed b/c _manifest cannot list non-existent directories
|
||||
|
||||
Labels map[string]string // list of tags, if not default, to apply
|
||||
MergeMode string // project merge mode
|
||||
|
||||
NoProjectGitPR bool // do not automatically create project git PRs, just assign reviewers and assume somethign else creates the ProjectGit PR
|
||||
ManualMergeOnly bool // only merge with "Merge OK" comment by Project Maintainers and/or Package Maintainers and/or reviewers
|
||||
ManualMergeProject bool // require merge of ProjectGit PRs with "Merge OK" by ProjectMaintainers and/or reviewers
|
||||
ReviewRequired bool // always require a maintainer review, even if maintainer submits it. Only ignored if no other package or project reviewers
|
||||
}
|
||||
|
||||
type AutogitConfigs []*AutogitConfig
|
||||
@@ -164,6 +188,17 @@ func ReadWorkflowConfig(gitea GiteaFileContentAndRepoFetcher, git_project string
|
||||
}
|
||||
}
|
||||
config.GitProjectName = config.GitProjectName + "#" + branch
|
||||
|
||||
// verify merge modes
|
||||
switch config.MergeMode {
|
||||
case MergeModeFF, MergeModeDevel, MergeModeReplace:
|
||||
break // good results
|
||||
case "":
|
||||
config.MergeMode = MergeModeFF
|
||||
default:
|
||||
return nil, fmt.Errorf("Unsupported merge mode in %s: %s", git_project, config.MergeMode)
|
||||
}
|
||||
|
||||
return config, nil
|
||||
}
|
||||
|
||||
@@ -188,6 +223,8 @@ func (configs AutogitConfigs) GetPrjGitConfig(org, repo, branch string) *Autogit
|
||||
if c.GitProjectName == prjgit {
|
||||
return c
|
||||
}
|
||||
}
|
||||
for _, c := range configs {
|
||||
if c.Organization == org && c.Branch == branch {
|
||||
return c
|
||||
}
|
||||
@@ -273,6 +310,14 @@ func (config *AutogitConfig) GetRemoteBranch() string {
|
||||
return "origin_" + config.Branch
|
||||
}
|
||||
|
||||
func (config *AutogitConfig) Label(label string) string {
|
||||
if t, found := config.Labels[LabelKey(label)]; found {
|
||||
return t
|
||||
}
|
||||
|
||||
return label
|
||||
}
|
||||
|
||||
type StagingConfig struct {
|
||||
ObsProject string
|
||||
RebuildAll bool
|
||||
|
||||
@@ -10,6 +10,67 @@ import (
|
||||
mock_common "src.opensuse.org/autogits/common/mock"
|
||||
)
|
||||
|
||||
func TestLabelKey(t *testing.T) {
|
||||
tests := map[string]string{
|
||||
"": "",
|
||||
"foo": "Foo",
|
||||
"foo/bar": "Foobar",
|
||||
"foo/Bar": "FooBar",
|
||||
}
|
||||
|
||||
for k, v := range tests {
|
||||
if c := common.LabelKey(k); c != v {
|
||||
t.Error("expected", v, "got", c, "input", k)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigLabelParser(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
json string
|
||||
label_value string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
json: "{}",
|
||||
label_value: "path/String",
|
||||
},
|
||||
{
|
||||
name: "defined",
|
||||
json: `{"Labels": {"foo": "bar", "PathString": "moo/Label"}}`,
|
||||
label_value: "moo/Label",
|
||||
},
|
||||
{
|
||||
name: "undefined",
|
||||
json: `{"Labels": {"foo": "bar", "NotPathString": "moo/Label"}}`,
|
||||
label_value: "path/String",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
repo := models.Repository{
|
||||
DefaultBranch: "master",
|
||||
}
|
||||
|
||||
ctl := gomock.NewController(t)
|
||||
gitea := mock_common.NewMockGiteaFileContentAndRepoFetcher(ctl)
|
||||
gitea.EXPECT().GetRepositoryFileContent("foo", "bar", "", "workflow.config").Return([]byte(test.json), "abc", nil)
|
||||
gitea.EXPECT().GetRepository("foo", "bar").Return(&repo, nil)
|
||||
|
||||
config, err := common.ReadWorkflowConfig(gitea, "foo/bar")
|
||||
if err != nil || config == nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if l := config.Label("path/String"); l != test.label_value {
|
||||
t.Error("Expecting", test.label_value, "got", l)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProjectConfigMatcher(t *testing.T) {
|
||||
configs := common.AutogitConfigs{
|
||||
{
|
||||
@@ -21,6 +82,15 @@ func TestProjectConfigMatcher(t *testing.T) {
|
||||
Branch: "main",
|
||||
GitProjectName: "test/prjgit#main",
|
||||
},
|
||||
{
|
||||
Organization: "test",
|
||||
Branch: "main",
|
||||
GitProjectName: "test/bar#never_match",
|
||||
},
|
||||
{
|
||||
Organization: "test",
|
||||
GitProjectName: "test/bar#main",
|
||||
},
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
@@ -50,6 +120,20 @@ func TestProjectConfigMatcher(t *testing.T) {
|
||||
branch: "main",
|
||||
config: 1,
|
||||
},
|
||||
{
|
||||
name: "prjgit only match",
|
||||
org: "test",
|
||||
repo: "bar",
|
||||
branch: "main",
|
||||
config: 3,
|
||||
},
|
||||
{
|
||||
name: "non-default branch match",
|
||||
org: "test",
|
||||
repo: "bar",
|
||||
branch: "something_main",
|
||||
config: -1,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -105,6 +189,10 @@ func TestConfigWorkflowParser(t *testing.T) {
|
||||
if config.ManualMergeOnly != false {
|
||||
t.Fatal("This should be false")
|
||||
}
|
||||
|
||||
if config.Label("foobar") != "foobar" {
|
||||
t.Fatal("undefined label should return default value")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -254,3 +342,67 @@ func TestConfigPermissions(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConfigMergeModeParser(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
json string
|
||||
mergeMode string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
json: "{}",
|
||||
mergeMode: common.MergeModeFF,
|
||||
},
|
||||
{
|
||||
name: "ff-only",
|
||||
json: `{"MergeMode": "ff-only"}`,
|
||||
mergeMode: common.MergeModeFF,
|
||||
},
|
||||
{
|
||||
name: "replace",
|
||||
json: `{"MergeMode": "replace"}`,
|
||||
mergeMode: common.MergeModeReplace,
|
||||
},
|
||||
{
|
||||
name: "devel",
|
||||
json: `{"MergeMode": "devel"}`,
|
||||
mergeMode: common.MergeModeDevel,
|
||||
},
|
||||
{
|
||||
name: "unsupported",
|
||||
json: `{"MergeMode": "invalid"}`,
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
repo := models.Repository{
|
||||
DefaultBranch: "master",
|
||||
}
|
||||
|
||||
ctl := gomock.NewController(t)
|
||||
gitea := mock_common.NewMockGiteaFileContentAndRepoFetcher(ctl)
|
||||
gitea.EXPECT().GetRepositoryFileContent("foo", "bar", "", "workflow.config").Return([]byte(test.json), "abc", nil)
|
||||
gitea.EXPECT().GetRepository("foo", "bar").Return(&repo, nil)
|
||||
|
||||
config, err := common.ReadWorkflowConfig(gitea, "foo/bar")
|
||||
if test.wantErr {
|
||||
if err == nil {
|
||||
t.Fatal("Expected error, got nil")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if config.MergeMode != test.mergeMode {
|
||||
t.Errorf("Expected MergeMode %s, got %s", test.mergeMode, config.MergeMode)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,10 +20,13 @@ package common
|
||||
|
||||
const (
|
||||
GiteaTokenEnv = "GITEA_TOKEN"
|
||||
GiteaHostEnv = "GITEA_HOST"
|
||||
ObsUserEnv = "OBS_USER"
|
||||
ObsPasswordEnv = "OBS_PASSWORD"
|
||||
ObsSshkeyEnv = "OBS_SSHKEY"
|
||||
ObsSshkeyFileEnv = "OBS_SSHKEYFILE"
|
||||
ObsApiEnv = "OBS_API"
|
||||
ObsWebEnv = "OBS_WEB"
|
||||
|
||||
DefaultGitPrj = "_ObsPrj"
|
||||
PrjLinksFile = "links.json"
|
||||
|
||||
@@ -277,7 +277,7 @@ func (e *GitHandlerImpl) GitClone(repo, branch, remoteUrl string) (string, error
|
||||
args = slices.Insert(args, 1, "--unshallow")
|
||||
}
|
||||
e.GitExecOrPanic(repo, args...)
|
||||
return remoteName, e.GitExec(repo, "checkout", "--track", "-B", branch, remoteRef)
|
||||
return remoteName, e.GitExec(repo, "checkout", "-f", "--track", "-B", branch, remoteRef)
|
||||
}
|
||||
|
||||
func (e *GitHandlerImpl) GitBranchHead(gitDir, branchName string) (string, error) {
|
||||
@@ -350,6 +350,10 @@ var ExtraGitParams []string
|
||||
|
||||
func (e *GitHandlerImpl) GitExecWithOutput(cwd string, params ...string) (string, error) {
|
||||
cmd := exec.Command("/usr/bin/git", params...)
|
||||
var identityFile string
|
||||
if i := os.Getenv("AUTOGITS_IDENTITY_FILE"); len(i) > 0 {
|
||||
identityFile = " -i " + i
|
||||
}
|
||||
cmd.Env = []string{
|
||||
"GIT_CEILING_DIRECTORIES=" + e.GitPath,
|
||||
"GIT_CONFIG_GLOBAL=/dev/null",
|
||||
@@ -358,7 +362,7 @@ func (e *GitHandlerImpl) GitExecWithOutput(cwd string, params ...string) (string
|
||||
"EMAIL=not@exist@src.opensuse.org",
|
||||
"GIT_LFS_SKIP_SMUDGE=1",
|
||||
"GIT_LFS_SKIP_PUSH=1",
|
||||
"GIT_SSH_COMMAND=/usr/bin/ssh -o StrictHostKeyChecking=yes",
|
||||
"GIT_SSH_COMMAND=/usr/bin/ssh -o StrictHostKeyChecking=yes" + identityFile,
|
||||
}
|
||||
if len(ExtraGitParams) > 0 {
|
||||
cmd.Env = append(cmd.Env, ExtraGitParams...)
|
||||
|
||||
@@ -392,6 +392,7 @@ func TestCommitTreeParsing(t *testing.T) {
|
||||
commitId = commitId + strings.TrimSpace(string(data))
|
||||
return len(data), nil
|
||||
})
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
t.Fatal(err.Error())
|
||||
}
|
||||
|
||||
@@ -29,6 +29,7 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
transport "github.com/go-openapi/runtime/client"
|
||||
@@ -66,7 +67,16 @@ const (
|
||||
ReviewStateUnknown models.ReviewStateType = ""
|
||||
)
|
||||
|
||||
type GiteaLabelGetter interface {
|
||||
GetLabels(org, repo string, idx int64) ([]*models.Label, error)
|
||||
}
|
||||
|
||||
type GiteaLabelSettter interface {
|
||||
SetLabels(org, repo string, idx int64, labels []string) ([]*models.Label, error)
|
||||
}
|
||||
|
||||
type GiteaTimelineFetcher interface {
|
||||
ResetTimelineCache(org, repo string, idx int64)
|
||||
GetTimeline(org, repo string, idx int64) ([]*models.TimelineComment, error)
|
||||
}
|
||||
|
||||
@@ -91,9 +101,10 @@ type GiteaPRUpdater interface {
|
||||
UpdatePullRequest(org, repo string, num int64, options *models.EditPullRequestOption) (*models.PullRequest, error)
|
||||
}
|
||||
|
||||
type GiteaPRTimelineFetcher interface {
|
||||
type GiteaPRTimelineReviewFetcher interface {
|
||||
GiteaPRFetcher
|
||||
GiteaTimelineFetcher
|
||||
GiteaReviewFetcher
|
||||
}
|
||||
|
||||
type GiteaCommitFetcher interface {
|
||||
@@ -119,10 +130,16 @@ type GiteaPRChecker interface {
|
||||
GiteaMaintainershipReader
|
||||
}
|
||||
|
||||
type GiteaReviewFetcherAndRequester interface {
|
||||
type GiteaReviewFetcherAndRequesterAndUnrequester interface {
|
||||
GiteaReviewTimelineFetcher
|
||||
GiteaCommentFetcher
|
||||
GiteaReviewRequester
|
||||
GiteaReviewUnrequester
|
||||
}
|
||||
|
||||
type GiteaUnreviewTimelineFetcher interface {
|
||||
GiteaTimelineFetcher
|
||||
GiteaReviewUnrequester
|
||||
}
|
||||
|
||||
type GiteaReviewRequester interface {
|
||||
@@ -182,6 +199,8 @@ type Gitea interface {
|
||||
GiteaCommitStatusGetter
|
||||
GiteaCommitStatusSetter
|
||||
GiteaSetRepoOptions
|
||||
GiteaLabelGetter
|
||||
GiteaLabelSettter
|
||||
|
||||
GetNotifications(Type string, since *time.Time) ([]*models.NotificationThread, error)
|
||||
GetDoneNotifications(Type string, page int64) ([]*models.NotificationThread, error)
|
||||
@@ -189,7 +208,7 @@ type Gitea interface {
|
||||
GetOrganization(orgName string) (*models.Organization, error)
|
||||
GetOrganizationRepositories(orgName string) ([]*models.Repository, error)
|
||||
CreateRepositoryIfNotExist(git Git, org, repoName string) (*models.Repository, error)
|
||||
CreatePullRequestIfNotExist(repo *models.Repository, srcId, targetId, title, body string) (*models.PullRequest, error)
|
||||
CreatePullRequestIfNotExist(repo *models.Repository, srcId, targetId, title, body string) (*models.PullRequest, error, bool)
|
||||
GetPullRequestFileContent(pr *models.PullRequest, path string) ([]byte, string, error)
|
||||
GetRecentPullRequests(org, repo, branch string) ([]*models.PullRequest, error)
|
||||
GetRecentCommits(org, repo, branch string, commitNo int64) ([]*models.Commit, error)
|
||||
@@ -466,6 +485,30 @@ func (gitea *GiteaTransport) SetRepoOptions(owner, repo string, manual_merge boo
|
||||
return ok.Payload, err
|
||||
}
|
||||
|
||||
func (gitea *GiteaTransport) GetLabels(owner, repo string, idx int64) ([]*models.Label, error) {
|
||||
ret, err := gitea.client.Issue.IssueGetLabels(issue.NewIssueGetLabelsParams().WithOwner(owner).WithRepo(repo).WithIndex(idx), gitea.transport.DefaultAuthentication)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ret.Payload, err
|
||||
}
|
||||
|
||||
func (gitea *GiteaTransport) SetLabels(owner, repo string, idx int64, labels []string) ([]*models.Label, error) {
|
||||
interfaceLabels := make([]interface{}, len(labels))
|
||||
for i, l := range labels {
|
||||
interfaceLabels[i] = l
|
||||
}
|
||||
|
||||
ret, err := gitea.client.Issue.IssueAddLabel(issue.NewIssueAddLabelParams().WithOwner(owner).WithRepo(repo).WithIndex(idx).WithBody(&models.IssueLabelsOption{Labels: interfaceLabels}),
|
||||
gitea.transport.DefaultAuthentication)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ret.Payload, nil
|
||||
}
|
||||
|
||||
const (
|
||||
GiteaNotificationType_Pull = "Pull"
|
||||
)
|
||||
@@ -643,7 +686,7 @@ func (gitea *GiteaTransport) CreateRepositoryIfNotExist(git Git, org, repoName s
|
||||
return repo.Payload, nil
|
||||
}
|
||||
|
||||
func (gitea *GiteaTransport) CreatePullRequestIfNotExist(repo *models.Repository, srcId, targetId, title, body string) (*models.PullRequest, error) {
|
||||
func (gitea *GiteaTransport) CreatePullRequestIfNotExist(repo *models.Repository, srcId, targetId, title, body string) (*models.PullRequest, error, bool) {
|
||||
prOptions := models.CreatePullRequestOption{
|
||||
Base: targetId,
|
||||
Head: srcId,
|
||||
@@ -659,7 +702,7 @@ func (gitea *GiteaTransport) CreatePullRequestIfNotExist(repo *models.Repository
|
||||
WithHead(srcId),
|
||||
gitea.transport.DefaultAuthentication,
|
||||
); err == nil && pr.Payload.State == "open" {
|
||||
return pr.Payload, nil
|
||||
return pr.Payload, nil, false
|
||||
}
|
||||
|
||||
pr, err := gitea.client.Repository.RepoCreatePullRequest(
|
||||
@@ -673,10 +716,10 @@ func (gitea *GiteaTransport) CreatePullRequestIfNotExist(repo *models.Repository
|
||||
)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Cannot create pull request. %w", err)
|
||||
return nil, fmt.Errorf("Cannot create pull request. %w", err), true
|
||||
}
|
||||
|
||||
return pr.GetPayload(), nil
|
||||
return pr.GetPayload(), nil, true
|
||||
}
|
||||
|
||||
func (gitea *GiteaTransport) RequestReviews(pr *models.PullRequest, reviewers ...string) ([]*models.PullReview, error) {
|
||||
@@ -763,45 +806,91 @@ func (gitea *GiteaTransport) AddComment(pr *models.PullRequest, comment string)
|
||||
return nil
|
||||
}
|
||||
|
||||
type TimelineCacheData struct {
|
||||
data []*models.TimelineComment
|
||||
lastCheck time.Time
|
||||
}
|
||||
|
||||
var giteaTimelineCache map[string]TimelineCacheData = make(map[string]TimelineCacheData)
|
||||
var giteaTimelineCacheMutex sync.RWMutex
|
||||
|
||||
func (gitea *GiteaTransport) ResetTimelineCache(org, repo string, idx int64) {
|
||||
giteaTimelineCacheMutex.Lock()
|
||||
defer giteaTimelineCacheMutex.Unlock()
|
||||
|
||||
prID := fmt.Sprintf("%s/%s!%d", org, repo, idx)
|
||||
Cache, IsCached := giteaTimelineCache[prID]
|
||||
if IsCached {
|
||||
Cache.lastCheck = Cache.lastCheck.Add(-time.Hour)
|
||||
giteaTimelineCache[prID] = Cache
|
||||
}
|
||||
}
|
||||
|
||||
// returns timeline in reverse chronological create order
|
||||
func (gitea *GiteaTransport) GetTimeline(org, repo string, idx int64) ([]*models.TimelineComment, error) {
|
||||
page := int64(1)
|
||||
resCount := 1
|
||||
|
||||
retData := []*models.TimelineComment{}
|
||||
prID := fmt.Sprintf("%s/%s!%d", org, repo, idx)
|
||||
giteaTimelineCacheMutex.RLock()
|
||||
TimelineCache, IsCached := giteaTimelineCache[prID]
|
||||
var LastCachedTime strfmt.DateTime
|
||||
if IsCached {
|
||||
l := len(TimelineCache.data)
|
||||
if l > 0 {
|
||||
LastCachedTime = TimelineCache.data[0].Updated
|
||||
}
|
||||
|
||||
// cache data for 5 seconds
|
||||
if TimelineCache.lastCheck.Add(time.Second*5).Compare(time.Now()) > 0 {
|
||||
giteaTimelineCacheMutex.RUnlock()
|
||||
return TimelineCache.data, nil
|
||||
}
|
||||
}
|
||||
giteaTimelineCacheMutex.RUnlock()
|
||||
|
||||
giteaTimelineCacheMutex.Lock()
|
||||
defer giteaTimelineCacheMutex.Unlock()
|
||||
|
||||
for resCount > 0 {
|
||||
res, err := gitea.client.Issue.IssueGetCommentsAndTimeline(
|
||||
issue.NewIssueGetCommentsAndTimelineParams().
|
||||
WithOwner(org).
|
||||
WithRepo(repo).
|
||||
WithIndex(idx).
|
||||
WithPage(&page),
|
||||
gitea.transport.DefaultAuthentication,
|
||||
)
|
||||
|
||||
opts := issue.NewIssueGetCommentsAndTimelineParams().WithOwner(org).WithRepo(repo).WithIndex(idx).WithPage(&page)
|
||||
if !LastCachedTime.IsZero() {
|
||||
opts = opts.WithSince(&LastCachedTime)
|
||||
}
|
||||
res, err := gitea.client.Issue.IssueGetCommentsAndTimeline(opts, gitea.transport.DefaultAuthentication)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resCount = len(res.Payload)
|
||||
LogDebug("page:", page, "len:", resCount)
|
||||
if resCount == 0 {
|
||||
if resCount = len(res.Payload); resCount == 0 {
|
||||
break
|
||||
}
|
||||
page++
|
||||
|
||||
for _, d := range res.Payload {
|
||||
if d != nil {
|
||||
retData = append(retData, d)
|
||||
if time.Time(d.Created).Compare(time.Time(LastCachedTime)) > 0 {
|
||||
// created after last check, so we append here
|
||||
TimelineCache.data = append(TimelineCache.data, d)
|
||||
} else {
|
||||
// we need something updated in the timeline, maybe
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if resCount < 10 {
|
||||
break
|
||||
}
|
||||
page++
|
||||
}
|
||||
LogDebug("total results:", len(retData))
|
||||
slices.SortFunc(retData, func(a, b *models.TimelineComment) int {
|
||||
LogDebug("timeline", prID, "# timeline:", len(TimelineCache.data))
|
||||
slices.SortFunc(TimelineCache.data, func(a, b *models.TimelineComment) int {
|
||||
return time.Time(b.Created).Compare(time.Time(a.Created))
|
||||
})
|
||||
|
||||
return retData, nil
|
||||
TimelineCache.lastCheck = time.Now()
|
||||
giteaTimelineCache[prID] = TimelineCache
|
||||
|
||||
return TimelineCache.data, nil
|
||||
}
|
||||
|
||||
func (gitea *GiteaTransport) GetRepositoryFileContent(org, repo, hash, path string) ([]byte, string, error) {
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"src.opensuse.org/autogits/common/gitea-generated/client/repository"
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
@@ -25,12 +27,15 @@ const ProjectFileKey = "_project"
|
||||
type MaintainershipMap struct {
|
||||
Data map[string][]string
|
||||
IsDir bool
|
||||
Config *AutogitConfig
|
||||
FetchPackage func(string) ([]byte, error)
|
||||
Raw []byte
|
||||
}
|
||||
|
||||
func parseMaintainershipData(data []byte) (*MaintainershipMap, error) {
|
||||
func ParseMaintainershipData(data []byte) (*MaintainershipMap, error) {
|
||||
maintainers := &MaintainershipMap{
|
||||
Data: make(map[string][]string),
|
||||
Raw: data,
|
||||
}
|
||||
if err := json.Unmarshal(data, &maintainers.Data); err != nil {
|
||||
return nil, err
|
||||
@@ -39,7 +44,9 @@ func parseMaintainershipData(data []byte) (*MaintainershipMap, error) {
|
||||
return maintainers, nil
|
||||
}
|
||||
|
||||
func FetchProjectMaintainershipData(gitea GiteaMaintainershipReader, org, prjGit, branch string) (*MaintainershipMap, error) {
|
||||
func FetchProjectMaintainershipData(gitea GiteaMaintainershipReader, config *AutogitConfig) (*MaintainershipMap, error) {
|
||||
org, prjGit, branch := config.GetPrjGit()
|
||||
|
||||
data, _, err := gitea.FetchMaintainershipDirFile(org, prjGit, branch, ProjectFileKey)
|
||||
dir := true
|
||||
if err != nil || data == nil {
|
||||
@@ -59,8 +66,9 @@ func FetchProjectMaintainershipData(gitea GiteaMaintainershipReader, org, prjGit
|
||||
}
|
||||
}
|
||||
|
||||
m, err := parseMaintainershipData(data)
|
||||
m, err := ParseMaintainershipData(data)
|
||||
if m != nil {
|
||||
m.Config = config
|
||||
m.IsDir = dir
|
||||
m.FetchPackage = func(pkg string) ([]byte, error) {
|
||||
data, _, err := gitea.FetchMaintainershipDirFile(org, prjGit, branch, pkg)
|
||||
@@ -80,6 +88,8 @@ func (data *MaintainershipMap) ListProjectMaintainers(groups []*ReviewGroup) []s
|
||||
return nil
|
||||
}
|
||||
|
||||
m = slices.Clone(m)
|
||||
|
||||
// expands groups
|
||||
for _, g := range groups {
|
||||
m = g.ExpandMaintainers(m)
|
||||
@@ -116,6 +126,7 @@ func (data *MaintainershipMap) ListPackageMaintainers(pkg string, groups []*Revi
|
||||
}
|
||||
}
|
||||
}
|
||||
pkgMaintainers = slices.Clone(pkgMaintainers)
|
||||
prjMaintainers := data.ListProjectMaintainers(nil)
|
||||
|
||||
prjMaintainer:
|
||||
@@ -149,7 +160,10 @@ func (data *MaintainershipMap) IsApproved(pkg string, reviews []*models.PullRevi
|
||||
}
|
||||
|
||||
LogDebug("Looking for review by:", reviewers)
|
||||
if slices.Contains(reviewers, submitter) {
|
||||
slices.Sort(reviewers)
|
||||
reviewers = slices.Compact(reviewers)
|
||||
SubmitterIdxInReviewers := slices.Index(reviewers, submitter)
|
||||
if SubmitterIdxInReviewers > -1 && (!data.Config.ReviewRequired || len(reviewers) == 1) {
|
||||
LogDebug("Submitter is maintainer. Approving.")
|
||||
return true
|
||||
}
|
||||
@@ -164,13 +178,135 @@ func (data *MaintainershipMap) IsApproved(pkg string, reviews []*models.PullRevi
|
||||
return false
|
||||
}
|
||||
|
||||
func (data *MaintainershipMap) modifyInplace(writer io.StringWriter) error {
|
||||
var original map[string][]string
|
||||
if err := json.Unmarshal(data.Raw, &original); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(bytes.NewReader(data.Raw))
|
||||
_, err := dec.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
output := ""
|
||||
lastPos := 0
|
||||
modified := false
|
||||
|
||||
type entry struct {
|
||||
key string
|
||||
valStart int
|
||||
valEnd int
|
||||
}
|
||||
var entries []entry
|
||||
|
||||
for dec.More() {
|
||||
kToken, _ := dec.Token()
|
||||
key := kToken.(string)
|
||||
var raw json.RawMessage
|
||||
dec.Decode(&raw)
|
||||
valEnd := int(dec.InputOffset())
|
||||
valStart := valEnd - len(raw)
|
||||
entries = append(entries, entry{key, valStart, valEnd})
|
||||
}
|
||||
|
||||
changed := make(map[string]bool)
|
||||
for k, v := range data.Data {
|
||||
if ov, ok := original[k]; !ok || !slices.Equal(v, ov) {
|
||||
changed[k] = true
|
||||
}
|
||||
}
|
||||
for k := range original {
|
||||
if _, ok := data.Data[k]; !ok {
|
||||
changed[k] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(changed) == 0 {
|
||||
_, err = writer.WriteString(string(data.Raw))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
if v, ok := data.Data[e.key]; ok {
|
||||
prefix := string(data.Raw[lastPos:e.valStart])
|
||||
if modified && strings.TrimSpace(output) == "{" {
|
||||
if commaIdx := strings.Index(prefix, ","); commaIdx != -1 {
|
||||
if quoteIdx := strings.Index(prefix, "\""); quoteIdx == -1 || commaIdx < quoteIdx {
|
||||
prefix = prefix[:commaIdx] + prefix[commaIdx+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
output += prefix
|
||||
if changed[e.key] {
|
||||
slices.Sort(v)
|
||||
newVal, _ := json.Marshal(v)
|
||||
output += string(newVal)
|
||||
modified = true
|
||||
} else {
|
||||
output += string(data.Raw[e.valStart:e.valEnd])
|
||||
}
|
||||
} else {
|
||||
// Deleted
|
||||
modified = true
|
||||
}
|
||||
lastPos = e.valEnd
|
||||
}
|
||||
output += string(data.Raw[lastPos:])
|
||||
|
||||
// Handle additions (simplistic: at the end)
|
||||
for k, v := range data.Data {
|
||||
if _, ok := original[k]; !ok {
|
||||
slices.Sort(v)
|
||||
newVal, _ := json.Marshal(v)
|
||||
keyStr, _ := json.Marshal(k)
|
||||
|
||||
// Insert before closing brace
|
||||
if idx := strings.LastIndex(output, "}"); idx != -1 {
|
||||
prefix := output[:idx]
|
||||
suffix := output[idx:]
|
||||
|
||||
trimmedPrefix := strings.TrimRight(prefix, " \n\r\t")
|
||||
if !strings.HasSuffix(trimmedPrefix, "{") && !strings.HasSuffix(trimmedPrefix, ",") {
|
||||
// find the actual position of the last non-whitespace character in prefix
|
||||
lastCharIdx := strings.LastIndexAny(prefix, "]}0123456789\"")
|
||||
if lastCharIdx != -1 {
|
||||
prefix = prefix[:lastCharIdx+1] + "," + prefix[lastCharIdx+1:]
|
||||
}
|
||||
}
|
||||
|
||||
insertion := fmt.Sprintf(" %s: %s", string(keyStr), string(newVal))
|
||||
if !strings.HasSuffix(prefix, "\n") {
|
||||
insertion = "\n" + insertion
|
||||
}
|
||||
output = prefix + insertion + "\n" + suffix
|
||||
modified = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if modified {
|
||||
_, err := writer.WriteString(output)
|
||||
return err
|
||||
}
|
||||
_, err = writer.WriteString(string(data.Raw))
|
||||
return err
|
||||
}
|
||||
|
||||
func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) error {
|
||||
if data.IsDir {
|
||||
return fmt.Errorf("Not implemented")
|
||||
}
|
||||
|
||||
writer.WriteString("{\n")
|
||||
if len(data.Raw) > 0 {
|
||||
if err := data.modifyInplace(writer); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to full write
|
||||
writer.WriteString("{\n")
|
||||
if d, ok := data.Data[""]; ok {
|
||||
eol := ","
|
||||
if len(data.Data) == 1 {
|
||||
@@ -181,17 +317,12 @@ func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) e
|
||||
writer.WriteString(fmt.Sprintf(" \"\": %s%s\n", string(str), eol))
|
||||
}
|
||||
|
||||
keys := make([]string, len(data.Data))
|
||||
i := 0
|
||||
keys := make([]string, 0, len(data.Data))
|
||||
for pkg := range data.Data {
|
||||
if pkg == "" {
|
||||
continue
|
||||
}
|
||||
keys[i] = pkg
|
||||
i++
|
||||
}
|
||||
if len(keys) >= i {
|
||||
keys = slices.Delete(keys, i, len(keys))
|
||||
keys = append(keys, pkg)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
for i, pkg := range keys {
|
||||
|
||||
@@ -13,10 +13,10 @@ import (
|
||||
)
|
||||
|
||||
func TestMaintainership(t *testing.T) {
|
||||
config := common.AutogitConfig{
|
||||
config := &common.AutogitConfig{
|
||||
Branch: "bar",
|
||||
Organization: "foo",
|
||||
GitProjectName: common.DefaultGitPrj,
|
||||
GitProjectName: common.DefaultGitPrj + "#bar",
|
||||
}
|
||||
|
||||
packageTests := []struct {
|
||||
@@ -141,7 +141,7 @@ func TestMaintainership(t *testing.T) {
|
||||
notFoundError := repository.NewRepoGetContentsNotFound()
|
||||
for _, test := range packageTests {
|
||||
runTests := func(t *testing.T, mi common.GiteaMaintainershipReader) {
|
||||
maintainers, err := common.FetchProjectMaintainershipData(mi, config.Organization, config.GitProjectName, config.Branch)
|
||||
maintainers, err := common.FetchProjectMaintainershipData(mi, config)
|
||||
if err != nil && !test.otherError {
|
||||
if test.maintainersFileErr == nil {
|
||||
t.Fatal("Unexpected error recieved", err)
|
||||
@@ -208,6 +208,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
name string
|
||||
is_dir bool
|
||||
maintainers map[string][]string
|
||||
raw []byte
|
||||
expected_output string
|
||||
expected_error error
|
||||
}{
|
||||
@@ -231,6 +232,43 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
},
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical modification",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one", "two"},
|
||||
"foo": {"byte", "four", "newone"},
|
||||
"pkg1": {},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\",\"newone\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "no change",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one", "two"},
|
||||
"foo": {"byte", "four"},
|
||||
"pkg1": {},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical addition",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one"},
|
||||
"new": {"user"},
|
||||
},
|
||||
raw: []byte("{\n \"\": [ \"one\" ]\n}\n"),
|
||||
expected_output: "{\n \"\": [ \"one\" ],\n \"new\": [\"user\"]\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical deletion",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one"},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\"],\n \"old\": [\"user\"]\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\"]\n}\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -239,6 +277,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
data := common.MaintainershipMap{
|
||||
Data: test.maintainers,
|
||||
IsDir: test.is_dir,
|
||||
Raw: test.raw,
|
||||
}
|
||||
|
||||
if err := data.WriteMaintainershipFile(&b); err != test.expected_error {
|
||||
@@ -248,8 +287,134 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
output := b.String()
|
||||
|
||||
if test.expected_output != output {
|
||||
t.Fatal("unexpected output:", output, "Expecting:", test.expected_output)
|
||||
t.Fatalf("unexpected output:\n%q\nExpecting:\n%q", output, test.expected_output)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestReviewRequired(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
maintainers []string
|
||||
config *common.AutogitConfig
|
||||
is_approved bool
|
||||
}{
|
||||
{
|
||||
name: "ReviewRequired=false",
|
||||
maintainers: []string{"maintainer1", "maintainer2"},
|
||||
config: &common.AutogitConfig{ReviewRequired: false},
|
||||
is_approved: true,
|
||||
},
|
||||
{
|
||||
name: "ReviewRequired=true",
|
||||
maintainers: []string{"maintainer1", "maintainer2"},
|
||||
config: &common.AutogitConfig{ReviewRequired: true},
|
||||
is_approved: false,
|
||||
},
|
||||
{
|
||||
name: "ReviewRequired=true",
|
||||
maintainers: []string{"maintainer1"},
|
||||
config: &common.AutogitConfig{ReviewRequired: true},
|
||||
is_approved: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
m := &common.MaintainershipMap{
|
||||
Data: map[string][]string{"": test.maintainers},
|
||||
}
|
||||
m.Config = test.config
|
||||
if approved := m.IsApproved("", nil, "maintainer1", nil); approved != test.is_approved {
|
||||
t.Error("Expected m.IsApproved()->", test.is_approved, "but didn't get it")
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaintainershipDataCorruption_PackageAppend(t *testing.T) {
|
||||
// Test corruption when append happens (merging project maintainers)
|
||||
// If backing array has capacity, append writes to it.
|
||||
|
||||
// We construct a slice with capacity > len to simulate this common scenario
|
||||
backingArray := make([]string, 1, 10)
|
||||
backingArray[0] = "@g1"
|
||||
|
||||
initialData := map[string][]string{
|
||||
"pkg": backingArray, // len 1, cap 10
|
||||
"": {"prjUser"},
|
||||
}
|
||||
|
||||
m := &common.MaintainershipMap{
|
||||
Data: initialData,
|
||||
}
|
||||
|
||||
groups := []*common.ReviewGroup{
|
||||
{
|
||||
Name: "@g1",
|
||||
Reviewers: []string{"u1"},
|
||||
},
|
||||
}
|
||||
|
||||
// ListPackageMaintainers("pkg", groups)
|
||||
// 1. gets ["@g1"] (cap 10)
|
||||
// 2. Appends "prjUser" -> ["@g1", "prjUser"] (in backing array)
|
||||
// 3. Expands "@g1" -> "u1".
|
||||
// Replace: ["u1", "prjUser"]
|
||||
// Sort: ["prjUser", "u1"]
|
||||
//
|
||||
// The backing array is now ["prjUser", "u1", ...]
|
||||
// The map entry "pkg" is still len 1.
|
||||
// So it sees ["prjUser"].
|
||||
|
||||
list1 := m.ListPackageMaintainers("pkg", groups)
|
||||
t.Logf("List1: %v", list1)
|
||||
|
||||
// ListPackageMaintainers("pkg", nil)
|
||||
// Should be ["@g1", "prjUser"] (because prjUser is appended from project maintainers)
|
||||
// But since backing array is corrupted:
|
||||
// It sees ["prjUser"] (from map) + appends "prjUser" -> ["prjUser", "prjUser"].
|
||||
|
||||
list2 := m.ListPackageMaintainers("pkg", nil)
|
||||
t.Logf("List2: %v", list2)
|
||||
|
||||
if !slices.Contains(list2, "@g1") {
|
||||
t.Errorf("Corruption: '@g1' is missing from second call. Got %v", list2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaintainershipDataCorruption_ProjectInPlace(t *testing.T) {
|
||||
// Test corruption in ListProjectMaintainers when replacement fits in place
|
||||
// e.g. replacing 1 group with 1 user.
|
||||
|
||||
initialData := map[string][]string{
|
||||
"": {"@g1"},
|
||||
}
|
||||
|
||||
m := &common.MaintainershipMap{
|
||||
Data: initialData,
|
||||
}
|
||||
|
||||
groups := []*common.ReviewGroup{
|
||||
{
|
||||
Name: "@g1",
|
||||
Reviewers: []string{"u1"},
|
||||
},
|
||||
}
|
||||
|
||||
// First call with expansion
|
||||
// Replaces "@g1" with "u1". Length stays 1. Modifies backing array in place.
|
||||
list1 := m.ListProjectMaintainers(groups)
|
||||
t.Logf("List1: %v", list1)
|
||||
|
||||
// Second call without expansion
|
||||
// Should return ["@g1"]
|
||||
list2 := m.ListProjectMaintainers(nil)
|
||||
t.Logf("List2: %v", list2)
|
||||
|
||||
if !slices.Contains(list2, "@g1") {
|
||||
t.Errorf("Corruption: '@g1' is missing from second call (Project). Got %v", list2)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
120
common/mock/config.go
Normal file
120
common/mock/config.go
Normal file
@@ -0,0 +1,120 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: config.go
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -source=config.go -destination=mock/config.go -typed
|
||||
//
|
||||
|
||||
// Package mock_common is a generated GoMock package.
|
||||
package mock_common
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
models "src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
)
|
||||
|
||||
// MockGiteaFileContentAndRepoFetcher is a mock of GiteaFileContentAndRepoFetcher interface.
|
||||
type MockGiteaFileContentAndRepoFetcher struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockGiteaFileContentAndRepoFetcherMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockGiteaFileContentAndRepoFetcherMockRecorder is the mock recorder for MockGiteaFileContentAndRepoFetcher.
|
||||
type MockGiteaFileContentAndRepoFetcherMockRecorder struct {
|
||||
mock *MockGiteaFileContentAndRepoFetcher
|
||||
}
|
||||
|
||||
// NewMockGiteaFileContentAndRepoFetcher creates a new mock instance.
|
||||
func NewMockGiteaFileContentAndRepoFetcher(ctrl *gomock.Controller) *MockGiteaFileContentAndRepoFetcher {
|
||||
mock := &MockGiteaFileContentAndRepoFetcher{ctrl: ctrl}
|
||||
mock.recorder = &MockGiteaFileContentAndRepoFetcherMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockGiteaFileContentAndRepoFetcher) EXPECT() *MockGiteaFileContentAndRepoFetcherMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// GetRepository mocks base method.
|
||||
func (m *MockGiteaFileContentAndRepoFetcher) GetRepository(org, repo string) (*models.Repository, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetRepository", org, repo)
|
||||
ret0, _ := ret[0].(*models.Repository)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// GetRepository indicates an expected call of GetRepository.
|
||||
func (mr *MockGiteaFileContentAndRepoFetcherMockRecorder) GetRepository(org, repo any) *MockGiteaFileContentAndRepoFetcherGetRepositoryCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRepository", reflect.TypeOf((*MockGiteaFileContentAndRepoFetcher)(nil).GetRepository), org, repo)
|
||||
return &MockGiteaFileContentAndRepoFetcherGetRepositoryCall{Call: call}
|
||||
}
|
||||
|
||||
// MockGiteaFileContentAndRepoFetcherGetRepositoryCall wrap *gomock.Call
|
||||
type MockGiteaFileContentAndRepoFetcherGetRepositoryCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryCall) Return(arg0 *models.Repository, arg1 error) *MockGiteaFileContentAndRepoFetcherGetRepositoryCall {
|
||||
c.Call = c.Call.Return(arg0, arg1)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryCall) Do(f func(string, string) (*models.Repository, error)) *MockGiteaFileContentAndRepoFetcherGetRepositoryCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryCall) DoAndReturn(f func(string, string) (*models.Repository, error)) *MockGiteaFileContentAndRepoFetcherGetRepositoryCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// GetRepositoryFileContent mocks base method.
|
||||
func (m *MockGiteaFileContentAndRepoFetcher) GetRepositoryFileContent(org, repo, hash, path string) ([]byte, string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetRepositoryFileContent", org, repo, hash, path)
|
||||
ret0, _ := ret[0].([]byte)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// GetRepositoryFileContent indicates an expected call of GetRepositoryFileContent.
|
||||
func (mr *MockGiteaFileContentAndRepoFetcherMockRecorder) GetRepositoryFileContent(org, repo, hash, path any) *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRepositoryFileContent", reflect.TypeOf((*MockGiteaFileContentAndRepoFetcher)(nil).GetRepositoryFileContent), org, repo, hash, path)
|
||||
return &MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall{Call: call}
|
||||
}
|
||||
|
||||
// MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall wrap *gomock.Call
|
||||
type MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall) Return(arg0 []byte, arg1 string, arg2 error) *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall {
|
||||
c.Call = c.Call.Return(arg0, arg1, arg2)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall) Do(f func(string, string, string, string) ([]byte, string, error)) *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall) DoAndReturn(f func(string, string, string, string) ([]byte, string, error)) *MockGiteaFileContentAndRepoFetcherGetRepositoryFileContentCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
1148
common/mock/git_utils.go
Normal file
1148
common/mock/git_utils.go
Normal file
File diff suppressed because it is too large
Load Diff
3598
common/mock/gitea_utils.go
Normal file
3598
common/mock/gitea_utils.go
Normal file
File diff suppressed because it is too large
Load Diff
156
common/mock/maintainership.go
Normal file
156
common/mock/maintainership.go
Normal file
@@ -0,0 +1,156 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: maintainership.go
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -source=maintainership.go -destination=mock/maintainership.go -typed
|
||||
//
|
||||
|
||||
// Package mock_common is a generated GoMock package.
|
||||
package mock_common
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
common "src.opensuse.org/autogits/common"
|
||||
models "src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
)
|
||||
|
||||
// MockMaintainershipData is a mock of MaintainershipData interface.
|
||||
type MockMaintainershipData struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockMaintainershipDataMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockMaintainershipDataMockRecorder is the mock recorder for MockMaintainershipData.
|
||||
type MockMaintainershipDataMockRecorder struct {
|
||||
mock *MockMaintainershipData
|
||||
}
|
||||
|
||||
// NewMockMaintainershipData creates a new mock instance.
|
||||
func NewMockMaintainershipData(ctrl *gomock.Controller) *MockMaintainershipData {
|
||||
mock := &MockMaintainershipData{ctrl: ctrl}
|
||||
mock.recorder = &MockMaintainershipDataMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockMaintainershipData) EXPECT() *MockMaintainershipDataMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// IsApproved mocks base method.
|
||||
func (m *MockMaintainershipData) IsApproved(Pkg string, Reviews []*models.PullReview, Submitter string, ReviewGroups []*common.ReviewGroup) bool {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "IsApproved", Pkg, Reviews, Submitter, ReviewGroups)
|
||||
ret0, _ := ret[0].(bool)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// IsApproved indicates an expected call of IsApproved.
|
||||
func (mr *MockMaintainershipDataMockRecorder) IsApproved(Pkg, Reviews, Submitter, ReviewGroups any) *MockMaintainershipDataIsApprovedCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IsApproved", reflect.TypeOf((*MockMaintainershipData)(nil).IsApproved), Pkg, Reviews, Submitter, ReviewGroups)
|
||||
return &MockMaintainershipDataIsApprovedCall{Call: call}
|
||||
}
|
||||
|
||||
// MockMaintainershipDataIsApprovedCall wrap *gomock.Call
|
||||
type MockMaintainershipDataIsApprovedCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockMaintainershipDataIsApprovedCall) Return(arg0 bool) *MockMaintainershipDataIsApprovedCall {
|
||||
c.Call = c.Call.Return(arg0)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockMaintainershipDataIsApprovedCall) Do(f func(string, []*models.PullReview, string, []*common.ReviewGroup) bool) *MockMaintainershipDataIsApprovedCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockMaintainershipDataIsApprovedCall) DoAndReturn(f func(string, []*models.PullReview, string, []*common.ReviewGroup) bool) *MockMaintainershipDataIsApprovedCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// ListPackageMaintainers mocks base method.
|
||||
func (m *MockMaintainershipData) ListPackageMaintainers(Pkg string, OptionalGroupExpasion []*common.ReviewGroup) []string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ListPackageMaintainers", Pkg, OptionalGroupExpasion)
|
||||
ret0, _ := ret[0].([]string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ListPackageMaintainers indicates an expected call of ListPackageMaintainers.
|
||||
func (mr *MockMaintainershipDataMockRecorder) ListPackageMaintainers(Pkg, OptionalGroupExpasion any) *MockMaintainershipDataListPackageMaintainersCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListPackageMaintainers", reflect.TypeOf((*MockMaintainershipData)(nil).ListPackageMaintainers), Pkg, OptionalGroupExpasion)
|
||||
return &MockMaintainershipDataListPackageMaintainersCall{Call: call}
|
||||
}
|
||||
|
||||
// MockMaintainershipDataListPackageMaintainersCall wrap *gomock.Call
|
||||
type MockMaintainershipDataListPackageMaintainersCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockMaintainershipDataListPackageMaintainersCall) Return(arg0 []string) *MockMaintainershipDataListPackageMaintainersCall {
|
||||
c.Call = c.Call.Return(arg0)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockMaintainershipDataListPackageMaintainersCall) Do(f func(string, []*common.ReviewGroup) []string) *MockMaintainershipDataListPackageMaintainersCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockMaintainershipDataListPackageMaintainersCall) DoAndReturn(f func(string, []*common.ReviewGroup) []string) *MockMaintainershipDataListPackageMaintainersCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// ListProjectMaintainers mocks base method.
|
||||
func (m *MockMaintainershipData) ListProjectMaintainers(OptionalGroupExpansion []*common.ReviewGroup) []string {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "ListProjectMaintainers", OptionalGroupExpansion)
|
||||
ret0, _ := ret[0].([]string)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// ListProjectMaintainers indicates an expected call of ListProjectMaintainers.
|
||||
func (mr *MockMaintainershipDataMockRecorder) ListProjectMaintainers(OptionalGroupExpansion any) *MockMaintainershipDataListProjectMaintainersCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListProjectMaintainers", reflect.TypeOf((*MockMaintainershipData)(nil).ListProjectMaintainers), OptionalGroupExpansion)
|
||||
return &MockMaintainershipDataListProjectMaintainersCall{Call: call}
|
||||
}
|
||||
|
||||
// MockMaintainershipDataListProjectMaintainersCall wrap *gomock.Call
|
||||
type MockMaintainershipDataListProjectMaintainersCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockMaintainershipDataListProjectMaintainersCall) Return(arg0 []string) *MockMaintainershipDataListProjectMaintainersCall {
|
||||
c.Call = c.Call.Return(arg0)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockMaintainershipDataListProjectMaintainersCall) Do(f func([]*common.ReviewGroup) []string) *MockMaintainershipDataListProjectMaintainersCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockMaintainershipDataListProjectMaintainersCall) DoAndReturn(f func([]*common.ReviewGroup) []string) *MockMaintainershipDataListProjectMaintainersCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
85
common/mock/obs_utils.go
Normal file
85
common/mock/obs_utils.go
Normal file
@@ -0,0 +1,85 @@
|
||||
// Code generated by MockGen. DO NOT EDIT.
|
||||
// Source: obs_utils.go
|
||||
//
|
||||
// Generated by this command:
|
||||
//
|
||||
// mockgen -source=obs_utils.go -destination=mock/obs_utils.go -typed
|
||||
//
|
||||
|
||||
// Package mock_common is a generated GoMock package.
|
||||
package mock_common
|
||||
|
||||
import (
|
||||
reflect "reflect"
|
||||
|
||||
gomock "go.uber.org/mock/gomock"
|
||||
common "src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
// MockObsStatusFetcherWithState is a mock of ObsStatusFetcherWithState interface.
|
||||
type MockObsStatusFetcherWithState struct {
|
||||
ctrl *gomock.Controller
|
||||
recorder *MockObsStatusFetcherWithStateMockRecorder
|
||||
isgomock struct{}
|
||||
}
|
||||
|
||||
// MockObsStatusFetcherWithStateMockRecorder is the mock recorder for MockObsStatusFetcherWithState.
|
||||
type MockObsStatusFetcherWithStateMockRecorder struct {
|
||||
mock *MockObsStatusFetcherWithState
|
||||
}
|
||||
|
||||
// NewMockObsStatusFetcherWithState creates a new mock instance.
|
||||
func NewMockObsStatusFetcherWithState(ctrl *gomock.Controller) *MockObsStatusFetcherWithState {
|
||||
mock := &MockObsStatusFetcherWithState{ctrl: ctrl}
|
||||
mock.recorder = &MockObsStatusFetcherWithStateMockRecorder{mock}
|
||||
return mock
|
||||
}
|
||||
|
||||
// EXPECT returns an object that allows the caller to indicate expected use.
|
||||
func (m *MockObsStatusFetcherWithState) EXPECT() *MockObsStatusFetcherWithStateMockRecorder {
|
||||
return m.recorder
|
||||
}
|
||||
|
||||
// BuildStatusWithState mocks base method.
|
||||
func (m *MockObsStatusFetcherWithState) BuildStatusWithState(project string, opts *common.BuildResultOptions, packages ...string) (*common.BuildResultList, error) {
|
||||
m.ctrl.T.Helper()
|
||||
varargs := []any{project, opts}
|
||||
for _, a := range packages {
|
||||
varargs = append(varargs, a)
|
||||
}
|
||||
ret := m.ctrl.Call(m, "BuildStatusWithState", varargs...)
|
||||
ret0, _ := ret[0].(*common.BuildResultList)
|
||||
ret1, _ := ret[1].(error)
|
||||
return ret0, ret1
|
||||
}
|
||||
|
||||
// BuildStatusWithState indicates an expected call of BuildStatusWithState.
|
||||
func (mr *MockObsStatusFetcherWithStateMockRecorder) BuildStatusWithState(project, opts any, packages ...any) *MockObsStatusFetcherWithStateBuildStatusWithStateCall {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
varargs := append([]any{project, opts}, packages...)
|
||||
call := mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "BuildStatusWithState", reflect.TypeOf((*MockObsStatusFetcherWithState)(nil).BuildStatusWithState), varargs...)
|
||||
return &MockObsStatusFetcherWithStateBuildStatusWithStateCall{Call: call}
|
||||
}
|
||||
|
||||
// MockObsStatusFetcherWithStateBuildStatusWithStateCall wrap *gomock.Call
|
||||
type MockObsStatusFetcherWithStateBuildStatusWithStateCall struct {
|
||||
*gomock.Call
|
||||
}
|
||||
|
||||
// Return rewrite *gomock.Call.Return
|
||||
func (c *MockObsStatusFetcherWithStateBuildStatusWithStateCall) Return(arg0 *common.BuildResultList, arg1 error) *MockObsStatusFetcherWithStateBuildStatusWithStateCall {
|
||||
c.Call = c.Call.Return(arg0, arg1)
|
||||
return c
|
||||
}
|
||||
|
||||
// Do rewrite *gomock.Call.Do
|
||||
func (c *MockObsStatusFetcherWithStateBuildStatusWithStateCall) Do(f func(string, *common.BuildResultOptions, ...string) (*common.BuildResultList, error)) *MockObsStatusFetcherWithStateBuildStatusWithStateCall {
|
||||
c.Call = c.Call.Do(f)
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn rewrite *gomock.Call.DoAndReturn
|
||||
func (c *MockObsStatusFetcherWithStateBuildStatusWithStateCall) DoAndReturn(f func(string, *common.BuildResultOptions, ...string) (*common.BuildResultList, error)) *MockObsStatusFetcherWithStateBuildStatusWithStateCall {
|
||||
c.Call = c.Call.DoAndReturn(f)
|
||||
return c
|
||||
}
|
||||
@@ -116,13 +116,18 @@ type Flags struct {
|
||||
Contents string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
type ProjectLinkMeta struct {
|
||||
Project string `xml:"project,attr"`
|
||||
}
|
||||
|
||||
type ProjectMeta struct {
|
||||
XMLName xml.Name `xml:"project"`
|
||||
Name string `xml:"name,attr"`
|
||||
Title string `xml:"title"`
|
||||
Description string `xml:"description"`
|
||||
Url string `xml:"url,omitempty"`
|
||||
ScmSync string `xml:"scmsync"`
|
||||
ScmSync string `xml:"scmsync,omitempty"`
|
||||
Link []ProjectLinkMeta `xml:"link"`
|
||||
Persons []PersonRepoMeta `xml:"person"`
|
||||
Groups []GroupRepoMeta `xml:"group"`
|
||||
Repositories []RepositoryMeta `xml:"repository"`
|
||||
@@ -138,8 +143,8 @@ type ProjectMeta struct {
|
||||
type PackageMeta struct {
|
||||
XMLName xml.Name `xml:"package"`
|
||||
Name string `xml:"name,attr"`
|
||||
Project string `xml:"project,attr"`
|
||||
ScmSync string `xml:"scmsync"`
|
||||
Project string `xml:"project,attr,omitempty"`
|
||||
ScmSync string `xml:"scmsync,omitempty"`
|
||||
Persons []PersonRepoMeta `xml:"person"`
|
||||
Groups []GroupRepoMeta `xml:"group"`
|
||||
|
||||
@@ -600,15 +605,16 @@ func PackageBuildStatusComp(A, B *PackageBuildStatus) int {
|
||||
}
|
||||
|
||||
type BuildResult struct {
|
||||
XMLName xml.Name `xml:"result" json:"xml,omitempty"`
|
||||
Project string `xml:"project,attr"`
|
||||
Repository string `xml:"repository,attr"`
|
||||
Arch string `xml:"arch,attr"`
|
||||
Code string `xml:"code,attr"`
|
||||
Dirty bool `xml:"dirty,attr"`
|
||||
ScmSync string `xml:"scmsync"`
|
||||
ScmInfo string `xml:"scminfo"`
|
||||
Dirty bool `xml:"dirty,attr,omitempty"`
|
||||
ScmSync string `xml:"scmsync,omitempty"`
|
||||
ScmInfo string `xml:"scminfo,omitempty"`
|
||||
Status []*PackageBuildStatus `xml:"status"`
|
||||
Binaries []BinaryList `xml:"binarylist"`
|
||||
Binaries []BinaryList `xml:"binarylist,omitempty"`
|
||||
|
||||
LastUpdate time.Time
|
||||
}
|
||||
@@ -635,8 +641,8 @@ type BinaryList struct {
|
||||
}
|
||||
|
||||
type BuildResultList struct {
|
||||
XMLName xml.Name `xml:"resultlist"`
|
||||
State string `xml:"state,attr"`
|
||||
XMLName xml.Name `xml:"resultlist"`
|
||||
State string `xml:"state,attr"`
|
||||
Result []*BuildResult `xml:"result"`
|
||||
|
||||
isLastBuild bool
|
||||
|
||||
414
common/pr.go
414
common/pr.go
@@ -23,7 +23,8 @@ type PRSet struct {
|
||||
PRs []*PRInfo
|
||||
Config *AutogitConfig
|
||||
|
||||
BotUser string
|
||||
BotUser string
|
||||
HasAutoStaging bool
|
||||
}
|
||||
|
||||
func (prinfo *PRInfo) PRComponents() (org string, repo string, idx int64) {
|
||||
@@ -33,6 +34,41 @@ func (prinfo *PRInfo) PRComponents() (org string, repo string, idx int64) {
|
||||
return
|
||||
}
|
||||
|
||||
func (prinfo *PRInfo) RemoveReviewers(gitea GiteaUnreviewTimelineFetcher, Reviewers []string, BotUser string) {
|
||||
org, repo, idx := prinfo.PRComponents()
|
||||
tl, err := gitea.GetTimeline(org, repo, idx)
|
||||
if err != nil {
|
||||
LogError("Failed to fetch timeline for", PRtoString(prinfo.PR), err)
|
||||
}
|
||||
|
||||
// find review request for each reviewer
|
||||
ReviewersToUnrequest := Reviewers
|
||||
ReviewersAlreadyChecked := []string{}
|
||||
|
||||
for _, tlc := range tl {
|
||||
if tlc.Type == TimelineCommentType_ReviewRequested && tlc.Assignee != nil {
|
||||
user := tlc.Assignee.UserName
|
||||
|
||||
if idx := slices.Index(ReviewersToUnrequest, user); idx >= 0 && !slices.Contains(ReviewersAlreadyChecked, user) {
|
||||
if tlc.User != nil && tlc.User.UserName == BotUser {
|
||||
ReviewersAlreadyChecked = append(ReviewersAlreadyChecked, user)
|
||||
continue
|
||||
}
|
||||
ReviewersToUnrequest = slices.Delete(ReviewersToUnrequest, idx, idx+1)
|
||||
if len(Reviewers) == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
LogDebug("Unrequesting reviewes for", PRtoString(prinfo.PR), ReviewersToUnrequest)
|
||||
err = gitea.UnrequestReview(org, repo, idx, ReviewersToUnrequest...)
|
||||
if err != nil {
|
||||
LogError("Failed to unrequest reviewers for", PRtoString(prinfo.PR), err)
|
||||
}
|
||||
}
|
||||
|
||||
func readPRData(gitea GiteaPRFetcher, pr *models.PullRequest, currentSet []*PRInfo, config *AutogitConfig) ([]*PRInfo, error) {
|
||||
for _, p := range currentSet {
|
||||
if pr.Index == p.PR.Index && pr.Base.Repo.Name == p.PR.Base.Repo.Name && pr.Base.Repo.Owner.UserName == p.PR.Base.Repo.Owner.UserName {
|
||||
@@ -63,7 +99,7 @@ func readPRData(gitea GiteaPRFetcher, pr *models.PullRequest, currentSet []*PRIn
|
||||
|
||||
var Timeline_RefIssueNotFound error = errors.New("RefIssue not found on the timeline")
|
||||
|
||||
func LastPrjGitRefOnTimeline(botUser string, gitea GiteaPRTimelineFetcher, org, repo string, num int64, config *AutogitConfig) (*models.PullRequest, error) {
|
||||
func LastPrjGitRefOnTimeline(botUser string, gitea GiteaPRTimelineReviewFetcher, org, repo string, num int64, config *AutogitConfig) (*models.PullRequest, error) {
|
||||
timeline, err := gitea.GetTimeline(org, repo, num)
|
||||
if err != nil {
|
||||
LogError("Failed to fetch timeline for", org, repo, "#", num, err)
|
||||
@@ -88,14 +124,19 @@ func LastPrjGitRefOnTimeline(botUser string, gitea GiteaPRTimelineFetcher, org,
|
||||
}
|
||||
|
||||
pr, err := gitea.GetPullRequest(prjGitOrg, prjGitRepo, issue.Index)
|
||||
switch err.(type) {
|
||||
case *repository.RepoGetPullRequestNotFound: // deleted?
|
||||
continue
|
||||
default:
|
||||
LogDebug("PrjGit RefIssue fetch error from timeline", issue.Index, err)
|
||||
if err != nil {
|
||||
switch err.(type) {
|
||||
case *repository.RepoGetPullRequestNotFound: // deleted?
|
||||
continue
|
||||
default:
|
||||
LogDebug("PrjGit RefIssue fetch error from timeline", issue.Index, err)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if pr.Base.Ref != prjGitBranch {
|
||||
LogDebug("found ref PR on timeline:", PRtoString(pr))
|
||||
if pr.Base.Name != prjGitBranch {
|
||||
LogDebug(" -> not matching:", pr.Base.Name, prjGitBranch)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -115,10 +156,12 @@ func LastPrjGitRefOnTimeline(botUser string, gitea GiteaPRTimelineFetcher, org,
|
||||
return nil, Timeline_RefIssueNotFound
|
||||
}
|
||||
|
||||
func FetchPRSet(user string, gitea GiteaPRTimelineFetcher, org, repo string, num int64, config *AutogitConfig) (*PRSet, error) {
|
||||
func FetchPRSet(user string, gitea GiteaPRTimelineReviewFetcher, org, repo string, num int64, config *AutogitConfig) (*PRSet, error) {
|
||||
var pr *models.PullRequest
|
||||
var err error
|
||||
|
||||
gitea.ResetTimelineCache(org, repo, num)
|
||||
|
||||
prjGitOrg, prjGitRepo, _ := config.GetPrjGit()
|
||||
if prjGitOrg == org && prjGitRepo == repo {
|
||||
if pr, err = gitea.GetPullRequest(org, repo, num); err != nil {
|
||||
@@ -141,6 +184,16 @@ func FetchPRSet(user string, gitea GiteaPRTimelineFetcher, org, repo string, num
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pr := range prs {
|
||||
org, repo, idx := pr.PRComponents()
|
||||
gitea.ResetTimelineCache(org, repo, idx)
|
||||
reviews, err := FetchGiteaReviews(gitea, org, repo, idx)
|
||||
if err != nil {
|
||||
LogError("Error fetching reviews for", PRtoString(pr.PR), ":", err)
|
||||
}
|
||||
pr.Reviews = reviews
|
||||
}
|
||||
|
||||
return &PRSet{
|
||||
PRs: prs,
|
||||
Config: config,
|
||||
@@ -148,6 +201,12 @@ func FetchPRSet(user string, gitea GiteaPRTimelineFetcher, org, repo string, num
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (prset *PRSet) RemoveReviewers(gitea GiteaUnreviewTimelineFetcher, reviewers []string) {
|
||||
for _, prinfo := range prset.PRs {
|
||||
prinfo.RemoveReviewers(gitea, reviewers, prset.BotUser)
|
||||
}
|
||||
}
|
||||
|
||||
func (rs *PRSet) Find(pr *models.PullRequest) (*PRInfo, bool) {
|
||||
for _, p := range rs.PRs {
|
||||
if p.PR.Base.RepoID == pr.Base.RepoID &&
|
||||
@@ -233,67 +292,150 @@ next_rs:
|
||||
}
|
||||
|
||||
for _, pr := range prjpr_set {
|
||||
if prinfo.PR.Base.Repo.Owner.UserName == pr.Org && prinfo.PR.Base.Repo.Name == pr.Repo && prinfo.PR.Index == pr.Num {
|
||||
if strings.EqualFold(prinfo.PR.Base.Repo.Owner.UserName, pr.Org) && strings.EqualFold(prinfo.PR.Base.Repo.Name, pr.Repo) && prinfo.PR.Index == pr.Num {
|
||||
continue next_rs
|
||||
}
|
||||
}
|
||||
LogDebug(" PR: ", PRtoString(prinfo.PR), "not found in project git PRSet")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (rs *PRSet) AssignReviewers(gitea GiteaReviewFetcherAndRequester, maintainers MaintainershipData) error {
|
||||
func (rs *PRSet) FindMissingAndExtraReviewers(maintainers MaintainershipData, idx int) (missing, extra []string) {
|
||||
configReviewers := ParseReviewers(rs.Config.Reviewers)
|
||||
|
||||
for _, pr := range rs.PRs {
|
||||
reviewers := []string{}
|
||||
// remove reviewers that were already requested and are not stale
|
||||
prjMaintainers := maintainers.ListProjectMaintainers(nil)
|
||||
LogDebug("project maintainers:", prjMaintainers)
|
||||
|
||||
if rs.IsPrjGitPR(pr.PR) {
|
||||
reviewers = slices.Concat(configReviewers.Prj, configReviewers.PrjOptional)
|
||||
LogDebug("PrjGit submitter:", pr.PR.User.UserName)
|
||||
if len(rs.PRs) == 1 {
|
||||
reviewers = slices.Concat(reviewers, maintainers.ListProjectMaintainers(nil))
|
||||
}
|
||||
pr := rs.PRs[idx]
|
||||
if rs.IsPrjGitPR(pr.PR) {
|
||||
missing = slices.Concat(configReviewers.Prj, configReviewers.PrjOptional)
|
||||
if rs.HasAutoStaging {
|
||||
missing = append(missing, Bot_BuildReview)
|
||||
}
|
||||
LogDebug("PrjGit submitter:", pr.PR.User.UserName)
|
||||
// only need project maintainer reviews if:
|
||||
// * not created by a bot and has other PRs, or
|
||||
// * not created by maintainer
|
||||
noReviewPRCreators := []string{}
|
||||
if !rs.Config.ReviewRequired {
|
||||
noReviewPRCreators = prjMaintainers
|
||||
}
|
||||
if len(rs.PRs) > 1 {
|
||||
noReviewPRCreators = append(noReviewPRCreators, rs.BotUser)
|
||||
}
|
||||
if slices.Contains(noReviewPRCreators, pr.PR.User.UserName) || pr.Reviews.IsReviewedByOneOf(prjMaintainers...) {
|
||||
LogDebug("Project already reviewed by a project maintainer, remove rest")
|
||||
// do not remove reviewers if they are also maintainers
|
||||
prjMaintainers = slices.DeleteFunc(prjMaintainers, func(m string) bool { return slices.Contains(missing, m) })
|
||||
extra = slices.Concat(prjMaintainers, []string{rs.BotUser})
|
||||
} else {
|
||||
pkg := pr.PR.Base.Repo.Name
|
||||
reviewers = slices.Concat(configReviewers.Pkg, maintainers.ListProjectMaintainers(nil), maintainers.ListPackageMaintainers(pkg, nil), configReviewers.PkgOptional)
|
||||
}
|
||||
|
||||
slices.Sort(reviewers)
|
||||
reviewers = slices.Compact(reviewers)
|
||||
|
||||
// submitters do not need to review their own work
|
||||
if idx := slices.Index(reviewers, pr.PR.User.UserName); idx != -1 {
|
||||
reviewers = slices.Delete(reviewers, idx, idx+1)
|
||||
}
|
||||
|
||||
LogDebug("PR: ", pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
LogDebug("reviewers for PR:", reviewers)
|
||||
|
||||
// remove reviewers that were already requested and are not stale
|
||||
reviews, err := FetchGiteaReviews(gitea, reviewers, pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
if err != nil {
|
||||
LogError("Error fetching reviews:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
for idx := 0; idx < len(reviewers); {
|
||||
user := reviewers[idx]
|
||||
if reviews.HasPendingReviewBy(user) || reviews.IsReviewedBy(user) {
|
||||
reviewers = slices.Delete(reviewers, idx, idx+1)
|
||||
LogDebug("removing reviewer:", user)
|
||||
// if bot not created PrjGit or prj maintainer, we need to add project reviewers here
|
||||
if slices.Contains(noReviewPRCreators, pr.PR.User.UserName) {
|
||||
LogDebug("No need for project maintainers")
|
||||
extra = slices.Concat(prjMaintainers, []string{rs.BotUser})
|
||||
} else {
|
||||
idx++
|
||||
LogDebug("Adding prjMaintainers to PrjGit")
|
||||
missing = append(missing, prjMaintainers...)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
pkg := pr.PR.Base.Repo.Name
|
||||
pkgMaintainers := maintainers.ListPackageMaintainers(pkg, nil)
|
||||
Maintainers := slices.Concat(prjMaintainers, pkgMaintainers)
|
||||
noReviewPkgPRCreators := []string{}
|
||||
if !rs.Config.ReviewRequired {
|
||||
noReviewPkgPRCreators = pkgMaintainers
|
||||
}
|
||||
|
||||
// get maintainers associated with the PR too
|
||||
if len(reviewers) > 0 {
|
||||
LogDebug("Requesting reviews from:", reviewers)
|
||||
LogDebug("packakge maintainers:", Maintainers)
|
||||
|
||||
missing = slices.Concat(configReviewers.Pkg, configReviewers.PkgOptional)
|
||||
if slices.Contains(noReviewPkgPRCreators, pr.PR.User.UserName) || pr.Reviews.IsReviewedByOneOf(Maintainers...) {
|
||||
// submitter is maintainer or already reviewed
|
||||
LogDebug("Package reviewed by maintainer (or subitter is maintainer), remove the rest of them")
|
||||
// do not remove reviewers if they are also maintainers
|
||||
Maintainers = slices.DeleteFunc(Maintainers, func(m string) bool { return slices.Contains(missing, m) })
|
||||
extra = slices.Concat(Maintainers, []string{rs.BotUser})
|
||||
} else {
|
||||
// maintainer review is missing
|
||||
LogDebug("Adding package maintainers to package git")
|
||||
missing = append(missing, pkgMaintainers...)
|
||||
}
|
||||
}
|
||||
|
||||
slices.Sort(missing)
|
||||
missing = slices.Compact(missing)
|
||||
|
||||
slices.Sort(extra)
|
||||
extra = slices.Compact(extra)
|
||||
|
||||
// submitters cannot review their own work
|
||||
if idx := slices.Index(missing, pr.PR.User.UserName); idx != -1 {
|
||||
missing = slices.Delete(missing, idx, idx+1)
|
||||
}
|
||||
|
||||
LogDebug("PR: ", PRtoString(pr.PR))
|
||||
LogDebug(" preliminary add reviewers for PR:", missing)
|
||||
LogDebug(" preliminary rm reviewers for PR:", extra)
|
||||
|
||||
// remove missing reviewers that are already done or already pending
|
||||
for idx := 0; idx < len(missing); {
|
||||
user := missing[idx]
|
||||
if pr.Reviews.HasPendingReviewBy(user) || pr.Reviews.IsReviewedBy(user) {
|
||||
missing = slices.Delete(missing, idx, idx+1)
|
||||
LogDebug(" removing done/pending reviewer:", user)
|
||||
} else {
|
||||
idx++
|
||||
}
|
||||
}
|
||||
|
||||
// remove extra reviews that are actually only pending, and only pending by us
|
||||
for idx := 0; idx < len(extra); {
|
||||
user := extra[idx]
|
||||
rr := pr.Reviews.FindReviewRequester(user)
|
||||
if rr != nil && rr.User.UserName == rs.BotUser && pr.Reviews.HasPendingReviewBy(user) {
|
||||
// good to remove this review
|
||||
idx++
|
||||
} else {
|
||||
// this review should not be considered as extra by us
|
||||
LogDebug(" - cannot find? to remove", user)
|
||||
if rr != nil {
|
||||
LogDebug(" ", rr.User.UserName, "vs.", rs.BotUser, pr.Reviews.HasPendingReviewBy(user))
|
||||
}
|
||||
extra = slices.Delete(extra, idx, idx+1)
|
||||
}
|
||||
}
|
||||
|
||||
LogDebug(" add reviewers for PR:", missing)
|
||||
LogDebug(" rm reviewers for PR:", extra)
|
||||
|
||||
return missing, extra
|
||||
}
|
||||
|
||||
func (rs *PRSet) AssignReviewers(gitea GiteaReviewFetcherAndRequesterAndUnrequester, maintainers MaintainershipData) error {
|
||||
for idx, pr := range rs.PRs {
|
||||
missingReviewers, extraReviewers := rs.FindMissingAndExtraReviewers(maintainers, idx)
|
||||
|
||||
if len(missingReviewers) > 0 {
|
||||
LogDebug(" Requesting reviews from:", missingReviewers)
|
||||
if !IsDryRun {
|
||||
for _, r := range reviewers {
|
||||
for _, r := range missingReviewers {
|
||||
if _, err := gitea.RequestReviews(pr.PR, r); err != nil {
|
||||
LogError("Cannot create reviews on", fmt.Sprintf("%s/%s!%d for [%s]", pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index, strings.Join(reviewers, ", ")), err)
|
||||
LogError("Cannot create reviews on", PRtoString(pr.PR), "for user:", r, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(extraReviewers) > 0 {
|
||||
LogDebug(" UnRequesting reviews from:", extraReviewers)
|
||||
if !IsDryRun {
|
||||
for _, r := range extraReviewers {
|
||||
org, repo, idx := pr.PRComponents()
|
||||
if err := gitea.UnrequestReview(org, repo, idx, r); err != nil {
|
||||
LogError("Cannot unrequest reviews on", PRtoString(pr.PR), "for user:", r, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -319,11 +461,12 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
if err == nil && prjgit != nil {
|
||||
reviewers := slices.Concat(configReviewers.Prj, maintainers.ListProjectMaintainers(groups))
|
||||
LogDebug("Fetching reviews for", prjgit.PR.Base.Repo.Owner.UserName, prjgit.PR.Base.Repo.Name, prjgit.PR.Index)
|
||||
r, err := FetchGiteaReviews(gitea, reviewers, prjgit.PR.Base.Repo.Owner.UserName, prjgit.PR.Base.Repo.Name, prjgit.PR.Index)
|
||||
r, err := FetchGiteaReviews(gitea, prjgit.PR.Base.Repo.Owner.UserName, prjgit.PR.Base.Repo.Name, prjgit.PR.Index)
|
||||
if err != nil {
|
||||
LogError("Cannot fetch gita reaviews for PR:", err)
|
||||
return false
|
||||
}
|
||||
r.RequestedReviewers = reviewers
|
||||
prjgit.Reviews = r
|
||||
if prjgit.Reviews.IsManualMergeOK() {
|
||||
is_manually_reviewed_ok = true
|
||||
@@ -339,11 +482,12 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
pkg := pr.PR.Base.Repo.Name
|
||||
reviewers := slices.Concat(configReviewers.Pkg, maintainers.ListPackageMaintainers(pkg, groups))
|
||||
LogDebug("Fetching reviews for", pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
r, err := FetchGiteaReviews(gitea, reviewers, pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
r, err := FetchGiteaReviews(gitea, pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
if err != nil {
|
||||
LogError("Cannot fetch gita reaviews for PR:", err)
|
||||
return false
|
||||
}
|
||||
r.RequestedReviewers = reviewers
|
||||
pr.Reviews = r
|
||||
if !pr.Reviews.IsManualMergeOK() {
|
||||
LogInfo("Not approved manual merge. PR:", pr.PR.URL)
|
||||
@@ -365,6 +509,9 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
var pkg string
|
||||
if rs.IsPrjGitPR(pr.PR) {
|
||||
reviewers = configReviewers.Prj
|
||||
if rs.HasAutoStaging {
|
||||
reviewers = append(reviewers, Bot_BuildReview)
|
||||
}
|
||||
pkg = ""
|
||||
} else {
|
||||
reviewers = configReviewers.Pkg
|
||||
@@ -376,11 +523,12 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
return false
|
||||
}
|
||||
|
||||
r, err := FetchGiteaReviews(gitea, reviewers, pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
r, err := FetchGiteaReviews(gitea, pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index)
|
||||
if err != nil {
|
||||
LogError("Cannot fetch gitea reaviews for PR:", err)
|
||||
return false
|
||||
}
|
||||
r.RequestedReviewers = reviewers
|
||||
|
||||
is_manually_reviewed_ok = r.IsApproved()
|
||||
LogDebug("PR to", pr.PR.Base.Repo.Name, "reviewed?", is_manually_reviewed_ok)
|
||||
@@ -393,7 +541,7 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
|
||||
if need_maintainer_review := !rs.IsPrjGitPR(pr.PR) || pr.PR.User.UserName != rs.BotUser; need_maintainer_review {
|
||||
// Do not expand groups here, as the group-review-bot will ACK if group has reviewed.
|
||||
if is_manually_reviewed_ok = maintainers.IsApproved(pkg, r.reviews, pr.PR.User.UserName, nil); !is_manually_reviewed_ok {
|
||||
if is_manually_reviewed_ok = maintainers.IsApproved(pkg, r.Reviews, pr.PR.User.UserName, nil); !is_manually_reviewed_ok {
|
||||
LogDebug(" not approved?", pkg)
|
||||
return false
|
||||
}
|
||||
@@ -404,6 +552,145 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
|
||||
return is_manually_reviewed_ok
|
||||
}
|
||||
|
||||
func (rs *PRSet) AddMergeCommit(git Git, remote string, pr int) bool {
|
||||
prinfo := rs.PRs[pr]
|
||||
|
||||
LogDebug("Adding merge commit for %s", PRtoString(prinfo.PR))
|
||||
if !prinfo.PR.AllowMaintainerEdit {
|
||||
LogError(" PR is not editable by maintainer")
|
||||
return false
|
||||
}
|
||||
|
||||
repo := prinfo.PR.Base.Repo
|
||||
head := prinfo.PR.Head
|
||||
br := rs.Config.Branch
|
||||
if len(br) == 0 {
|
||||
br = prinfo.PR.Base.Name
|
||||
}
|
||||
|
||||
msg := fmt.Sprintf("Merge branch '%s' into %s", br, head.Name)
|
||||
if err := git.GitExec(repo.Name, "merge", "--no-ff", "--no-commit", "-X", "theirs", head.Sha); err != nil {
|
||||
if err := git.GitExec(repo.Name, "merge", "--no-ff", "--no-commit", "--allow-unrelated-histories", "-X", "theirs", head.Sha); err != nil {
|
||||
return false
|
||||
}
|
||||
LogError("WARNING: Merging unrelated histories")
|
||||
}
|
||||
|
||||
// ensure only files that are in head.Sha are kept
|
||||
git.GitExecOrPanic(repo.Name, "read-tree", "-m", head.Sha)
|
||||
git.GitExecOrPanic(repo.Name, "commit", "-m", msg)
|
||||
git.GitExecOrPanic(repo.Name, "clean", "-fxd")
|
||||
|
||||
if !IsDryRun {
|
||||
git.GitExecOrPanic(repo.Name, "push", remote, "HEAD:"+head.Name)
|
||||
prinfo.PR.Head.Sha = strings.TrimSpace(git.GitExecWithOutputOrPanic(repo.Name, "rev-list", "-1", "HEAD")) // need to update as it's pushed but pr not refetched
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (rs *PRSet) HasMerge(git Git, pr int) bool {
|
||||
prinfo := rs.PRs[pr]
|
||||
|
||||
repo := prinfo.PR.Base.Repo
|
||||
head := prinfo.PR.Head
|
||||
br := rs.Config.Branch
|
||||
if len(br) == 0 {
|
||||
br = prinfo.PR.Base.Name
|
||||
}
|
||||
|
||||
parents, err := git.GitExecWithOutput(repo.Name, "show", "-s", "--format=%P", head.Sha)
|
||||
if err == nil {
|
||||
p := strings.Fields(strings.TrimSpace(parents))
|
||||
if len(p) == 2 {
|
||||
targetHead, _ := git.GitExecWithOutput(repo.Name, "rev-parse", "HEAD")
|
||||
targetHead = strings.TrimSpace(targetHead)
|
||||
if p[0] == targetHead || p[1] == targetHead {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (rs *PRSet) PrepareForMerge(git Git) bool {
|
||||
// verify that package can merge here. Checkout current target branch of each PRSet, make a temporary branch
|
||||
// PR_#_mergetest and perform the merge based
|
||||
|
||||
if rs.Config.MergeMode == MergeModeDevel {
|
||||
return true // always can merge as we set branch here, not merge anything
|
||||
} else {
|
||||
// make sure that all the package PRs are in mergeable state
|
||||
for idx, prinfo := range rs.PRs {
|
||||
if rs.IsPrjGitPR(prinfo.PR) {
|
||||
continue
|
||||
}
|
||||
|
||||
repo := prinfo.PR.Base.Repo
|
||||
head := prinfo.PR.Head
|
||||
br := rs.Config.Branch
|
||||
if len(br) == 0 {
|
||||
br = prinfo.PR.Base.Name
|
||||
}
|
||||
|
||||
remote, err := git.GitClone(repo.Name, br, repo.SSHURL)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
git.GitExecOrPanic(repo.Name, "fetch", remote, head.Sha)
|
||||
switch rs.Config.MergeMode {
|
||||
case MergeModeFF:
|
||||
if err := git.GitExec(repo.Name, "merge-base", "--is-ancestor", "HEAD", head.Sha); err != nil {
|
||||
return false
|
||||
}
|
||||
case MergeModeReplace:
|
||||
Verify:
|
||||
if err := git.GitExec(repo.Name, "merge-base", "--is-ancestor", "HEAD", head.Sha); err != nil {
|
||||
if !rs.HasMerge(git, idx) {
|
||||
forkRemote, err := git.GitClone(repo.Name, head.Name, head.Repo.SSHURL)
|
||||
if err != nil {
|
||||
LogError("Failed to clone head repo:", head.Name, head.Repo.SSHURL)
|
||||
return false
|
||||
}
|
||||
LogDebug("Merge commit is missing and this is not FF merge possibility")
|
||||
git.GitExecOrPanic(repo.Name, "checkout", remote+"/"+br)
|
||||
if !rs.AddMergeCommit(git, forkRemote, idx) {
|
||||
return false
|
||||
}
|
||||
if !IsDryRun {
|
||||
goto Verify
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// now we check project git if mergeable
|
||||
prjgit_info, err := rs.GetPrjGitPR()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
prjgit := prjgit_info.PR
|
||||
|
||||
_, _, prjgitBranch := rs.Config.GetPrjGit()
|
||||
remote, err := git.GitClone(DefaultGitPrj, prjgitBranch, prjgit.Base.Repo.SSHURL)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
testBranch := fmt.Sprintf("PR_%d_mergetest", prjgit.Index)
|
||||
git.GitExecOrPanic(DefaultGitPrj, "fetch", remote, prjgit.Head.Sha)
|
||||
if err := git.GitExec(DefaultGitPrj, "checkout", "-B", testBranch, prjgit.Base.Sha); err != nil {
|
||||
return false
|
||||
}
|
||||
if err := git.GitExec(DefaultGitPrj, "merge", "--no-ff", "--no-commit", prjgit.Head.Sha); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
|
||||
prjgit_info, err := rs.GetPrjGitPR()
|
||||
if err != nil {
|
||||
@@ -542,8 +829,12 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
|
||||
}
|
||||
prinfo.RemoteName, err = git.GitClone(repo.Name, br, repo.SSHURL)
|
||||
PanicOnError(err)
|
||||
git.GitExecOrPanic(repo.Name, "fetch", prinfo.RemoteName, head.Sha)
|
||||
git.GitExecOrPanic(repo.Name, "merge", "--ff", head.Sha)
|
||||
if rs.Config.MergeMode == MergeModeDevel {
|
||||
git.GitExecOrPanic(repo.Name, "checkout", "-B", br, head.Sha)
|
||||
} else {
|
||||
git.GitExecOrPanic(repo.Name, "fetch", prinfo.RemoteName, head.Sha)
|
||||
git.GitExecOrPanic(repo.Name, "merge", "--ff", head.Sha)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -560,7 +851,12 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
|
||||
repo := prinfo.PR.Base.Repo
|
||||
|
||||
if !IsDryRun {
|
||||
git.GitExecOrPanic(repo.Name, "push", prinfo.RemoteName)
|
||||
params := []string{"push"}
|
||||
if rs.Config.MergeMode == MergeModeDevel {
|
||||
params = append(params, "-f")
|
||||
}
|
||||
params = append(params, prinfo.RemoteName)
|
||||
git.GitExecOrPanic(repo.Name, params...)
|
||||
} else {
|
||||
LogInfo("*** WOULD push", repo.Name, "to", prinfo.RemoteName)
|
||||
}
|
||||
|
||||
1139
common/pr_test.go
1139
common/pr_test.go
File diff suppressed because it is too large
Load Diff
@@ -1,9 +1,5 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"slices"
|
||||
)
|
||||
|
||||
type Reviewers struct {
|
||||
Prj []string
|
||||
Pkg []string
|
||||
@@ -36,10 +32,5 @@ func ParseReviewers(input []string) *Reviewers {
|
||||
*pkg = append(*pkg, reviewer)
|
||||
}
|
||||
}
|
||||
|
||||
if !slices.Contains(r.Prj, Bot_BuildReview) {
|
||||
r.Prj = append(r.Prj, Bot_BuildReview)
|
||||
}
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -21,14 +21,14 @@ func TestReviewers(t *testing.T) {
|
||||
name: "project and package reviewers",
|
||||
input: []string{"1", "2", "3", "*5", "+6", "-7"},
|
||||
|
||||
prj: []string{"5", "7", common.Bot_BuildReview},
|
||||
prj: []string{"5", "7"},
|
||||
pkg: []string{"1", "2", "3", "5", "6"},
|
||||
},
|
||||
{
|
||||
name: "optional project and package reviewers",
|
||||
input: []string{"~1", "2", "3", "~*5", "+6", "-7"},
|
||||
|
||||
prj: []string{"7", common.Bot_BuildReview},
|
||||
prj: []string{"7"},
|
||||
pkg: []string{"2", "3", "6"},
|
||||
prj_optional: []string{"5"},
|
||||
pkg_optional: []string{"1", "5"},
|
||||
|
||||
@@ -9,12 +9,14 @@ import (
|
||||
)
|
||||
|
||||
type PRReviews struct {
|
||||
reviews []*models.PullReview
|
||||
reviewers []string
|
||||
comments []*models.TimelineComment
|
||||
Reviews []*models.PullReview
|
||||
RequestedReviewers []string
|
||||
Comments []*models.TimelineComment
|
||||
|
||||
FullTimeline []*models.TimelineComment
|
||||
}
|
||||
|
||||
func FetchGiteaReviews(rf GiteaReviewTimelineFetcher, reviewers []string, org, repo string, no int64) (*PRReviews, error) {
|
||||
func FetchGiteaReviews(rf GiteaReviewTimelineFetcher, org, repo string, no int64) (*PRReviews, error) {
|
||||
timeline, err := rf.GetTimeline(org, repo, no)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -25,10 +27,14 @@ func FetchGiteaReviews(rf GiteaReviewTimelineFetcher, reviewers []string, org, r
|
||||
return nil, err
|
||||
}
|
||||
|
||||
reviews := make([]*models.PullReview, 0, len(reviewers))
|
||||
reviews := make([]*models.PullReview, 0, 10)
|
||||
needNewReviews := []string{}
|
||||
var comments []*models.TimelineComment
|
||||
|
||||
alreadyHaveUserReview := func(user string) bool {
|
||||
if slices.Contains(needNewReviews, user) {
|
||||
return true
|
||||
}
|
||||
for _, r := range reviews {
|
||||
if r.User != nil && r.User.UserName == user {
|
||||
return true
|
||||
@@ -37,32 +43,40 @@ func FetchGiteaReviews(rf GiteaReviewTimelineFetcher, reviewers []string, org, r
|
||||
return false
|
||||
}
|
||||
|
||||
LogDebug("FetchingGiteaReviews for", org, repo, no)
|
||||
LogDebug("Number of reviews:", len(rawReviews))
|
||||
LogDebug("Number of items in timeline:", len(timeline))
|
||||
|
||||
cutOffIdx := len(timeline)
|
||||
for idx, item := range timeline {
|
||||
if item.Type == TimelineCommentType_Review {
|
||||
if item.Type == TimelineCommentType_Review || item.Type == TimelineCommentType_ReviewRequested {
|
||||
for _, r := range rawReviews {
|
||||
if r.ID == item.ReviewID {
|
||||
if !alreadyHaveUserReview(r.User.UserName) {
|
||||
reviews = append(reviews, r)
|
||||
if item.Type == TimelineCommentType_Review && idx > cutOffIdx {
|
||||
needNewReviews = append(needNewReviews, r.User.UserName)
|
||||
} else {
|
||||
reviews = append(reviews, r)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
} else if item.Type == TimelineCommentType_Comment {
|
||||
} else if item.Type == TimelineCommentType_Comment && cutOffIdx > idx {
|
||||
comments = append(comments, item)
|
||||
} else if item.Type == TimelineCommentType_PushPull {
|
||||
LogDebug("cut-off", item.Created)
|
||||
timeline = timeline[0:idx]
|
||||
break
|
||||
} else if item.Type == TimelineCommentType_PushPull && cutOffIdx == len(timeline) {
|
||||
LogDebug("cut-off", item.Created, "@", idx)
|
||||
cutOffIdx = idx
|
||||
} else {
|
||||
LogDebug("Unhandled timeline type:", item.Type)
|
||||
}
|
||||
}
|
||||
LogDebug("num comments:", len(comments), "reviews:", len(reviews), len(timeline))
|
||||
LogDebug("num comments:", len(comments), "timeline:", len(reviews))
|
||||
|
||||
return &PRReviews{
|
||||
reviews: reviews,
|
||||
reviewers: reviewers,
|
||||
comments: comments,
|
||||
Reviews: reviews,
|
||||
Comments: comments,
|
||||
FullTimeline: timeline,
|
||||
}, nil
|
||||
}
|
||||
|
||||
@@ -81,23 +95,27 @@ func bodyCommandManualMergeOK(body string) bool {
|
||||
}
|
||||
|
||||
func (r *PRReviews) IsManualMergeOK() bool {
|
||||
for _, c := range r.comments {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, c := range r.Comments {
|
||||
if c.Updated != c.Created {
|
||||
continue
|
||||
}
|
||||
LogDebug("comment:", c.User.UserName, c.Body)
|
||||
if slices.Contains(r.reviewers, c.User.UserName) {
|
||||
if slices.Contains(r.RequestedReviewers, c.User.UserName) {
|
||||
if bodyCommandManualMergeOK(c.Body) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range r.reviews {
|
||||
for _, c := range r.Reviews {
|
||||
if c.Updated != c.Submitted {
|
||||
continue
|
||||
}
|
||||
if slices.Contains(r.reviewers, c.User.UserName) {
|
||||
if slices.Contains(r.RequestedReviewers, c.User.UserName) {
|
||||
if bodyCommandManualMergeOK(c.Body) {
|
||||
return true
|
||||
}
|
||||
@@ -108,11 +126,14 @@ func (r *PRReviews) IsManualMergeOK() bool {
|
||||
}
|
||||
|
||||
func (r *PRReviews) IsApproved() bool {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
goodReview := true
|
||||
|
||||
for _, reviewer := range r.reviewers {
|
||||
for _, reviewer := range r.RequestedReviewers {
|
||||
goodReview = false
|
||||
for _, review := range r.reviews {
|
||||
for _, review := range r.Reviews {
|
||||
if review.User.UserName == reviewer && review.State == ReviewStateApproved && !review.Stale && !review.Dismissed {
|
||||
LogDebug(" -- found review: ", review.User.UserName)
|
||||
goodReview = true
|
||||
@@ -130,7 +151,11 @@ func (r *PRReviews) IsApproved() bool {
|
||||
|
||||
func (r *PRReviews) MissingReviews() []string {
|
||||
missing := []string{}
|
||||
for _, reviewer := range r.reviewers {
|
||||
if r == nil {
|
||||
return missing
|
||||
}
|
||||
|
||||
for _, reviewer := range r.RequestedReviewers {
|
||||
if !r.IsReviewedBy(reviewer) {
|
||||
missing = append(missing, reviewer)
|
||||
}
|
||||
@@ -138,45 +163,64 @@ func (r *PRReviews) MissingReviews() []string {
|
||||
return missing
|
||||
}
|
||||
|
||||
func (r *PRReviews) HasPendingReviewBy(reviewer string) bool {
|
||||
if !slices.Contains(r.reviewers, reviewer) {
|
||||
return false
|
||||
func (r *PRReviews) FindReviewRequester(reviewer string) *models.TimelineComment {
|
||||
if r == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
isPending := false
|
||||
for _, r := range r.reviews {
|
||||
if r.User.UserName == reviewer && !r.Stale {
|
||||
switch r.State {
|
||||
case ReviewStateApproved:
|
||||
fallthrough
|
||||
case ReviewStateRequestChanges:
|
||||
return false
|
||||
case ReviewStateRequestReview:
|
||||
fallthrough
|
||||
case ReviewStatePending:
|
||||
isPending = true
|
||||
}
|
||||
for _, r := range r.FullTimeline {
|
||||
if r.Type == TimelineCommentType_ReviewRequested && r.Assignee.UserName == reviewer {
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
||||
return isPending
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *PRReviews) IsReviewedBy(reviewer string) bool {
|
||||
if !slices.Contains(r.reviewers, reviewer) {
|
||||
func (r *PRReviews) HasPendingReviewBy(reviewer string) bool {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, r := range r.reviews {
|
||||
if r.User.UserName == reviewer && !r.Stale {
|
||||
for _, r := range r.Reviews {
|
||||
if r.User.UserName == reviewer {
|
||||
switch r.State {
|
||||
case ReviewStateApproved:
|
||||
return true
|
||||
case ReviewStateRequestChanges:
|
||||
case ReviewStateRequestReview, ReviewStatePending:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *PRReviews) IsReviewedBy(reviewer string) bool {
|
||||
if r == nil {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, r := range r.Reviews {
|
||||
if r.User.UserName == reviewer && !r.Stale {
|
||||
switch r.State {
|
||||
case ReviewStateApproved, ReviewStateRequestChanges:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (r *PRReviews) IsReviewedByOneOf(reviewers ...string) bool {
|
||||
for _, reviewer := range reviewers {
|
||||
if r.IsReviewedBy(reviewer) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -62,11 +62,23 @@ func TestReviews(t *testing.T) {
|
||||
{
|
||||
name: "Two reviewer, one stale and pending",
|
||||
reviews: []*models.PullReview{
|
||||
&models.PullReview{State: common.ReviewStateRequestReview, User: &models.User{UserName: "user1"}, Stale: true},
|
||||
{State: common.ReviewStateRequestReview, User: &models.User{UserName: "user1"}, Stale: true},
|
||||
},
|
||||
reviewers: []string{"user1", "user2"},
|
||||
isApproved: false,
|
||||
isPendingByTest1: false,
|
||||
isPendingByTest1: true,
|
||||
isReviewedByTest1: false,
|
||||
},
|
||||
{
|
||||
name: "Two reviewer, one stale and pending, other done",
|
||||
reviews: []*models.PullReview{
|
||||
{State: common.ReviewStateRequestReview, User: &models.User{UserName: "user1"}},
|
||||
{State: common.ReviewStateRequestChanges, User: &models.User{UserName: "user1"}},
|
||||
{State: common.ReviewStateApproved, User: &models.User{UserName: "user2"}},
|
||||
},
|
||||
reviewers: []string{"user1", "user2"},
|
||||
isApproved: false,
|
||||
isPendingByTest1: true,
|
||||
isReviewedByTest1: false,
|
||||
},
|
||||
{
|
||||
@@ -139,7 +151,7 @@ func TestReviews(t *testing.T) {
|
||||
rf.EXPECT().GetTimeline("test", "pr", int64(1)).Return(test.timeline, nil)
|
||||
rf.EXPECT().GetPullRequestReviews("test", "pr", int64(1)).Return(test.reviews, test.fetchErr)
|
||||
|
||||
reviews, err := common.FetchGiteaReviews(rf, test.reviewers, "test", "pr", 1)
|
||||
reviews, err := common.FetchGiteaReviews(rf, "test", "pr", 1)
|
||||
|
||||
if test.fetchErr != nil {
|
||||
if err != test.fetchErr {
|
||||
@@ -147,6 +159,7 @@ func TestReviews(t *testing.T) {
|
||||
}
|
||||
return
|
||||
}
|
||||
reviews.RequestedReviewers = test.reviewers
|
||||
|
||||
if r := reviews.IsApproved(); r != test.isApproved {
|
||||
t.Fatal("Unexpected IsReviewed():", r, "vs. expected", test.isApproved)
|
||||
|
||||
@@ -1,6 +1,10 @@
|
||||
#!/usr/bin/bash
|
||||
|
||||
git init -q --bare --object-format=sha256
|
||||
git config user.email test@example.com
|
||||
git config user.name Test
|
||||
export GIT_AUTHOR_DATE=2025-10-27T14:20:07+01:00
|
||||
export GIT_COMMITTER_DATE=2025-10-27T14:20:07+01:00
|
||||
|
||||
# 81aba862107f1e2f5312e165453955485f424612f313d6c2fb1b31fef9f82a14
|
||||
blobA=$(echo "help" | git hash-object --stdin -w)
|
||||
|
||||
110
common/utils.go
110
common/utils.go
@@ -27,10 +27,87 @@ import (
|
||||
"regexp"
|
||||
"slices"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
)
|
||||
|
||||
type NewRepos struct {
|
||||
Repos []struct {
|
||||
Organization, Repository, Branch string
|
||||
PackageName string
|
||||
}
|
||||
IsMaintainer bool
|
||||
}
|
||||
|
||||
const maintainership_line = "MAINTAINER"
|
||||
|
||||
var true_lines []string = []string{"1", "TRUE", "YES", "OK", "T"}
|
||||
|
||||
func HasSpace(s string) bool {
|
||||
return strings.IndexFunc(s, unicode.IsSpace) >= 0
|
||||
}
|
||||
|
||||
func FindNewReposInIssueBody(body string) *NewRepos {
|
||||
Issues := &NewRepos{}
|
||||
for _, line := range strings.Split(body, "\n") {
|
||||
line = strings.TrimSpace(line)
|
||||
if ul := strings.ToUpper(line); strings.HasPrefix(ul, "MAINTAINER") {
|
||||
value := ""
|
||||
if idx := strings.IndexRune(ul, ':'); idx > 0 && len(ul) > idx+2 {
|
||||
value = ul[idx+1:]
|
||||
} else if idx := strings.IndexRune(ul, ' '); idx > 0 && len(ul) > idx+2 {
|
||||
value = ul[idx+1:]
|
||||
}
|
||||
|
||||
if slices.Contains(true_lines, strings.TrimSpace(value)) {
|
||||
Issues.IsMaintainer = true
|
||||
}
|
||||
}
|
||||
// line = strings.TrimSpace(line)
|
||||
issue := struct{ Organization, Repository, Branch, PackageName string }{}
|
||||
|
||||
branch := strings.Split(line, "#")
|
||||
repo := strings.Split(branch[0], "/")
|
||||
|
||||
if len(branch) == 2 {
|
||||
issue.Branch = strings.TrimSpace(branch[1])
|
||||
}
|
||||
if len(repo) == 2 {
|
||||
issue.Organization = strings.TrimSpace(repo[0])
|
||||
issue.Repository = strings.TrimSpace(repo[1])
|
||||
issue.PackageName = issue.Repository
|
||||
|
||||
if idx := strings.Index(strings.ToUpper(issue.Branch), " AS "); idx > 0 && len(issue.Branch) > idx+5 {
|
||||
issue.PackageName = strings.TrimSpace(issue.Branch[idx+3:])
|
||||
issue.Branch = strings.TrimSpace(issue.Branch[0:idx])
|
||||
}
|
||||
|
||||
if HasSpace(issue.Organization) || HasSpace(issue.Repository) || HasSpace(issue.PackageName) || HasSpace(issue.Branch) {
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
Issues.Repos = append(Issues.Repos, issue)
|
||||
//PackageNameIdx := strings.Index(strings.ToUpper(line), " AS ")
|
||||
//words := strings.Split(line)
|
||||
}
|
||||
|
||||
if len(Issues.Repos) == 0 {
|
||||
return nil
|
||||
}
|
||||
return Issues
|
||||
}
|
||||
|
||||
func IssueToString(issue *models.Issue) string {
|
||||
if issue == nil {
|
||||
return "(nil)"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s/%s#%d", issue.Repository.Owner, issue.Repository.Name, issue.Index)
|
||||
}
|
||||
|
||||
func SplitLines(str string) []string {
|
||||
return SplitStringNoEmpty(str, "\n")
|
||||
}
|
||||
@@ -168,9 +245,10 @@ func FetchDevelProjects() (DevelProjects, error) {
|
||||
}
|
||||
|
||||
var DevelProjectNotFound = errors.New("Devel project not found")
|
||||
|
||||
func (d DevelProjects) GetDevelProject(pkg string) (string, error) {
|
||||
for _, item := range d {
|
||||
if item.Package == pkg {
|
||||
if item.Package == pkg {
|
||||
return item.Project, nil
|
||||
}
|
||||
}
|
||||
@@ -178,3 +256,33 @@ func (d DevelProjects) GetDevelProject(pkg string) (string, error) {
|
||||
return "", DevelProjectNotFound
|
||||
}
|
||||
|
||||
var removedBranchNameSuffixes []string = []string{
|
||||
"-rm",
|
||||
"-removed",
|
||||
"-deleted",
|
||||
}
|
||||
|
||||
func findRemovedBranchSuffix(branchName string) string {
|
||||
branchName = strings.ToLower(branchName)
|
||||
|
||||
for _, suffix := range removedBranchNameSuffixes {
|
||||
if len(suffix) < len(branchName) && strings.HasSuffix(branchName, suffix) {
|
||||
return suffix
|
||||
}
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func IsRemovedBranch(branchName string) bool {
|
||||
return len(findRemovedBranchSuffix(branchName)) > 0
|
||||
}
|
||||
|
||||
func TrimRemovedBranchSuffix(branchName string) string {
|
||||
suffix := findRemovedBranchSuffix(branchName)
|
||||
if len(suffix) > 0 {
|
||||
return branchName[0 : len(branchName)-len(suffix)]
|
||||
}
|
||||
|
||||
return branchName
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package common_test
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
@@ -165,3 +166,142 @@ func TestRemoteName(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovedBranchName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
branchName string
|
||||
isRemoved bool
|
||||
regularName string
|
||||
}{
|
||||
{
|
||||
name: "Empty branch",
|
||||
},
|
||||
{
|
||||
name: "Removed suffix only",
|
||||
branchName: "-rm",
|
||||
isRemoved: false,
|
||||
regularName: "-rm",
|
||||
},
|
||||
{
|
||||
name: "Capital suffix",
|
||||
branchName: "Foo-Rm",
|
||||
isRemoved: true,
|
||||
regularName: "Foo",
|
||||
},
|
||||
{
|
||||
name: "Other suffixes",
|
||||
isRemoved: true,
|
||||
branchName: "Goo-Rm-DeleteD",
|
||||
regularName: "Goo-Rm",
|
||||
},
|
||||
{
|
||||
name: "Other suffixes",
|
||||
isRemoved: true,
|
||||
branchName: "main-REMOVED",
|
||||
regularName: "main",
|
||||
},
|
||||
{
|
||||
name: "Not removed separator",
|
||||
isRemoved: false,
|
||||
branchName: "main;REMOVED",
|
||||
regularName: "main;REMOVED",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
if r := common.IsRemovedBranch(test.branchName); r != test.isRemoved {
|
||||
t.Error("Expecting isRemoved:", test.isRemoved, "but received", r)
|
||||
}
|
||||
|
||||
if tn := common.TrimRemovedBranchSuffix(test.branchName); tn != test.regularName {
|
||||
t.Error("Expected stripped branch name to be:", test.regularName, "but have:", tn)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewPackageIssueParsing(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
input string
|
||||
issues *common.NewRepos
|
||||
}{
|
||||
{
|
||||
name: "Nothing",
|
||||
},
|
||||
{
|
||||
name: "Basic repo",
|
||||
input: "org/repo#branch",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org", Repository: "repo", Branch: "branch", PackageName: "repo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Default branch and junk lines and approval for maintainership",
|
||||
input: "\n\nsome comments\n\norg1/repo2\n\nmaintainership: yes",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org1", Repository: "repo2", Branch: "", PackageName: "repo2"},
|
||||
},
|
||||
IsMaintainer: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Default branch and junk lines and no maintainership",
|
||||
input: "\n\nsome comments\n\norg1/repo2\n\nmaintainership: NEVER",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org1", Repository: "repo2", Branch: "", PackageName: "repo2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "3 repos with comments and maintainership",
|
||||
input: "\n\nsome comments for org1/repo2 are here and more\n\norg1/repo2#master\n org2/repo3#master\n some/repo3#m\nMaintainer ok",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org1", Repository: "repo2", Branch: "master", PackageName: "repo2"},
|
||||
{Organization: "org2", Repository: "repo3", Branch: "master", PackageName: "repo3"},
|
||||
{Organization: "some", Repository: "repo3", Branch: "m", PackageName: "repo3"},
|
||||
},
|
||||
IsMaintainer: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Invalid repos with spaces",
|
||||
input: "or g/repo#branch\norg/r epo#branch\norg/repo#br anch\norg/repo#branch As foo ++",
|
||||
},
|
||||
{
|
||||
name: "Valid repos with spaces",
|
||||
input: " org / repo # branch",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org", Repository: "repo", Branch: "branch", PackageName: "repo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "Package name is not repo name",
|
||||
input: " org / repo # branch as repo++ \nmaintainer true",
|
||||
issues: &common.NewRepos{
|
||||
Repos: []struct{ Organization, Repository, Branch, PackageName string }{
|
||||
{Organization: "org", Repository: "repo", Branch: "branch", PackageName: "repo++"},
|
||||
},
|
||||
IsMaintainer: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
issue := common.FindNewReposInIssueBody(test.input)
|
||||
if !reflect.DeepEqual(test.issues, issue) {
|
||||
t.Error("Expected", test.issues, "but have", issue)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,13 +58,42 @@ sub ListPackages {
|
||||
return @packages;
|
||||
}
|
||||
|
||||
sub FactoryMd5 {
|
||||
my ($package) = @_;
|
||||
my $out = "";
|
||||
|
||||
if (system("osc ls openSUSE:Factory $package | grep -q build.specials.obscpio") == 0) {
|
||||
system("mkdir _extract") == 0 || die "_extract exists or can't make it. Aborting.";
|
||||
chdir("_extract") || die;
|
||||
system("osc cat openSUSE:Factory $package build.specials.obscpio | cpio -dium 2> /dev/null") == 0 || die;
|
||||
system("rm .* 2> /dev/null");
|
||||
open( my $fh, "find -type f -exec /usr/bin/basename {} \\; | xargs md5sum | awk '{print \$1 FS \$2}' | grep -v d41d8cd98f00b204e9800998ecf8427e |") or die;
|
||||
while ( my $l = <$fh>) {
|
||||
$out = $out.$l;
|
||||
}
|
||||
close($fh);
|
||||
chdir("..") && system("rm -rf _extract") == 0 || die;
|
||||
}
|
||||
open( my $fh, "osc ls -v openSUSE:Factory $package | awk '{print \$1 FS \$7}' | grep -v -F '_scmsync.obsinfo\nbuild.specials.obscpio' |") or die;
|
||||
while (my $l = <$fh>) {
|
||||
$out = $out.$l;
|
||||
}
|
||||
close($fh);
|
||||
return $out;
|
||||
}
|
||||
|
||||
# Read project from first argument
|
||||
sub Usage {
|
||||
die "Usage: $0 <OBS Project> <org>";
|
||||
die "Usage: $0 <OBS Project> [org [package]]";
|
||||
}
|
||||
|
||||
my $project = shift or Usage();
|
||||
my $org = shift or Usage();
|
||||
my $org = shift;
|
||||
|
||||
if (not defined($org)) {
|
||||
$org = `osc meta prj $project | grep scmsync | sed -e 's,^.*src.opensuse.org/\\(.*\\)/_ObsPrj.*,\\1,'`;
|
||||
chomp($org);
|
||||
}
|
||||
|
||||
my @packages = ListPackages($project);
|
||||
my $pkg = shift;
|
||||
@@ -73,13 +102,28 @@ my $pkg = shift;
|
||||
my @tomove;
|
||||
my @toremove;
|
||||
|
||||
print "Verify packages in /pool ...\n";
|
||||
if ( ! -e $org ) {
|
||||
mkdir($org);
|
||||
}
|
||||
chdir($org);
|
||||
print "Verify packages in /pool for $org package in $project\n";
|
||||
|
||||
my $super_user = $ENV{SUPER};
|
||||
if (defined($super_user)) {
|
||||
$super_user = "-G $super_user";
|
||||
} else {
|
||||
$super_user = "";
|
||||
}
|
||||
|
||||
my @missing;
|
||||
|
||||
# verify that packages in devel project is a fork from pool.
|
||||
for my $pkg ( sort(@packages) ) {
|
||||
my $data = `git obs api /repos/$org/$pkg 2> /dev/null`;
|
||||
if ( length($data) == 0 ) {
|
||||
die "Repo missing in $org: $pkg";
|
||||
print "***** Repo missing in $org: $pkg\n";
|
||||
push(@missing, $pkg);
|
||||
next;
|
||||
}
|
||||
else {
|
||||
my $repo = decode_json($data);
|
||||
@@ -97,6 +141,15 @@ for my $pkg ( sort(@packages) ) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if ( scalar @missing > 0 ) {
|
||||
for my $pkg (@missing) {
|
||||
my $index = 0;
|
||||
$index++ until $packages[$index] eq $pkg;
|
||||
splice(@packages, $index, 1);
|
||||
}
|
||||
}
|
||||
|
||||
if ( scalar @toremove > 0 ) {
|
||||
print "ABORTING. Need repos removed.\n";
|
||||
print "@toremove\n";
|
||||
@@ -104,13 +157,15 @@ if ( scalar @toremove > 0 ) {
|
||||
}
|
||||
|
||||
if ( scalar @tomove > 0 ) {
|
||||
|
||||
# for my $pkg (@tomove) {
|
||||
# system("git obs api -X POST --data '{\"new_owner\": \"pool\"}' /repos/$org/$pkg/transfer 2> /dev/null > /dev/null");
|
||||
# }
|
||||
print "ABORTING. Need to move reps.\n";
|
||||
print "Initiated transfer for @tomove\n";
|
||||
exit(1);
|
||||
for my $pkg (@tomove) {
|
||||
system("git obs $super_user api -X POST --data '{\"reparent\": true, \"organization\": \"pool\"}' /repos/$org/$pkg/forks") == 0 and
|
||||
system("git clone gitea\@src.opensuse.org:pool/$pkg") == 0 and
|
||||
system("git -C $pkg checkout -B factory HEAD") == 0 and
|
||||
system("git -C $pkg push origin factory") == 0 and
|
||||
system("git obs $super_user api -X PATCH --data '{\"default_branch\": \"factory\"}' /repos/pool/$pkg") == 0
|
||||
or die "Error in creating a pool repo";
|
||||
system("for i in \$(git -C $pkg for-each-ref --format='%(refname:lstrip=3)' refs/remotes/origin/ | grep -v '\\(^HEAD\$\\|^factory\$\\)'); do git -C $pkg push origin :\$i; done") == 0 or die "failed to cull branches";
|
||||
}
|
||||
}
|
||||
|
||||
print "Verify complete.\n";
|
||||
@@ -137,6 +192,7 @@ for my $package ( sort(@packages) ) {
|
||||
or ( push( @tomove, $package ) and die "Can't fetch pool for $package" );
|
||||
|
||||
my @commits = FindFactoryCommit($package);
|
||||
my $Md5Hashes = FactoryMd5($package);
|
||||
my $c;
|
||||
my $match = 0;
|
||||
for my $commit (@commits) {
|
||||
@@ -149,15 +205,27 @@ for my $package ( sort(@packages) ) {
|
||||
system("git -C $package lfs fetch pool $commit") == 0
|
||||
and system("git -C $package checkout -B factory $commit") == 0
|
||||
and system("git -C $package lfs checkout") == 0
|
||||
and system(
|
||||
"cd $package; osc ls -v openSUSE:Factory $package | awk '{print \$1 FS \$7}' | grep -v -F '_scmsync.obsinfo\nbuild.specials.obscpio' | md5sum -c --quiet"
|
||||
) == 0
|
||||
)
|
||||
{
|
||||
and chdir($package)) {
|
||||
|
||||
open(my $fh, "|-", "md5sum -c --quiet") or die $!;
|
||||
print $fh $Md5Hashes;
|
||||
close $fh;
|
||||
if ($? >> 8 != 0) {
|
||||
chdir("..") || die;
|
||||
next;
|
||||
}
|
||||
open($fh, "|-", "awk '{print \$2}' | sort | bash -c \"diff <(ls -1 | sort) -\"") or die $!;
|
||||
print $fh $Md5Hashes;
|
||||
close $fh;
|
||||
my $ec = $? >> 8;
|
||||
chdir("..") || die;
|
||||
|
||||
if ($ec == 0) {
|
||||
$c = $commit;
|
||||
$match = 1;
|
||||
last;
|
||||
}
|
||||
|
||||
$c = $commit;
|
||||
$match = 1;
|
||||
last;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -165,7 +233,7 @@ for my $package ( sort(@packages) ) {
|
||||
die "Match not found. Aborting.";
|
||||
}
|
||||
|
||||
#system ("git -C $package push -f pool factory");
|
||||
system ("git -C $package push -f pool factory");
|
||||
print "$package: $c\n";
|
||||
}
|
||||
|
||||
|
||||
@@ -1,16 +1,24 @@
|
||||
Java:packages
|
||||
Kernel:firmware
|
||||
Kernel:kdump
|
||||
devel:gcc
|
||||
devel:languages:clojure
|
||||
devel:languages:erlang
|
||||
devel:languages:erlang:Factory
|
||||
devel:languages:hare
|
||||
devel:languages:javascript
|
||||
devel:languages:lua
|
||||
devel:languages:nodejs
|
||||
devel:languages:perl
|
||||
devel:languages:python:Factory
|
||||
devel:languages:python:mailman
|
||||
devel:languages:python:pytest
|
||||
devel:openSUSE:Factory
|
||||
network:chromium
|
||||
network:dhcp
|
||||
network:im:whatsapp
|
||||
network:messaging:xmpp
|
||||
science:HPC
|
||||
server:dns
|
||||
systemsmanagement:cockpit
|
||||
X11:lxde
|
||||
|
||||
@@ -14,15 +14,11 @@ import (
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
type Status struct {
|
||||
Context string `json:"context"`
|
||||
State string `json:"state"`
|
||||
TargetUrl string `json:"target_url"`
|
||||
}
|
||||
|
||||
type StatusInput struct {
|
||||
State string `json:"state"`
|
||||
TargetUrl string `json:"target_url"`
|
||||
Description string `json:"description"`
|
||||
Context string `json:"context"`
|
||||
State string `json:"state"`
|
||||
TargetUrl string `json:"target_url"`
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -59,23 +55,26 @@ func StatusProxy(w http.ResponseWriter, r *http.Request) {
|
||||
config, ok := r.Context().Value(configKey).(*Config)
|
||||
|
||||
if !ok {
|
||||
common.LogError("Config missing from context")
|
||||
common.LogDebug("Config missing from context")
|
||||
http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
|
||||
header := r.Header.Get("Authorization")
|
||||
if header == "" {
|
||||
common.LogDebug("Authorization header not found")
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
token_arr := strings.Split(header, " ")
|
||||
if len(token_arr) != 2 {
|
||||
common.LogDebug("Authorization header malformed")
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
|
||||
if !strings.EqualFold(token_arr[0], "Bearer") {
|
||||
if !strings.EqualFold(token_arr[0], "token") {
|
||||
common.LogDebug("Token not found in Authorization header")
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
@@ -83,6 +82,7 @@ func StatusProxy(w http.ResponseWriter, r *http.Request) {
|
||||
token := token_arr[1]
|
||||
|
||||
if !slices.Contains(config.Keys, token) {
|
||||
common.LogDebug("Provided token is not known")
|
||||
http.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)
|
||||
return
|
||||
}
|
||||
@@ -104,13 +104,8 @@ func StatusProxy(w http.ResponseWriter, r *http.Request) {
|
||||
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
status := Status{
|
||||
Context: "Build in obs",
|
||||
State: statusinput.State,
|
||||
TargetUrl: statusinput.TargetUrl,
|
||||
}
|
||||
|
||||
status_payload, err := json.Marshal(status)
|
||||
status_payload, err := json.Marshal(statusinput)
|
||||
|
||||
if err != nil {
|
||||
http.Error(w, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
|
||||
@@ -131,8 +126,8 @@ func StatusProxy(w http.ResponseWriter, r *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
req.Header.Add("Content-Type", "Content-Type")
|
||||
req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", ForgeToken))
|
||||
req.Header.Add("Content-Type", "application/json")
|
||||
req.Header.Add("Authorization", fmt.Sprintf("token %s", ForgeToken))
|
||||
|
||||
resp, err := client.Do(req)
|
||||
|
||||
|
||||
48
gitea_status_proxy/readme.md
Normal file
48
gitea_status_proxy/readme.md
Normal file
@@ -0,0 +1,48 @@
|
||||
# gitea_status_proxy
|
||||
|
||||
Allows bots without code owner permission to set Gitea's commit status
|
||||
|
||||
## Basic usage
|
||||
|
||||
To beging, you need the json config and a Gitea token with permissions to the repository you want to write to.
|
||||
|
||||
Keys should be randomly generated, i.e by using openssl: `openssl rand -base64 48`
|
||||
|
||||
Generate a json config file, with the key generated from running the command above, save as example.json:
|
||||
|
||||
```
|
||||
{
|
||||
"forge_url": "https://src.opensuse.org/api/v1",
|
||||
"keys": ["$YOUR_TOKEN_GOES_HERE"]
|
||||
}
|
||||
```
|
||||
|
||||
### start the proxy:
|
||||
|
||||
```
|
||||
GITEA_TOKEN=YOURTOKEN ./gitea_status_proxy -config example.json
|
||||
2025/10/30 12:53:18 [I] server up and listening on :3000
|
||||
```
|
||||
|
||||
Now the proxy should be able to accept requests under: `localhost:3000/repos/{owner}/{repo}/statuses/{sha}`, the token to be used when authenticating to the proxy must be in the `keys` list of the configuration json file (example.json above)
|
||||
|
||||
### example:
|
||||
|
||||
On a separate terminal, you can use curl to post a status to the proxy, if the GITEA_TOKEN has permissions on the target
|
||||
repository, it will result in a new status being set for the given commit
|
||||
|
||||
```
|
||||
curl -X 'POST' \
|
||||
'localhost:3000/repos/szarate/test-actions-gitea/statuses/cd5847c92fb65a628bdd6015f96ee7e569e1ad6e4fc487acc149b52e788262f9' \
|
||||
-H 'accept: application/json' \
|
||||
-H 'Authorization: token $YOUR_TOKEN_GOES_HERE' \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{
|
||||
"context": "Proxy test",
|
||||
"description": "Status posted from the proxy",
|
||||
"state": "success",
|
||||
"target_url": "https://src.opensuse.org"
|
||||
}'
|
||||
```
|
||||
|
||||
After this you should be able to the results in the pull request, e.g from above: https://src.opensuse.org/szarate/test-actions-gitea/pulls/1
|
||||
@@ -1,41 +1,65 @@
|
||||
Group Review Bot
|
||||
================
|
||||
|
||||
Areas of responsibility
|
||||
-----------------------
|
||||
This workaround is mainly needed because Gitea does not track which team member performed a review on behalf of a team.
|
||||
|
||||
1. Is used to handle reviews associated with groups defined in the
|
||||
ProjectGit.
|
||||
Main Tasks
|
||||
----------
|
||||
|
||||
2. Assumes: workflow-pr needs to associate and define the PR set from
|
||||
which the groups.json is read (Base of the PrjGit PR)
|
||||
Awaits a comment in the format “@groupreviewbot-name: approve”, then approves the PR with the comment “<user> approved a review on behalf of <groupreviewbot-name>.”
|
||||
|
||||
Target Usage
|
||||
------------
|
||||
|
||||
Projects where policy reviews are required.
|
||||
|
||||
Configiuration
|
||||
Configuration
|
||||
--------------
|
||||
|
||||
Groups are defined in the workflow.config inside the project git. They take following options,
|
||||
The bot is configured via the `ReviewGroups` field in the `workflow.config` file, located in the ProjectGit repository.
|
||||
|
||||
See `ReviewGroups` in the [workflow-pr configuration](../workflow-pr/README.md#config-file).
|
||||
|
||||
```json
|
||||
{
|
||||
...
|
||||
ReviewGroups: [
|
||||
{
|
||||
"Name": "name of the group user",
|
||||
"Reviewers": ["members", "of", "group"],
|
||||
"Silent": (true, false) -- if true, do not explicitly require review requests of group members
|
||||
},
|
||||
],
|
||||
...
|
||||
...
|
||||
"ReviewGroups": [
|
||||
{
|
||||
"Name": "name of the group user",
|
||||
"Reviewers": ["members", "of", "group"],
|
||||
"Silent": "(true, false) -- if true, do not explicitly require review requests of group members"
|
||||
}
|
||||
],
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Server configuration
|
||||
--------------------------
|
||||
|
||||
**Configuration file:**
|
||||
|
||||
| Field | Type | Notes |
|
||||
| ----- | ----- | ----- |
|
||||
| root | Array of string | Format **org/repo\#branch** |
|
||||
|
||||
Requirements
|
||||
------------
|
||||
* Gitea token to:
|
||||
+ R/W PullRequest
|
||||
+ R/W Notification
|
||||
+ R User
|
||||
Gitea token with following permissions:
|
||||
- R/W PullRequest
|
||||
- R/W Notification
|
||||
- R User
|
||||
|
||||
Env Variables
|
||||
-------------
|
||||
The following variables can be used (and override) command line parameters.
|
||||
|
||||
* `AUTOGITS_CONFIG` - config file location
|
||||
* `AUTOGITS_URL` - Gitea URL
|
||||
* `AUTOGITS_RABBITURL` - RabbitMQ url
|
||||
* `AUTOGITS_DEBUG` - when set, debug level logging enabled
|
||||
|
||||
Authentication env variables
|
||||
* `GITEA_TOKEN` - Gitea user token
|
||||
* `AMQP_USERNAME`, `AMQP_PASSWORD` - username and password for rabbitmq
|
||||
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"regexp"
|
||||
"runtime/debug"
|
||||
"slices"
|
||||
@@ -17,20 +18,23 @@ import (
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
)
|
||||
|
||||
var configs common.AutogitConfigs
|
||||
var acceptRx *regexp.Regexp
|
||||
var rejectRx *regexp.Regexp
|
||||
var groupName string
|
||||
|
||||
func InitRegex(newGroupName string) {
|
||||
groupName = newGroupName
|
||||
acceptRx = regexp.MustCompile("^:\\s*(LGTM|approved?)")
|
||||
rejectRx = regexp.MustCompile("^:\\s*")
|
||||
type ReviewBot struct {
|
||||
configs common.AutogitConfigs
|
||||
acceptRx *regexp.Regexp
|
||||
rejectRx *regexp.Regexp
|
||||
groupName string
|
||||
gitea common.Gitea
|
||||
}
|
||||
|
||||
func ParseReviewLine(reviewText string) (bool, string) {
|
||||
func (bot *ReviewBot) InitRegex(newGroupName string) {
|
||||
bot.groupName = newGroupName
|
||||
bot.acceptRx = regexp.MustCompile("^:\\s*(LGTM|approved?)")
|
||||
bot.rejectRx = regexp.MustCompile("^:\\s*")
|
||||
}
|
||||
|
||||
func (bot *ReviewBot) ParseReviewLine(reviewText string) (bool, string) {
|
||||
line := strings.TrimSpace(reviewText)
|
||||
groupTextName := "@" + groupName
|
||||
groupTextName := "@" + bot.groupName
|
||||
glen := len(groupTextName)
|
||||
if len(line) < glen || line[0:glen] != groupTextName {
|
||||
return false, line
|
||||
@@ -50,20 +54,20 @@ func ParseReviewLine(reviewText string) (bool, string) {
|
||||
return false, line
|
||||
}
|
||||
|
||||
func ReviewAccepted(reviewText string) bool {
|
||||
func (bot *ReviewBot) ReviewAccepted(reviewText string) bool {
|
||||
for _, line := range common.SplitStringNoEmpty(reviewText, "\n") {
|
||||
if matched, reviewLine := ParseReviewLine(line); matched {
|
||||
return acceptRx.MatchString(reviewLine)
|
||||
if matched, reviewLine := bot.ParseReviewLine(line); matched {
|
||||
return bot.acceptRx.MatchString(reviewLine)
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func ReviewRejected(reviewText string) bool {
|
||||
func (bot *ReviewBot) ReviewRejected(reviewText string) bool {
|
||||
for _, line := range common.SplitStringNoEmpty(reviewText, "\n") {
|
||||
if matched, reviewLine := ParseReviewLine(line); matched {
|
||||
if rejectRx.MatchString(reviewLine) {
|
||||
return !acceptRx.MatchString(reviewLine)
|
||||
if matched, reviewLine := bot.ParseReviewLine(line); matched {
|
||||
if bot.rejectRx.MatchString(reviewLine) {
|
||||
return !bot.acceptRx.MatchString(reviewLine)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -113,10 +117,10 @@ var commentStrings = []string{
|
||||
"change_time_estimate",
|
||||
}*/
|
||||
|
||||
func FindAcceptableReviewInTimeline(user string, timeline []*models.TimelineComment, reviews []*models.PullReview) *models.TimelineComment {
|
||||
func (bot *ReviewBot) FindAcceptableReviewInTimeline(user string, timeline []*models.TimelineComment, reviews []*models.PullReview) *models.TimelineComment {
|
||||
for _, t := range timeline {
|
||||
if t.Type == common.TimelineCommentType_Comment && t.User.UserName == user && t.Created == t.Updated {
|
||||
if ReviewAccepted(t.Body) || ReviewRejected(t.Body) {
|
||||
if bot.ReviewAccepted(t.Body) || bot.ReviewRejected(t.Body) {
|
||||
return t
|
||||
}
|
||||
}
|
||||
@@ -125,9 +129,9 @@ func FindAcceptableReviewInTimeline(user string, timeline []*models.TimelineComm
|
||||
return nil
|
||||
}
|
||||
|
||||
func FindOurLastReviewInTimeline(timeline []*models.TimelineComment) *models.TimelineComment {
|
||||
func (bot *ReviewBot) FindOurLastReviewInTimeline(timeline []*models.TimelineComment) *models.TimelineComment {
|
||||
for _, t := range timeline {
|
||||
if t.Type == common.TimelineCommentType_Review && t.User.UserName == groupName && t.Created == t.Updated {
|
||||
if t.Type == common.TimelineCommentType_Review && t.User.UserName == bot.groupName && t.Created == t.Updated {
|
||||
return t
|
||||
}
|
||||
}
|
||||
@@ -135,13 +139,13 @@ func FindOurLastReviewInTimeline(timeline []*models.TimelineComment) *models.Tim
|
||||
return nil
|
||||
}
|
||||
|
||||
func UnrequestReviews(gitea common.Gitea, org, repo string, id int64, users []string) {
|
||||
if err := gitea.UnrequestReview(org, repo, id, users...); err != nil {
|
||||
func (bot *ReviewBot) UnrequestReviews(org, repo string, id int64, users []string) {
|
||||
if err := bot.gitea.UnrequestReview(org, repo, id, users...); err != nil {
|
||||
common.LogError("Can't remove reviewrs after a review:", err)
|
||||
}
|
||||
}
|
||||
|
||||
func ProcessNotifications(notification *models.NotificationThread, gitea common.Gitea) {
|
||||
func (bot *ReviewBot) ProcessNotifications(notification *models.NotificationThread) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
common.LogInfo("panic cought --- recovered")
|
||||
@@ -149,7 +153,7 @@ func ProcessNotifications(notification *models.NotificationThread, gitea common.
|
||||
}
|
||||
}()
|
||||
|
||||
rx := regexp.MustCompile(`^/?api/v\d+/repos/(?<org>[_a-zA-Z0-9-]+)/(?<project>[_a-zA-Z0-9-]+)/(?:issues|pulls)/(?<num>[0-9]+)$`)
|
||||
rx := regexp.MustCompile(`^/?api/v\d+/repos/(?<org>[_\.a-zA-Z0-9-]+)/(?<project>[_\.a-zA-Z0-9-]+)/(?:issues|pulls)/(?<num>[0-9]+)$`)
|
||||
subject := notification.Subject
|
||||
u, err := url.Parse(notification.Subject.URL)
|
||||
if err != nil {
|
||||
@@ -168,14 +172,14 @@ func ProcessNotifications(notification *models.NotificationThread, gitea common.
|
||||
id, _ := strconv.ParseInt(match[3], 10, 64)
|
||||
|
||||
common.LogInfo("processing:", fmt.Sprintf("%s/%s!%d", org, repo, id))
|
||||
pr, err := gitea.GetPullRequest(org, repo, id)
|
||||
pr, err := bot.gitea.GetPullRequest(org, repo, id)
|
||||
if err != nil {
|
||||
common.LogError(" ** Cannot fetch PR associated with review:", subject.URL, "Error:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := ProcessPR(pr); err == nil && !common.IsDryRun {
|
||||
if err := gitea.SetNotificationRead(notification.ID); err != nil {
|
||||
if err := bot.ProcessPR(pr); err == nil && !common.IsDryRun {
|
||||
if err := bot.gitea.SetNotificationRead(notification.ID); err != nil {
|
||||
common.LogDebug(" Cannot set notification as read", err)
|
||||
}
|
||||
} else if err != nil && err != ReviewNotFinished {
|
||||
@@ -185,24 +189,24 @@ func ProcessNotifications(notification *models.NotificationThread, gitea common.
|
||||
|
||||
var ReviewNotFinished = fmt.Errorf("Review is not finished")
|
||||
|
||||
func ProcessPR(pr *models.PullRequest) error {
|
||||
func (bot *ReviewBot) ProcessPR(pr *models.PullRequest) error {
|
||||
org := pr.Base.Repo.Owner.UserName
|
||||
repo := pr.Base.Repo.Name
|
||||
id := pr.Index
|
||||
|
||||
found := false
|
||||
for _, reviewer := range pr.RequestedReviewers {
|
||||
if reviewer != nil && reviewer.UserName == groupName {
|
||||
if reviewer != nil && reviewer.UserName == bot.groupName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
common.LogInfo(" review is not requested for", groupName)
|
||||
common.LogInfo(" review is not requested for", bot.groupName)
|
||||
return nil
|
||||
}
|
||||
|
||||
config := configs.GetPrjGitConfig(org, repo, pr.Base.Name)
|
||||
config := bot.configs.GetPrjGitConfig(org, repo, pr.Base.Name)
|
||||
if config == nil {
|
||||
return fmt.Errorf("Cannot find config for: %s", pr.URL)
|
||||
}
|
||||
@@ -212,17 +216,17 @@ func ProcessPR(pr *models.PullRequest) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
reviews, err := gitea.GetPullRequestReviews(org, repo, id)
|
||||
reviews, err := bot.gitea.GetPullRequestReviews(org, repo, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to fetch reviews for: %v: %w", pr.URL, err)
|
||||
}
|
||||
|
||||
timeline, err := common.FetchTimelineSinceReviewRequestOrPush(gitea, groupName, pr.Head.Sha, org, repo, id)
|
||||
timeline, err := common.FetchTimelineSinceReviewRequestOrPush(bot.gitea, bot.groupName, pr.Head.Sha, org, repo, id)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to fetch timeline to review. %w", err)
|
||||
}
|
||||
|
||||
groupConfig, err := config.GetReviewGroup(groupName)
|
||||
groupConfig, err := config.GetReviewGroup(bot.groupName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to fetch review group. %w", err)
|
||||
}
|
||||
@@ -233,30 +237,30 @@ func ProcessPR(pr *models.PullRequest) error {
|
||||
// pr.Head.Sha
|
||||
|
||||
for _, reviewer := range requestReviewers {
|
||||
if review := FindAcceptableReviewInTimeline(reviewer, timeline, reviews); review != nil {
|
||||
if ReviewAccepted(review.Body) {
|
||||
if review := bot.FindAcceptableReviewInTimeline(reviewer, timeline, reviews); review != nil {
|
||||
if bot.ReviewAccepted(review.Body) {
|
||||
if !common.IsDryRun {
|
||||
text := reviewer + " approved a review on behalf of " + groupName
|
||||
if review := FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
|
||||
_, err := gitea.AddReviewComment(pr, common.ReviewStateApproved, text)
|
||||
text := reviewer + " approved a review on behalf of " + bot.groupName
|
||||
if review := bot.FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
|
||||
_, err := bot.gitea.AddReviewComment(pr, common.ReviewStateApproved, text)
|
||||
if err != nil {
|
||||
common.LogError(" -> failed to write approval comment", err)
|
||||
}
|
||||
UnrequestReviews(gitea, org, repo, id, requestReviewers)
|
||||
bot.UnrequestReviews(org, repo, id, requestReviewers)
|
||||
}
|
||||
}
|
||||
common.LogInfo(" -> approved by", reviewer)
|
||||
common.LogInfo(" review at", review.Created)
|
||||
return nil
|
||||
} else if ReviewRejected(review.Body) {
|
||||
} else if bot.ReviewRejected(review.Body) {
|
||||
if !common.IsDryRun {
|
||||
text := reviewer + " requested changes on behalf of " + groupName + ". See " + review.HTMLURL
|
||||
if review := FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
|
||||
_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Changes requested. See review by: "+reviewer)
|
||||
text := reviewer + " requested changes on behalf of " + bot.groupName + ". See " + review.HTMLURL
|
||||
if review := bot.FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
|
||||
_, err := bot.gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, text)
|
||||
if err != nil {
|
||||
common.LogError(" -> failed to write rejecting comment", err)
|
||||
}
|
||||
UnrequestReviews(gitea, org, repo, id, requestReviewers)
|
||||
bot.UnrequestReviews(org, repo, id, requestReviewers)
|
||||
}
|
||||
}
|
||||
common.LogInfo(" -> declined by", reviewer)
|
||||
@@ -270,7 +274,7 @@ func ProcessPR(pr *models.PullRequest) error {
|
||||
if !groupConfig.Silent && len(requestReviewers) > 0 {
|
||||
common.LogDebug(" Requesting reviews for:", requestReviewers)
|
||||
if !common.IsDryRun {
|
||||
if _, err := gitea.RequestReviews(pr, requestReviewers...); err != nil {
|
||||
if _, err := bot.gitea.RequestReviews(pr, requestReviewers...); err != nil {
|
||||
common.LogDebug(" -> err:", err)
|
||||
}
|
||||
} else {
|
||||
@@ -283,42 +287,40 @@ func ProcessPR(pr *models.PullRequest) error {
|
||||
// add a helpful comment, if not yet added
|
||||
found_help_comment := false
|
||||
for _, t := range timeline {
|
||||
if t.Type == common.TimelineCommentType_Comment && t.User != nil && t.User.UserName == groupName {
|
||||
if t.Type == common.TimelineCommentType_Comment && t.User != nil && t.User.UserName == bot.groupName {
|
||||
found_help_comment = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found_help_comment && !common.IsDryRun {
|
||||
helpComment := fmt.Sprintln("Review by", groupName, "represents a group of reviewers:", strings.Join(requestReviewers, ", "), ".\n\n"+
|
||||
helpComment := fmt.Sprintln("Review by", bot.groupName, "represents a group of reviewers:", strings.Join(requestReviewers, ", "), ".\n\n"+
|
||||
"Do **not** use standard review interface to review on behalf of the group.\n"+
|
||||
"To accept the review on behalf of the group, create the following comment: `@"+groupName+": approve`.\n"+
|
||||
"To request changes on behalf of the group, create the following comment: `@"+groupName+": decline` followed with lines justifying the decision.\n"+
|
||||
"To accept the review on behalf of the group, create the following comment: `@"+bot.groupName+": approve`.\n"+
|
||||
"To request changes on behalf of the group, create the following comment: `@"+bot.groupName+": decline` followed with lines justifying the decision.\n"+
|
||||
"Future edits of the comments are ignored, a new comment is required to change the review state.")
|
||||
if slices.Contains(groupConfig.Reviewers, pr.User.UserName) {
|
||||
helpComment = helpComment + "\n\n" +
|
||||
"Submitter is member of this review group, hence they are excluded from being one of the reviewers here"
|
||||
}
|
||||
gitea.AddComment(pr, helpComment)
|
||||
bot.gitea.AddComment(pr, helpComment)
|
||||
}
|
||||
|
||||
return ReviewNotFinished
|
||||
}
|
||||
|
||||
func PeriodReviewCheck() {
|
||||
notifications, err := gitea.GetNotifications(common.GiteaNotificationType_Pull, nil)
|
||||
func (bot *ReviewBot) PeriodReviewCheck() {
|
||||
notifications, err := bot.gitea.GetNotifications(common.GiteaNotificationType_Pull, nil)
|
||||
if err != nil {
|
||||
common.LogError(" Error fetching unread notifications: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, notification := range notifications {
|
||||
ProcessNotifications(notification, gitea)
|
||||
bot.ProcessNotifications(notification)
|
||||
}
|
||||
}
|
||||
|
||||
var gitea common.Gitea
|
||||
|
||||
func main() {
|
||||
giteaUrl := flag.String("gitea-url", "https://src.opensuse.org", "Gitea instance used for reviews")
|
||||
rabbitMqHost := flag.String("rabbit-url", "amqps://rabbit.opensuse.org", "RabbitMQ instance where Gitea webhook notifications are sent")
|
||||
@@ -328,6 +330,24 @@ func main() {
|
||||
flag.BoolVar(&common.IsDryRun, "dry", false, "Dry run, no effect. For debugging")
|
||||
flag.Parse()
|
||||
|
||||
if err := common.SetLoggingLevelFromString(*logging); err != nil {
|
||||
common.LogError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if cf := os.Getenv("AUTOGITS_CONFIG"); len(cf) > 0 {
|
||||
*configFile = cf
|
||||
}
|
||||
if url := os.Getenv("AUTOGITS_URL"); len(url) > 0 {
|
||||
*giteaUrl = url
|
||||
}
|
||||
if url := os.Getenv("AUTOGITS_RABBITURL"); len(url) > 0 {
|
||||
*rabbitMqHost = url
|
||||
}
|
||||
if debug := os.Getenv("AUTOGITS_DEBUG"); len(debug) > 0 {
|
||||
common.SetLoggingLevel(common.LogLevelDebug)
|
||||
}
|
||||
|
||||
args := flag.Args()
|
||||
if len(args) != 1 {
|
||||
log.Println(" syntax:")
|
||||
@@ -336,7 +356,7 @@ func main() {
|
||||
flag.Usage()
|
||||
return
|
||||
}
|
||||
groupName = args[0]
|
||||
targetGroupName := args[0]
|
||||
|
||||
if *configFile == "" {
|
||||
common.LogError("Missing config file")
|
||||
@@ -359,36 +379,35 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
gitea = common.AllocateGiteaTransport(*giteaUrl)
|
||||
configs, err = common.ResolveWorkflowConfigs(gitea, configData)
|
||||
giteaTransport := common.AllocateGiteaTransport(*giteaUrl)
|
||||
configs, err := common.ResolveWorkflowConfigs(giteaTransport, configData)
|
||||
if err != nil {
|
||||
common.LogError("Cannot parse workflow configs:", err)
|
||||
return
|
||||
}
|
||||
|
||||
reviewer, err := gitea.GetCurrentUser()
|
||||
reviewer, err := giteaTransport.GetCurrentUser()
|
||||
if err != nil {
|
||||
common.LogError("Cannot fetch review user:", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := common.SetLoggingLevelFromString(*logging); err != nil {
|
||||
common.LogError(err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
if *interval < 1 {
|
||||
*interval = 1
|
||||
}
|
||||
|
||||
InitRegex(groupName)
|
||||
bot := &ReviewBot{
|
||||
gitea: giteaTransport,
|
||||
configs: configs,
|
||||
}
|
||||
bot.InitRegex(targetGroupName)
|
||||
|
||||
common.LogInfo(" ** processing group reviews for group:", groupName)
|
||||
common.LogInfo(" ** processing group reviews for group:", bot.groupName)
|
||||
common.LogInfo(" ** username in Gitea:", reviewer.UserName)
|
||||
common.LogInfo(" ** polling interval:", *interval, "min")
|
||||
common.LogInfo(" ** connecting to RabbitMQ:", *rabbitMqHost)
|
||||
|
||||
if groupName != reviewer.UserName {
|
||||
if bot.groupName != reviewer.UserName {
|
||||
common.LogError(" ***** Reviewer does not match group name. Aborting. *****")
|
||||
return
|
||||
}
|
||||
@@ -400,10 +419,13 @@ func main() {
|
||||
}
|
||||
|
||||
config_update := ConfigUpdatePush{
|
||||
bot: bot,
|
||||
config_modified: make(chan *common.AutogitConfig),
|
||||
}
|
||||
|
||||
process_issue_pr := IssueCommentProcessor{}
|
||||
process_issue_pr := IssueCommentProcessor{
|
||||
bot: bot,
|
||||
}
|
||||
|
||||
configUpdates := &common.RabbitMQGiteaEventsProcessor{
|
||||
Orgs: []string{},
|
||||
@@ -413,7 +435,7 @@ func main() {
|
||||
},
|
||||
}
|
||||
configUpdates.Connection().RabbitURL = u
|
||||
for _, c := range configs {
|
||||
for _, c := range bot.configs {
|
||||
if org, _, _ := c.GetPrjGit(); !slices.Contains(configUpdates.Orgs, org) {
|
||||
configUpdates.Orgs = append(configUpdates.Orgs, org)
|
||||
}
|
||||
@@ -426,17 +448,17 @@ func main() {
|
||||
select {
|
||||
case configTouched, ok := <-config_update.config_modified:
|
||||
if ok {
|
||||
for idx, c := range configs {
|
||||
for idx, c := range bot.configs {
|
||||
if c == configTouched {
|
||||
org, repo, branch := c.GetPrjGit()
|
||||
prj := fmt.Sprintf("%s/%s#%s", org, repo, branch)
|
||||
common.LogInfo("Detected config update for", prj)
|
||||
|
||||
new_config, err := common.ReadWorkflowConfig(gitea, prj)
|
||||
new_config, err := common.ReadWorkflowConfig(bot.gitea, prj)
|
||||
if err != nil {
|
||||
common.LogError("Failed parsing Project config for", prj, err)
|
||||
} else {
|
||||
configs[idx] = new_config
|
||||
bot.configs[idx] = new_config
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -446,7 +468,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
PeriodReviewCheck()
|
||||
bot.PeriodReviewCheck()
|
||||
time.Sleep(time.Duration(*interval * int64(time.Minute)))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,359 @@
|
||||
package main
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
"src.opensuse.org/autogits/common"
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
mock_common "src.opensuse.org/autogits/common/mock"
|
||||
)
|
||||
|
||||
func TestProcessPR(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockGitea := mock_common.NewMockGitea(ctrl)
|
||||
groupName := "testgroup"
|
||||
|
||||
bot := &ReviewBot{
|
||||
gitea: mockGitea,
|
||||
groupName: groupName,
|
||||
}
|
||||
bot.InitRegex(groupName)
|
||||
|
||||
org := "myorg"
|
||||
repo := "myrepo"
|
||||
prIndex := int64(1)
|
||||
headSha := "abcdef123456"
|
||||
|
||||
pr := &models.PullRequest{
|
||||
Index: prIndex,
|
||||
URL: "http://gitea/pr/1",
|
||||
State: "open",
|
||||
Base: &models.PRBranchInfo{
|
||||
Name: "main",
|
||||
Repo: &models.Repository{
|
||||
Name: repo,
|
||||
Owner: &models.User{
|
||||
UserName: org,
|
||||
},
|
||||
},
|
||||
},
|
||||
Head: &models.PRBranchInfo{
|
||||
Sha: headSha,
|
||||
},
|
||||
User: &models.User{
|
||||
UserName: "submitter",
|
||||
},
|
||||
RequestedReviewers: []*models.User{
|
||||
{UserName: groupName},
|
||||
},
|
||||
}
|
||||
|
||||
prjConfig := &common.AutogitConfig{
|
||||
GitProjectName: org + "/" + repo + "#main",
|
||||
ReviewGroups: []*common.ReviewGroup{
|
||||
{
|
||||
Name: groupName,
|
||||
Reviewers: []string{"reviewer1", "reviewer2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
bot.configs = common.AutogitConfigs{prjConfig}
|
||||
|
||||
t.Run("Review not requested for group", func(t *testing.T) {
|
||||
prNoRequest := *pr
|
||||
prNoRequest.RequestedReviewers = nil
|
||||
err := bot.ProcessPR(&prNoRequest)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("PR is closed", func(t *testing.T) {
|
||||
prClosed := *pr
|
||||
prClosed.State = "closed"
|
||||
err := bot.ProcessPR(&prClosed)
|
||||
if err != nil {
|
||||
t.Errorf("Expected no error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Successful Approval", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
// reviewer1 approved in timeline
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "reviewer1"},
|
||||
Body: "@" + groupName + ": approve",
|
||||
},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
|
||||
expectedText := "reviewer1 approved a review on behalf of " + groupName
|
||||
mockGitea.EXPECT().AddReviewComment(pr, common.ReviewStateApproved, expectedText).Return(nil, nil)
|
||||
mockGitea.EXPECT().UnrequestReview(org, repo, prIndex, gomock.Any()).Return(nil)
|
||||
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Dry Run - No actions taken", func(t *testing.T) {
|
||||
common.IsDryRun = true
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "reviewer1"},
|
||||
Body: "@" + groupName + ": approve",
|
||||
},
|
||||
}
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
|
||||
// No AddReviewComment or UnrequestReview should be called
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Approval already exists - No new comment", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
|
||||
approvalText := "reviewer1 approved a review on behalf of " + groupName
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Review,
|
||||
User: &models.User{UserName: groupName},
|
||||
Body: approvalText,
|
||||
},
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "reviewer1"},
|
||||
Body: "@" + groupName + ": approve",
|
||||
},
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: groupName},
|
||||
Body: "Help comment",
|
||||
},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
|
||||
// No AddReviewComment, UnrequestReview, or AddComment should be called
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Rejection already exists - No new comment", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
|
||||
rejectionText := "reviewer1 requested changes on behalf of " + groupName + ". See http://gitea/comment/123"
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Review,
|
||||
User: &models.User{UserName: groupName},
|
||||
Body: rejectionText,
|
||||
},
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "reviewer1"},
|
||||
Body: "@" + groupName + ": decline",
|
||||
HTMLURL: "http://gitea/comment/123",
|
||||
},
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: groupName},
|
||||
Body: "Help comment",
|
||||
},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Pending review - Help comment already exists", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: groupName},
|
||||
Body: "Some help comment",
|
||||
},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
|
||||
// It will try to request reviews
|
||||
mockGitea.EXPECT().RequestReviews(pr, "reviewer1", "reviewer2").Return(nil, nil)
|
||||
|
||||
// AddComment should NOT be called because bot already has a comment in timeline
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != ReviewNotFinished {
|
||||
t.Errorf("Expected ReviewNotFinished error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Submitter is group member - Excluded from review request", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
prSubmitterMember := *pr
|
||||
prSubmitterMember.User = &models.User{UserName: "reviewer1"}
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(nil, nil)
|
||||
mockGitea.EXPECT().RequestReviews(&prSubmitterMember, "reviewer2").Return(nil, nil)
|
||||
mockGitea.EXPECT().AddComment(&prSubmitterMember, gomock.Any()).Return(nil)
|
||||
err := bot.ProcessPR(&prSubmitterMember)
|
||||
if err != ReviewNotFinished {
|
||||
t.Errorf("Expected ReviewNotFinished error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Successful Rejection", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "reviewer2"},
|
||||
Body: "@" + groupName + ": decline",
|
||||
HTMLURL: "http://gitea/comment/999",
|
||||
},
|
||||
}
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
expectedText := "reviewer2 requested changes on behalf of " + groupName + ". See http://gitea/comment/999"
|
||||
mockGitea.EXPECT().AddReviewComment(pr, common.ReviewStateRequestChanges, expectedText).Return(nil, nil)
|
||||
mockGitea.EXPECT().UnrequestReview(org, repo, prIndex, gomock.Any()).Return(nil)
|
||||
err := bot.ProcessPR(pr)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Config not found", func(t *testing.T) {
|
||||
bot.configs = common.AutogitConfigs{}
|
||||
err := bot.ProcessPR(pr)
|
||||
if err == nil {
|
||||
t.Error("Expected error when config is missing, got nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Gitea error in GetPullRequestReviews", func(t *testing.T) {
|
||||
bot.configs = common.AutogitConfigs{prjConfig}
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, fmt.Errorf("gitea error"))
|
||||
err := bot.ProcessPR(pr)
|
||||
if err == nil {
|
||||
t.Error("Expected error from gitea, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestProcessNotifications(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockGitea := mock_common.NewMockGitea(ctrl)
|
||||
groupName := "testgroup"
|
||||
|
||||
bot := &ReviewBot{
|
||||
gitea: mockGitea,
|
||||
groupName: groupName,
|
||||
}
|
||||
bot.InitRegex(groupName)
|
||||
|
||||
org := "myorg"
|
||||
repo := "myrepo"
|
||||
prIndex := int64(123)
|
||||
notificationID := int64(456)
|
||||
|
||||
notification := &models.NotificationThread{
|
||||
ID: notificationID,
|
||||
Subject: &models.NotificationSubject{
|
||||
URL: fmt.Sprintf("http://gitea/api/v1/repos/%s/%s/pulls/%d", org, repo, prIndex),
|
||||
},
|
||||
}
|
||||
|
||||
t.Run("Notification Success", func(t *testing.T) {
|
||||
common.IsDryRun = false
|
||||
pr := &models.PullRequest{
|
||||
Index: prIndex,
|
||||
Base: &models.PRBranchInfo{
|
||||
Name: "main",
|
||||
Repo: &models.Repository{
|
||||
Name: repo,
|
||||
Owner: &models.User{UserName: org},
|
||||
},
|
||||
},
|
||||
|
||||
Head: &models.PRBranchInfo{
|
||||
Sha: "headsha",
|
||||
Repo: &models.Repository{
|
||||
Name: repo,
|
||||
Owner: &models.User{UserName: org},
|
||||
},
|
||||
},
|
||||
|
||||
User: &models.User{UserName: "submitter"},
|
||||
RequestedReviewers: []*models.User{{UserName: groupName}},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetPullRequest(org, repo, prIndex).Return(pr, nil)
|
||||
|
||||
prjConfig := &common.AutogitConfig{
|
||||
GitProjectName: org + "/" + repo + "#main",
|
||||
ReviewGroups: []*common.ReviewGroup{{Name: groupName, Reviewers: []string{"r1"}}},
|
||||
}
|
||||
bot.configs = common.AutogitConfigs{prjConfig}
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, prIndex).Return(nil, nil)
|
||||
timeline := []*models.TimelineComment{
|
||||
{
|
||||
Type: common.TimelineCommentType_Comment,
|
||||
User: &models.User{UserName: "r1"},
|
||||
Body: "@" + groupName + ": approve",
|
||||
},
|
||||
}
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, prIndex).Return(timeline, nil)
|
||||
expectedText := "r1 approved a review on behalf of " + groupName
|
||||
mockGitea.EXPECT().AddReviewComment(pr, common.ReviewStateApproved, expectedText).Return(nil, nil)
|
||||
mockGitea.EXPECT().UnrequestReview(org, repo, prIndex, gomock.Any()).Return(nil)
|
||||
|
||||
mockGitea.EXPECT().SetNotificationRead(notificationID).Return(nil)
|
||||
|
||||
bot.ProcessNotifications(notification)
|
||||
|
||||
})
|
||||
|
||||
t.Run("Invalid Notification URL", func(t *testing.T) {
|
||||
badNotification := &models.NotificationThread{
|
||||
Subject: &models.NotificationSubject{
|
||||
URL: "http://gitea/invalid/url",
|
||||
},
|
||||
}
|
||||
bot.ProcessNotifications(badNotification)
|
||||
})
|
||||
|
||||
t.Run("Gitea error in GetPullRequest", func(t *testing.T) {
|
||||
mockGitea.EXPECT().GetPullRequest(org, repo, prIndex).Return(nil, fmt.Errorf("gitea error"))
|
||||
bot.ProcessNotifications(notification)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReviewApprovalCheck(t *testing.T) {
|
||||
tests := []struct {
|
||||
@@ -60,16 +413,78 @@ func TestReviewApprovalCheck(t *testing.T) {
|
||||
InString: "@group2: disapprove",
|
||||
Rejected: true,
|
||||
},
|
||||
{
|
||||
Name: "Whitespace before colon",
|
||||
GroupName: "group",
|
||||
InString: "@group : LGTM",
|
||||
Approved: true,
|
||||
},
|
||||
{
|
||||
Name: "No whitespace after colon",
|
||||
GroupName: "group",
|
||||
InString: "@group:LGTM",
|
||||
Approved: true,
|
||||
},
|
||||
{
|
||||
Name: "Leading and trailing whitespace on line",
|
||||
GroupName: "group",
|
||||
InString: " @group: LGTM ",
|
||||
Approved: true,
|
||||
},
|
||||
{
|
||||
Name: "Multiline: Approved on second line",
|
||||
GroupName: "group",
|
||||
InString: "Random noise\n@group: approved",
|
||||
Approved: true,
|
||||
},
|
||||
{
|
||||
Name: "Multiline: Multiple group mentions, first wins",
|
||||
GroupName: "group",
|
||||
InString: "@group: decline\n@group: approve",
|
||||
Rejected: true,
|
||||
},
|
||||
{
|
||||
Name: "Multiline: Approved on second line",
|
||||
GroupName: "group",
|
||||
InString: "noise\n@group: approve\nmore noise",
|
||||
Approved: true,
|
||||
},
|
||||
{
|
||||
Name: "Not at start of line (even with whitespace)",
|
||||
GroupName: "group",
|
||||
InString: "Hello @group: approve",
|
||||
Approved: false,
|
||||
},
|
||||
{
|
||||
Name: "Rejecting with reason",
|
||||
GroupName: "group",
|
||||
InString: "@group: decline because of X, Y and Z",
|
||||
Rejected: true,
|
||||
},
|
||||
{
|
||||
Name: "No colon after group",
|
||||
GroupName: "group",
|
||||
InString: "@group LGTM",
|
||||
Approved: false,
|
||||
Rejected: false,
|
||||
},
|
||||
{
|
||||
Name: "Invalid char after group",
|
||||
GroupName: "group",
|
||||
InString: "@group! LGTM",
|
||||
Approved: false,
|
||||
Rejected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Name, func(t *testing.T) {
|
||||
InitRegex(test.GroupName)
|
||||
bot := &ReviewBot{}
|
||||
bot.InitRegex(test.GroupName)
|
||||
|
||||
if r := ReviewAccepted(test.InString); r != test.Approved {
|
||||
if r := bot.ReviewAccepted(test.InString); r != test.Approved {
|
||||
t.Error("ReviewAccepted() returned", r, "expecting", test.Approved)
|
||||
}
|
||||
if r := ReviewRejected(test.InString); r != test.Rejected {
|
||||
if r := bot.ReviewRejected(test.InString); r != test.Rejected {
|
||||
t.Error("ReviewRejected() returned", r, "expecting", test.Rejected)
|
||||
}
|
||||
})
|
||||
|
||||
@@ -7,7 +7,9 @@ import (
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
type IssueCommentProcessor struct{}
|
||||
type IssueCommentProcessor struct {
|
||||
bot *ReviewBot
|
||||
}
|
||||
|
||||
func (s *IssueCommentProcessor) ProcessFunc(req *common.Request) error {
|
||||
if req.Type != common.RequestType_IssueComment {
|
||||
@@ -19,14 +21,15 @@ func (s *IssueCommentProcessor) ProcessFunc(req *common.Request) error {
|
||||
repo := data.Repository.Name
|
||||
index := int64(data.Issue.Number)
|
||||
|
||||
pr, err := gitea.GetPullRequest(org, repo, index)
|
||||
pr, err := s.bot.gitea.GetPullRequest(org, repo, index)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to fetch PullRequest from event: %s/%s!%d Error: %w", org, repo, index, err)
|
||||
}
|
||||
return ProcessPR(pr)
|
||||
return s.bot.ProcessPR(pr)
|
||||
}
|
||||
|
||||
type ConfigUpdatePush struct {
|
||||
bot *ReviewBot
|
||||
config_modified chan *common.AutogitConfig
|
||||
}
|
||||
|
||||
@@ -46,7 +49,7 @@ func (s *ConfigUpdatePush) ProcessFunc(req *common.Request) error {
|
||||
}
|
||||
branch := data.Ref[len(branch_ref):]
|
||||
|
||||
c := configs.GetPrjGitConfig(org, repo, branch)
|
||||
c := s.bot.configs.GetPrjGitConfig(org, repo, branch)
|
||||
if c == nil {
|
||||
return nil
|
||||
}
|
||||
@@ -64,7 +67,7 @@ func (s *ConfigUpdatePush) ProcessFunc(req *common.Request) error {
|
||||
}
|
||||
|
||||
if modified_config {
|
||||
for _, config := range configs {
|
||||
for _, config := range s.bot.configs {
|
||||
if o, r, _ := config.GetPrjGit(); o == org && r == repo {
|
||||
s.config_modified <- config
|
||||
}
|
||||
|
||||
203
group-review/rabbit_test.go
Normal file
203
group-review/rabbit_test.go
Normal file
@@ -0,0 +1,203 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"go.uber.org/mock/gomock"
|
||||
"src.opensuse.org/autogits/common"
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
mock_common "src.opensuse.org/autogits/common/mock"
|
||||
)
|
||||
|
||||
func TestIssueCommentProcessor(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
mockGitea := mock_common.NewMockGitea(ctrl)
|
||||
groupName := "testgroup"
|
||||
bot := &ReviewBot{
|
||||
gitea: mockGitea,
|
||||
groupName: groupName,
|
||||
}
|
||||
bot.InitRegex(groupName)
|
||||
|
||||
processor := &IssueCommentProcessor{bot: bot}
|
||||
|
||||
org := "myorg"
|
||||
repo := "myrepo"
|
||||
index := 123
|
||||
|
||||
event := &common.IssueCommentWebhookEvent{
|
||||
Repository: &common.Repository{
|
||||
Name: repo,
|
||||
Owner: &common.Organization{
|
||||
Username: org,
|
||||
},
|
||||
},
|
||||
Issue: &common.IssueDetail{
|
||||
Number: index,
|
||||
},
|
||||
}
|
||||
|
||||
req := &common.Request{
|
||||
Type: common.RequestType_IssueComment,
|
||||
Data: event,
|
||||
}
|
||||
|
||||
t.Run("Successful Processing", func(t *testing.T) {
|
||||
pr := &models.PullRequest{
|
||||
Index: int64(index),
|
||||
Base: &models.PRBranchInfo{
|
||||
Name: "main",
|
||||
Repo: &models.Repository{
|
||||
Name: repo,
|
||||
Owner: &models.User{UserName: org},
|
||||
},
|
||||
},
|
||||
Head: &models.PRBranchInfo{
|
||||
Sha: "headsha",
|
||||
Repo: &models.Repository{
|
||||
Name: repo,
|
||||
Owner: &models.User{UserName: org},
|
||||
},
|
||||
},
|
||||
User: &models.User{UserName: "submitter"},
|
||||
RequestedReviewers: []*models.User{{UserName: groupName}},
|
||||
}
|
||||
|
||||
mockGitea.EXPECT().GetPullRequest(org, repo, int64(index)).Return(pr, nil)
|
||||
|
||||
prjConfig := &common.AutogitConfig{
|
||||
GitProjectName: org + "/" + repo + "#main",
|
||||
ReviewGroups: []*common.ReviewGroup{{Name: groupName, Reviewers: []string{"r1"}}},
|
||||
}
|
||||
bot.configs = common.AutogitConfigs{prjConfig}
|
||||
mockGitea.EXPECT().GetPullRequestReviews(org, repo, int64(index)).Return(nil, nil)
|
||||
mockGitea.EXPECT().GetTimeline(org, repo, int64(index)).Return(nil, nil)
|
||||
mockGitea.EXPECT().RequestReviews(pr, "r1").Return(nil, nil)
|
||||
mockGitea.EXPECT().AddComment(pr, gomock.Any()).Return(nil)
|
||||
|
||||
err := processor.ProcessFunc(req)
|
||||
if err != ReviewNotFinished {
|
||||
t.Errorf("Expected ReviewNotFinished, got %v", err)
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Gitea error in GetPullRequest", func(t *testing.T) {
|
||||
mockGitea.EXPECT().GetPullRequest(org, repo, int64(index)).Return(nil, fmt.Errorf("gitea error"))
|
||||
err := processor.ProcessFunc(req)
|
||||
if err == nil {
|
||||
t.Error("Expected error, got nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Wrong Request Type", func(t *testing.T) {
|
||||
wrongReq := &common.Request{Type: common.RequestType_Push}
|
||||
err := processor.ProcessFunc(wrongReq)
|
||||
if err == nil {
|
||||
t.Error("Expected error for wrong request type, got nil")
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigUpdatePush(t *testing.T) {
|
||||
ctrl := gomock.NewController(t)
|
||||
defer ctrl.Finish()
|
||||
|
||||
groupName := "testgroup"
|
||||
bot := &ReviewBot{
|
||||
groupName: groupName,
|
||||
}
|
||||
bot.InitRegex(groupName)
|
||||
|
||||
configChan := make(chan *common.AutogitConfig, 1)
|
||||
processor := &ConfigUpdatePush{
|
||||
bot: bot,
|
||||
config_modified: configChan,
|
||||
}
|
||||
|
||||
org := "myorg"
|
||||
repo := "myrepo"
|
||||
branch := "main"
|
||||
|
||||
prjConfig := &common.AutogitConfig{
|
||||
GitProjectName: org + "/" + repo + "#" + branch,
|
||||
Organization: org,
|
||||
Branch: branch,
|
||||
}
|
||||
bot.configs = common.AutogitConfigs{prjConfig}
|
||||
|
||||
event := &common.PushWebhookEvent{
|
||||
Ref: "refs/heads/" + branch,
|
||||
Repository: &common.Repository{
|
||||
Name: repo,
|
||||
Owner: &common.Organization{
|
||||
Username: org,
|
||||
},
|
||||
},
|
||||
Commits: []common.Commit{
|
||||
{
|
||||
Modified: []string{common.ProjectConfigFile},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
req := &common.Request{
|
||||
Type: common.RequestType_Push,
|
||||
Data: event,
|
||||
}
|
||||
|
||||
t.Run("Config Modified", func(t *testing.T) {
|
||||
err := processor.ProcessFunc(req)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case modified := <-configChan:
|
||||
if modified != prjConfig {
|
||||
t.Errorf("Expected modified config to be %v, got %v", prjConfig, modified)
|
||||
}
|
||||
default:
|
||||
t.Error("Expected config modification signal, but none received")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("No Config Modified", func(t *testing.T) {
|
||||
noConfigEvent := *event
|
||||
noConfigEvent.Commits = []common.Commit{{Modified: []string{"README.md"}}}
|
||||
noConfigReq := &common.Request{Type: common.RequestType_Push, Data: &noConfigEvent}
|
||||
|
||||
err := processor.ProcessFunc(noConfigReq)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error, got %v", err)
|
||||
}
|
||||
|
||||
select {
|
||||
case <-configChan:
|
||||
t.Error("Did not expect config modification signal")
|
||||
default:
|
||||
// Success
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Wrong Branch Ref", func(t *testing.T) {
|
||||
wrongBranchEvent := *event
|
||||
wrongBranchEvent.Ref = "refs/tags/v1.0"
|
||||
wrongBranchReq := &common.Request{Type: common.RequestType_Push, Data: &wrongBranchEvent}
|
||||
|
||||
err := processor.ProcessFunc(wrongBranchReq)
|
||||
if err == nil {
|
||||
t.Error("Expected error for wrong branch ref, got nil")
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("Config Not Found", func(t *testing.T) {
|
||||
bot.configs = common.AutogitConfigs{}
|
||||
err := processor.ProcessFunc(req)
|
||||
if err != nil {
|
||||
t.Errorf("Expected nil error even if config not found, got %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
11
integration/Dockerfile
Normal file
11
integration/Dockerfile
Normal file
@@ -0,0 +1,11 @@
|
||||
|
||||
FROM opensuse/tumbleweed
|
||||
ENV container=podman
|
||||
|
||||
ENV LANG=en_US.UTF-8
|
||||
|
||||
RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency
|
||||
|
||||
COPY . /opt/project/
|
||||
|
||||
WORKDIR /opt/project/integration
|
||||
76
integration/Makefile
Normal file
76
integration/Makefile
Normal file
@@ -0,0 +1,76 @@
|
||||
# We want to be able to test in two **modes**:
|
||||
# A. bots are used from official packages as defined in */Dockerfile.package
|
||||
# B. bots are just picked up from binaries that are placed in corresponding parent directory.
|
||||
|
||||
# The topology is defined in podman-compose file and can be spawned in two ways:
|
||||
# 1. Privileged container (needs no additional dependancies)
|
||||
# 2. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)
|
||||
|
||||
|
||||
# Typical workflow:
|
||||
# A1: - run 'make test_package'
|
||||
# B1: - run 'make test_local' (make sure that the go binaries in parent folder are built)
|
||||
# A2:
|
||||
# 1. 'make build_package' - prepares images (recommended, otherwise there might be surprises if image fails to build during `make up`)
|
||||
# 2. 'make up' - spawns podman-compose
|
||||
# 3. 'pytest -v tests/*' - run tests
|
||||
# 4. 'make down' - once the containers are not needed
|
||||
# B2: (make sure the go binaries in the parent folder are built)
|
||||
# 4. 'make build_local' - prepared images (recommended, otherwise there might be surprises if image fails to build during `make up`)
|
||||
# 5. 'make up' - spawns podman-compose
|
||||
# 6. 'pytest -v tests/*' - run tests
|
||||
# 7. 'make down' - once the containers are not needed
|
||||
|
||||
|
||||
AUTO_DETECT_MODE := $(shell if test -e ../workflow-pr/workflow-pr; then echo .local; else echo .package; fi)
|
||||
|
||||
# try to detect mode B1, otherwise mode A1
|
||||
test: GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE)
|
||||
test: build_container test_container
|
||||
|
||||
# mode A1
|
||||
test_package: GIWTF_IMAGE_SUFFIX=.package
|
||||
test_package: build_container test_container
|
||||
|
||||
# mode B1
|
||||
test_local: GIWTF_IMAGE_SUFFIX=.local
|
||||
test_local: build_container test_container
|
||||
|
||||
MODULES := gitea-events-rabbitmq-publisher obs-staging-bot workflow-pr
|
||||
|
||||
# Prepare topology 1
|
||||
build_container:
|
||||
podman build ../ -f integration/Dockerfile -t autogits_integration
|
||||
|
||||
# Run tests in topology 1
|
||||
test_container:
|
||||
podman run --rm --privileged -t --network integration_gitea-network -e GIWTF_IMAGE_SUFFIX=$(GIWTF_IMAGE_SUFFIX) autogits_integration /usr/bin/bash -c "make build && make up && sleep 25 && pytest -v tests/*"
|
||||
|
||||
|
||||
build_local: AUTO_DETECT_MODE=.local
|
||||
build_local: build
|
||||
|
||||
build_package: AUTO_DETECT_MODE=.package
|
||||
build_package: build
|
||||
|
||||
# parse all service images from podman-compose and build them (topology 2)
|
||||
build:
|
||||
podman pull docker.io/library/rabbitmq:3.13.7-management
|
||||
for i in $$(grep -A 1000 services: podman-compose.yml | grep -oE '^ [^: ]+'); do GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE) podman-compose build $$i || exit 1; done
|
||||
|
||||
# this will spawn prebuilt containers (topology 2)
|
||||
up:
|
||||
podman-compose up -d
|
||||
|
||||
# tear down (topology 2)
|
||||
down:
|
||||
podman-compose down
|
||||
|
||||
# mode A
|
||||
up-bots-package:
|
||||
GIWTF_IMAGE_SUFFIX=.package podman-compose up -d
|
||||
|
||||
# mode B
|
||||
up-bots-local:
|
||||
GIWTF_IMAGE_SUFFIX=.local podman-compose up -d
|
||||
|
||||
1
integration/clean.sh
Executable file
1
integration/clean.sh
Executable file
@@ -0,0 +1 @@
|
||||
sudo rm -rf gitea-data/ gitea-logs/ rabbitmq-data/ workflow-pr-repos/
|
||||
1
integration/gitea-events-rabbitmq-publisher/Dockerfile
Symbolic link
1
integration/gitea-events-rabbitmq-publisher/Dockerfile
Symbolic link
@@ -0,0 +1 @@
|
||||
Dockerfile.package
|
||||
15
integration/gitea-events-rabbitmq-publisher/Dockerfile.local
Normal file
15
integration/gitea-events-rabbitmq-publisher/Dockerfile.local
Normal file
@@ -0,0 +1,15 @@
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
# Add the custom CA to the trust store
|
||||
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
RUN zypper -n in which binutils
|
||||
|
||||
# Copy the pre-built binary into the container
|
||||
# The user will build this and place it in the same directory as this Dockerfile
|
||||
COPY gitea-events-rabbitmq-publisher/gitea-events-rabbitmq-publisher /usr/local/bin/
|
||||
COPY integration/gitea-events-rabbitmq-publisher/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
@@ -0,0 +1,15 @@
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
# Add the custom CA to the trust store
|
||||
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
|
||||
RUN zypper --gpg-auto-import-keys ref
|
||||
|
||||
RUN zypper -n in git-core curl autogits-gitea-events-rabbitmq-publisher binutils
|
||||
|
||||
COPY integration/gitea-events-rabbitmq-publisher/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
13
integration/gitea-events-rabbitmq-publisher/entrypoint.sh
Normal file
13
integration/gitea-events-rabbitmq-publisher/entrypoint.sh
Normal file
@@ -0,0 +1,13 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
exe=$(which gitea-events-rabbitmq-publisher 2>/dev/null) || :
|
||||
exe=${exe:-/usr/local/bin/gitea-events-rabbitmq-publisher}
|
||||
|
||||
package=$(rpm -qa | grep autogits-gitea-events-rabbitmq-publisher) || :
|
||||
|
||||
echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
|
||||
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :
|
||||
echo "RABBITMQ_HOST: $RABBITMQ_HOST"
|
||||
|
||||
exec $exe "$@"
|
||||
25
integration/gitea/Dockerfile
Normal file
25
integration/gitea/Dockerfile
Normal file
@@ -0,0 +1,25 @@
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
RUN zypper ar --repo https://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo \
|
||||
&& zypper -n --gpg-auto-import-keys refresh
|
||||
|
||||
RUN zypper -n install \
|
||||
git \
|
||||
sqlite3 \
|
||||
curl \
|
||||
gawk \
|
||||
openssh \
|
||||
jq \
|
||||
devel_Factory_git-workflow:gitea \
|
||||
&& rm -rf /var/cache/zypp/*
|
||||
|
||||
# Copy the minimal set of required files from the local 'container-files' directory
|
||||
COPY container-files/ /
|
||||
|
||||
RUN chmod -R 777 /etc/gitea/conf
|
||||
|
||||
# Make the setup and entrypoint scripts executable
|
||||
RUN chmod +x /opt/setup/setup-gitea.sh && chmod +x /opt/setup/entrypoint.sh && chmod +x /opt/setup/setup-webhook.sh && chmod +x /opt/setup/setup-dummy-data.sh
|
||||
|
||||
# Use the new entrypoint script to start the container
|
||||
ENTRYPOINT ["/opt/setup/entrypoint.sh"]
|
||||
42
integration/gitea/container-files/etc/gitea/conf/app.ini
Normal file
42
integration/gitea/container-files/etc/gitea/conf/app.ini
Normal file
@@ -0,0 +1,42 @@
|
||||
WORK_PATH = /var/lib/gitea
|
||||
|
||||
[server]
|
||||
CERT_FILE = /etc/gitea/https/cert.pem
|
||||
KEY_FILE = /etc/gitea/https/key.pem
|
||||
STATIC_ROOT_PATH = /usr/share/gitea
|
||||
APP_DATA_PATH = /var/lib/gitea/data
|
||||
PPROF_DATA_PATH = /var/lib/gitea/data/tmp/pprof
|
||||
PROTOCOL = http
|
||||
DOMAIN = gitea-test
|
||||
SSH_DOMAIN = gitea-test
|
||||
ROOT_URL = http://gitea-test:3000/
|
||||
HTTP_PORT = 3000
|
||||
DISABLE_SSH = false
|
||||
START_SSH_SERVER = true
|
||||
SSH_PORT = 3022
|
||||
LFS_START_SERVER = true
|
||||
|
||||
[lfs]
|
||||
PATH = /var/lib/gitea/data/lfs
|
||||
|
||||
[database]
|
||||
DB_TYPE = sqlite3
|
||||
PATH = /var/lib/gitea/data/gitea.db
|
||||
|
||||
[security]
|
||||
INSTALL_LOCK = true
|
||||
|
||||
[oauth2]
|
||||
ENABLED = false
|
||||
|
||||
[log]
|
||||
ROOT_PATH = /var/log/gitea
|
||||
MODE = console, file
|
||||
; Either "Trace", "Debug", "Info", "Warn", "Error" or "None", default is "Info"
|
||||
LEVEL = Debug
|
||||
|
||||
[service]
|
||||
ENABLE_BASIC_AUTHENTICATION = true
|
||||
|
||||
[webhook]
|
||||
ALLOWED_HOST_LIST = gitea-publisher
|
||||
19
integration/gitea/container-files/opt/setup/entrypoint.sh
Normal file
19
integration/gitea/container-files/opt/setup/entrypoint.sh
Normal file
@@ -0,0 +1,19 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Run setup to ensure permissions, migrations, and the admin user are ready.
|
||||
# The setup script is now idempotent.
|
||||
/opt/setup/setup-gitea.sh
|
||||
|
||||
# Start the webhook setup script in the background.
|
||||
# It will wait for the main Gitea process to be ready before creating the webhook.
|
||||
/opt/setup/setup-webhook.sh &
|
||||
|
||||
echo "Starting Gitea..."
|
||||
|
||||
# The original systemd service ran as user 'gitea' and group 'gitea'
|
||||
# with a working directory of '/var/lib/gitea'.
|
||||
# We will switch to that user and run the web command.
|
||||
# Using exec means Gitea will become PID 1, allowing it to receive signals correctly.
|
||||
cd /var/lib/gitea
|
||||
exec su -s /bin/bash gitea -c "/usr/bin/gitea web --config /etc/gitea/conf/app.ini"
|
||||
@@ -0,0 +1,2 @@
|
||||
#!/bin/bash
|
||||
# This script is now empty as dummy data setup is handled by pytest fixtures.
|
||||
100
integration/gitea/container-files/opt/setup/setup-gitea.sh
Normal file
100
integration/gitea/container-files/opt/setup/setup-gitea.sh
Normal file
@@ -0,0 +1,100 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
set -e
|
||||
|
||||
# Set ownership on the volume mounts. This allows the 'gitea' user to write to them.
|
||||
# We use -R to ensure all subdirectories (like /var/lib/gitea/data) are covered.
|
||||
chown -R gitea:gitea /var/lib/gitea /var/log/gitea
|
||||
|
||||
# Set ownership on the config directory.
|
||||
chown -R gitea:gitea /etc/gitea
|
||||
|
||||
# Run database migrations to initialize the sqlite3 db based on app.ini.
|
||||
su -s /bin/bash gitea -c 'gitea migrate'
|
||||
|
||||
# Create a default admin user if it doesn't exist
|
||||
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "admin" {found=1} END {exit !found}'; then
|
||||
echo "Creating admin user..."
|
||||
su -s /bin/bash gitea -c 'gitea admin user create --username admin --password opensuse --email admin@example.com --must-change-password=false --admin'
|
||||
else
|
||||
echo "Admin user already exists."
|
||||
fi
|
||||
|
||||
# Generate an access token for the admin user
|
||||
ADMIN_TOKEN_FILE="/var/lib/gitea/admin.token"
|
||||
if [ -f "$ADMIN_TOKEN_FILE" ]; then
|
||||
echo "Admin token already exists at $ADMIN_TOKEN_FILE."
|
||||
else
|
||||
echo "Generating admin token..."
|
||||
ADMIN_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u admin -t admin-token")
|
||||
if [ -n "$ADMIN_TOKEN" ]; then
|
||||
printf "%s" "$ADMIN_TOKEN" > "$ADMIN_TOKEN_FILE"
|
||||
chmod 777 "$ADMIN_TOKEN_FILE"
|
||||
chown gitea:gitea "$ADMIN_TOKEN_FILE"
|
||||
echo "Admin token generated and saved to $ADMIN_TOKEN_FILE."
|
||||
else
|
||||
echo "Failed to generate admin token."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Generate SSH key for the admin user if it doesn't exist
|
||||
SSH_KEY_DIR="/var/lib/gitea/ssh-keys"
|
||||
mkdir -p "$SSH_KEY_DIR"
|
||||
if [ ! -f "$SSH_KEY_DIR/id_ed25519" ]; then
|
||||
echo "Generating SSH key for admin user..."
|
||||
ssh-keygen -t ed25519 -N "" -f "$SSH_KEY_DIR/id_ed25519"
|
||||
chown -R gitea:gitea "$SSH_KEY_DIR"
|
||||
chmod 700 "$SSH_KEY_DIR"
|
||||
chmod 600 "$SSH_KEY_DIR/id_ed25519"
|
||||
chmod 644 "$SSH_KEY_DIR/id_ed25519.pub"
|
||||
fi
|
||||
|
||||
# Create a autogits_obs_staging_bot user if it doesn't exist
|
||||
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "autogits_obs_staging_bot" {found=1} END {exit !found}'; then
|
||||
echo "Creating autogits_obs_staging_bot user..."
|
||||
su -s /bin/bash gitea -c 'gitea admin user create --username autogits_obs_staging_bot --password opensuse --email autogits_obs_staging_bot@example.com --must-change-password=false'
|
||||
else
|
||||
echo "autogits_obs_staging_bot user already exists."
|
||||
fi
|
||||
|
||||
# Generate an access token for the autogits_obs_staging_bot user
|
||||
BOT_TOKEN_FILE="/var/lib/gitea/autogits_obs_staging_bot.token"
|
||||
if [ -f "$BOT_TOKEN_FILE" ]; then
|
||||
echo "autogits_obs_staging_bot token already exists at $BOT_TOKEN_FILE."
|
||||
else
|
||||
echo "Generating autogits_obs_staging_bot token..."
|
||||
BOT_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u autogits_obs_staging_bot -t autogits_obs_staging_bot-token")
|
||||
if [ -n "$BOT_TOKEN" ]; then
|
||||
printf "%s" "$BOT_TOKEN" > "$BOT_TOKEN_FILE"
|
||||
chmod 666 "$BOT_TOKEN_FILE"
|
||||
chown gitea:gitea "$BOT_TOKEN_FILE"
|
||||
echo "autogits_obs_staging_bot token generated and saved to $BOT_TOKEN_FILE."
|
||||
else
|
||||
echo "Failed to generate autogits_obs_staging_bot token."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create a workflow-pr user if it doesn't exist
|
||||
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "workflow-pr" {found=1} END {exit !found}'; then
|
||||
echo "Creating workflow-pr user..."
|
||||
su -s /bin/bash gitea -c 'gitea admin user create --username workflow-pr --password opensuse --email workflow-pr@example.com --must-change-password=false'
|
||||
else
|
||||
echo "workflow-pr user already exists."
|
||||
fi
|
||||
|
||||
# Generate an access token for the workflow-pr user
|
||||
BOT_TOKEN_FILE="/var/lib/gitea/workflow-pr.token"
|
||||
if [ -f "$BOT_TOKEN_FILE" ]; then
|
||||
echo "workflow-pr token already exists at $BOT_TOKEN_FILE."
|
||||
else
|
||||
echo "Generating workflow-pr token..."
|
||||
BOT_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u workflow-pr -t workflow-pr-token")
|
||||
if [ -n "$BOT_TOKEN" ]; then
|
||||
printf "%s" "$BOT_TOKEN" > "$BOT_TOKEN_FILE"
|
||||
chmod 666 "$BOT_TOKEN_FILE"
|
||||
chown gitea:gitea "$BOT_TOKEN_FILE"
|
||||
echo "workflow-pr token generated and saved to $BOT_TOKEN_FILE."
|
||||
else
|
||||
echo "Failed to generate workflow-pr token."
|
||||
fi
|
||||
fi
|
||||
92
integration/gitea/container-files/opt/setup/setup-webhook.sh
Normal file
92
integration/gitea/container-files/opt/setup/setup-webhook.sh
Normal file
@@ -0,0 +1,92 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
GITEA_URL="http://localhost:3000"
|
||||
WEBHOOK_URL="http://gitea-publisher:8002/rabbitmq-forwarder"
|
||||
TOKEN_NAME="webhook-creator"
|
||||
|
||||
echo "Webhook setup script started in background."
|
||||
|
||||
# Wait 10s for the main Gitea process to start
|
||||
sleep 10
|
||||
|
||||
# Wait for Gitea API to be ready
|
||||
echo "Waiting for Gitea API at $GITEA_URL..."
|
||||
while ! curl -s -f "$GITEA_URL/api/v1/version" > /dev/null; do
|
||||
echo "Gitea API not up yet, waiting 5s..."
|
||||
sleep 5
|
||||
done
|
||||
echo "Gitea API is up."
|
||||
|
||||
# The `gitea admin` command needs to be run as the gitea user.
|
||||
# The -raw flag gives us the token directly.
|
||||
echo "Generating or retrieving admin token..."
|
||||
TOKEN_FILE="/var/lib/gitea/admin.token"
|
||||
|
||||
if [ -f "$TOKEN_FILE" ]; then
|
||||
TOKEN=$(cat "$TOKEN_FILE" | tr -d '\n\r ')
|
||||
echo "Admin token loaded from $TOKEN_FILE."
|
||||
else
|
||||
TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u admin -t $TOKEN_NAME")
|
||||
if [ -n "$TOKEN" ]; then
|
||||
printf "%s" "$TOKEN" > "$TOKEN_FILE"
|
||||
chmod 666 "$TOKEN_FILE"
|
||||
chown gitea:gitea "$TOKEN_FILE"
|
||||
echo "Admin token generated and saved to $TOKEN_FILE."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -z "$TOKEN" ]; then
|
||||
echo "Failed to generate or retrieve admin token. This might be because the token already exists in Gitea but not in $TOKEN_FILE. Exiting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Run the dummy data setup script
|
||||
/opt/setup/setup-dummy-data.sh "$GITEA_URL" "$TOKEN"
|
||||
|
||||
# Add SSH key via API
|
||||
PUB_KEY_FILE="/var/lib/gitea/ssh-keys/id_ed25519.pub"
|
||||
if [ -f "$PUB_KEY_FILE" ]; then
|
||||
echo "Checking for existing SSH key 'bot-key'..."
|
||||
KEYS_URL="$GITEA_URL/api/v1/admin/users/workflow-pr/keys"
|
||||
EXISTING_KEYS=$(curl -s -X GET -H "Authorization: token $TOKEN" "$KEYS_URL")
|
||||
|
||||
if ! echo "$EXISTING_KEYS" | grep -q "\"title\":\"bot-key\""; then
|
||||
echo "Registering SSH key 'bot-key' via API..."
|
||||
KEY_CONTENT=$(cat "$PUB_KEY_FILE")
|
||||
curl -s -X POST "$KEYS_URL" \
|
||||
-H "Authorization: token $TOKEN" \
|
||||
-H "Content-Type: application/json" \
|
||||
-d "{
|
||||
\"key\": \"$KEY_CONTENT\",
|
||||
\"read_only\": false,
|
||||
\"title\": \"bot-key\"
|
||||
}"
|
||||
echo -e "\nSSH key registered."
|
||||
else
|
||||
echo "SSH key 'bot-key' already registered."
|
||||
fi
|
||||
fi
|
||||
|
||||
# Check if the webhook already exists
|
||||
echo "Checking for existing system webhook..."
|
||||
DB_PATH="/var/lib/gitea/data/gitea.db"
|
||||
EXISTS=$(su -s /bin/bash gitea -c "sqlite3 '$DB_PATH' \"SELECT 1 FROM webhook WHERE url = '$WEBHOOK_URL' AND is_system_webhook = 1 LIMIT 1;\"")
|
||||
|
||||
if [ "$EXISTS" = "1" ]; then
|
||||
echo "System webhook for $WEBHOOK_URL already exists. Exiting."
|
||||
exit 0
|
||||
fi
|
||||
|
||||
echo "Creating Gitea system webhook for $WEBHOOK_URL via direct database INSERT..."
|
||||
# The events JSON requires escaped double quotes for the sqlite3 command.
|
||||
EVENTS_JSON='{\"push_only\":false,\"send_everything\":true,\"choose_events\":false,\"branch_filter\":\"*\",\"events\":{\"create\":false,\"delete\":false,\"fork\":false,\"issue_assign\":false,\"issue_comment\":false,\"issue_label\":false,\"issue_milestone\":false,\"issues\":false,\"package\":false,\"pull_request\":false,\"pull_request_assign\":false,\"pull_request_comment\":false,\"pull_request_label\":false,\"pull_request_milestone\":false,\"pull_request_review\":false,\"pull_request_review_request\":false,\"pull_request_sync\":false,\"push\":false,\"release\":false,\"repository\":false,\"status\":false,\"wiki\":false,\"workflow_job\":false,\"workflow_run\":false}}'
|
||||
NOW_UNIX=$(date +%s)
|
||||
|
||||
INSERT_CMD="INSERT INTO webhook (repo_id, owner_id, is_system_webhook, url, http_method, content_type, events, is_active, type, meta, created_unix, updated_unix) VALUES (0, 0, 1, '$WEBHOOK_URL', 'POST', 1, '$EVENTS_JSON', 1, 'gitea', '', $NOW_UNIX, $NOW_UNIX);"
|
||||
|
||||
su -s /bin/bash gitea -c "sqlite3 '$DB_PATH' \"$INSERT_CMD\""
|
||||
|
||||
echo "System webhook created successfully."
|
||||
|
||||
exit 0
|
||||
14
integration/mock-obs/Dockerfile
Normal file
14
integration/mock-obs/Dockerfile
Normal file
@@ -0,0 +1,14 @@
|
||||
# Use a base Python image
|
||||
FROM registry.suse.com/bci/python:3.11
|
||||
|
||||
# Set the working directory
|
||||
WORKDIR /app
|
||||
|
||||
# Copy the server script
|
||||
COPY server.py .
|
||||
|
||||
# Expose the port the server will run on
|
||||
EXPOSE 8080
|
||||
|
||||
# Command to run the server
|
||||
CMD ["python3", "-u", "server.py"]
|
||||
@@ -0,0 +1,18 @@
|
||||
<project name="openSUSE:Leap:16.0:PullRequest">
|
||||
<title>Leap 16.0 PullRequest area</title>
|
||||
<description>Base project to define the pull request builds</description>
|
||||
<person userid="autogits_obs_staging_bot" role="maintainer"/>
|
||||
<person userid="maxlin_factory" role="maintainer"/>
|
||||
<group groupid="maintenance-opensuse.org" role="maintainer"/>
|
||||
<debuginfo>
|
||||
<enable/>
|
||||
</debuginfo>
|
||||
<repository name="standard">
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<arch>x86_64</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
</project>
|
||||
@@ -0,0 +1,59 @@
|
||||
<project name="openSUSE:Leap:16.0">
|
||||
<title>openSUSE Leap 16.0 based on SLFO</title>
|
||||
<description>Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)</description>
|
||||
<link project="openSUSE:Backports:SLE-16.0"/>
|
||||
<scmsync>http://gitea-test:3000/products/SLFO#main</scmsync>
|
||||
<person userid="dimstar_suse" role="maintainer"/>
|
||||
<person userid="lkocman-factory" role="maintainer"/>
|
||||
<person userid="maxlin_factory" role="maintainer"/>
|
||||
<person userid="factory-auto" role="reviewer"/>
|
||||
<person userid="licensedigger" role="reviewer"/>
|
||||
<group groupid="autobuild-team" role="maintainer"/>
|
||||
<group groupid="factory-maintainers" role="maintainer"/>
|
||||
<group groupid="maintenance-opensuse.org" role="maintainer"/>
|
||||
<group groupid="factory-staging" role="reviewer"/>
|
||||
<build>
|
||||
<disable repository="ports"/>
|
||||
</build>
|
||||
<debuginfo>
|
||||
<enable/>
|
||||
</debuginfo>
|
||||
<repository name="standard" rebuild="local">
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>local</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
<repository name="product">
|
||||
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="product" trigger="manual"/>
|
||||
<path project="openSUSE:Leap:16.0:NonFree" repository="standard"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="images"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>local</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
<repository name="ports">
|
||||
<arch>armv7l</arch>
|
||||
</repository>
|
||||
<repository name="images">
|
||||
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="images" trigger="manual"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
</project>
|
||||
140
integration/mock-obs/server.py
Normal file
140
integration/mock-obs/server.py
Normal file
@@ -0,0 +1,140 @@
|
||||
import http.server
|
||||
import socketserver
|
||||
import os
|
||||
import logging
|
||||
import signal
|
||||
import sys
|
||||
import threading
|
||||
import fnmatch
|
||||
|
||||
PORT = 8080
|
||||
RESPONSE_DIR = "/app/responses"
|
||||
STATE_DIR = "/tmp/mock_obs_state"
|
||||
|
||||
class MockOBSHandler(http.server.SimpleHTTPRequestHandler):
|
||||
def do_GET(self):
|
||||
logging.info(f"GET request for: {self.path}")
|
||||
path_without_query = self.path.split('?')[0]
|
||||
|
||||
# Check for state stored by a PUT request first
|
||||
sanitized_put_path = 'PUT' + path_without_query.replace('/', '_')
|
||||
state_file_path = os.path.join(STATE_DIR, sanitized_put_path)
|
||||
if os.path.exists(state_file_path):
|
||||
logging.info(f"Found stored PUT state for {self.path} at {state_file_path}")
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/xml")
|
||||
file_size = os.path.getsize(state_file_path)
|
||||
self.send_header("Content-Length", str(file_size))
|
||||
self.end_headers()
|
||||
with open(state_file_path, 'rb') as f:
|
||||
self.wfile.write(f.read())
|
||||
return
|
||||
|
||||
# If no PUT state file, fall back to the glob/exact match logic
|
||||
self.handle_request('GET')
|
||||
|
||||
def do_PUT(self):
|
||||
logging.info(f"PUT request for: {self.path}")
|
||||
logging.info(f"Headers: {self.headers}")
|
||||
path_without_query = self.path.split('?')[0]
|
||||
|
||||
body = b''
|
||||
if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
|
||||
logging.info("Chunked transfer encoding detected")
|
||||
while True:
|
||||
line = self.rfile.readline().strip()
|
||||
if not line:
|
||||
break
|
||||
chunk_length = int(line, 16)
|
||||
if chunk_length == 0:
|
||||
self.rfile.readline()
|
||||
break
|
||||
body += self.rfile.read(chunk_length)
|
||||
self.rfile.read(2) # Read the trailing CRLF
|
||||
else:
|
||||
content_length = int(self.headers.get('Content-Length', 0))
|
||||
body = self.rfile.read(content_length)
|
||||
|
||||
logging.info(f"Body: {body.decode('utf-8')}")
|
||||
sanitized_path = 'PUT' + path_without_query.replace('/', '_')
|
||||
state_file_path = os.path.join(STATE_DIR, sanitized_path)
|
||||
|
||||
logging.info(f"Saving state for {self.path} to {state_file_path}")
|
||||
os.makedirs(os.path.dirname(state_file_path), exist_ok=True)
|
||||
with open(state_file_path, 'wb') as f:
|
||||
f.write(body)
|
||||
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "text/plain")
|
||||
response_body = b"OK"
|
||||
self.send_header("Content-Length", str(len(response_body)))
|
||||
self.end_headers()
|
||||
self.wfile.write(response_body)
|
||||
|
||||
def do_POST(self):
|
||||
logging.info(f"POST request for: {self.path}")
|
||||
self.handle_request('POST')
|
||||
|
||||
def do_DELETE(self):
|
||||
logging.info(f"DELETE request for: {self.path}")
|
||||
self.handle_request('DELETE')
|
||||
|
||||
def handle_request(self, method):
|
||||
path_without_query = self.path.split('?')[0]
|
||||
sanitized_request_path = method + path_without_query.replace('/', '_')
|
||||
logging.info(f"Handling request, looking for match for: {sanitized_request_path}")
|
||||
|
||||
response_file = None
|
||||
# Check for glob match first
|
||||
if os.path.exists(RESPONSE_DIR):
|
||||
for filename in os.listdir(RESPONSE_DIR):
|
||||
if fnmatch.fnmatch(sanitized_request_path, filename):
|
||||
response_file = os.path.join(RESPONSE_DIR, filename)
|
||||
logging.info(f"Found matching response file (glob): {response_file}")
|
||||
break
|
||||
|
||||
# Fallback to exact match if no glob match
|
||||
if response_file is None:
|
||||
exact_file = os.path.join(RESPONSE_DIR, sanitized_request_path)
|
||||
if os.path.exists(exact_file):
|
||||
response_file = exact_file
|
||||
logging.info(f"Found matching response file (exact): {response_file}")
|
||||
|
||||
if response_file:
|
||||
logging.info(f"Serving content from {response_file}")
|
||||
self.send_response(200)
|
||||
self.send_header("Content-type", "application/xml")
|
||||
file_size = os.path.getsize(response_file)
|
||||
self.send_header("Content-Length", str(file_size))
|
||||
self.end_headers()
|
||||
with open(response_file, 'rb') as f:
|
||||
self.wfile.write(f.read())
|
||||
else:
|
||||
logging.info(f"Response file not found for {sanitized_request_path}. Sending 404.")
|
||||
self.send_response(404)
|
||||
self.send_header("Content-type", "text/plain")
|
||||
body = f"Mock response not found for {sanitized_request_path}".encode('utf-8')
|
||||
self.send_header("Content-Length", str(len(body)))
|
||||
self.end_headers()
|
||||
self.wfile.write(body)
|
||||
|
||||
if __name__ == "__main__":
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
|
||||
|
||||
if not os.path.exists(STATE_DIR):
|
||||
logging.info(f"Creating state directory: {STATE_DIR}")
|
||||
os.makedirs(STATE_DIR)
|
||||
if not os.path.exists(RESPONSE_DIR):
|
||||
os.makedirs(RESPONSE_DIR)
|
||||
|
||||
with socketserver.TCPServer(("", PORT), MockOBSHandler) as httpd:
|
||||
logging.info(f"Serving mock OBS API on port {PORT}")
|
||||
|
||||
def graceful_shutdown(sig, frame):
|
||||
logging.info("Received SIGTERM, shutting down gracefully...")
|
||||
threading.Thread(target=httpd.shutdown).start()
|
||||
|
||||
signal.signal(signal.SIGTERM, graceful_shutdown)
|
||||
|
||||
httpd.serve_forever()
|
||||
logging.info("Server has shut down.")
|
||||
1
integration/obs-staging-bot/Dockerfile
Symbolic link
1
integration/obs-staging-bot/Dockerfile
Symbolic link
@@ -0,0 +1 @@
|
||||
./Dockerfile.package
|
||||
18
integration/obs-staging-bot/Dockerfile.local
Normal file
18
integration/obs-staging-bot/Dockerfile.local
Normal file
@@ -0,0 +1,18 @@
|
||||
# Use a base Python image
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
# Install any necessary dependencies for the bot
|
||||
# e.g., git, curl, etc.
|
||||
RUN zypper -n in git-core curl binutils
|
||||
|
||||
# Copy the bot binary and its entrypoint script
|
||||
COPY obs-staging-bot/obs-staging-bot /usr/local/bin/obs-staging-bot
|
||||
COPY integration/obs-staging-bot/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Create a non-root user to run the bot
|
||||
RUN useradd -m -u 1001 bot
|
||||
USER 1001
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
19
integration/obs-staging-bot/Dockerfile.package
Normal file
19
integration/obs-staging-bot/Dockerfile.package
Normal file
@@ -0,0 +1,19 @@
|
||||
# Use a base Python image
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
|
||||
RUN zypper --gpg-auto-import-keys ref
|
||||
|
||||
# Install any necessary dependencies for the bot
|
||||
# e.g., git, curl, etc.
|
||||
RUN zypper -n in git-core curl autogits-obs-staging-bot binutils
|
||||
|
||||
COPY integration/obs-staging-bot/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Create a non-root user to run the bot
|
||||
RUN useradd -m -u 1001 bot
|
||||
USER 1001
|
||||
|
||||
# Set the entrypoint
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
28
integration/obs-staging-bot/entrypoint.sh
Normal file
28
integration/obs-staging-bot/entrypoint.sh
Normal file
@@ -0,0 +1,28 @@
|
||||
#!/bin/sh
|
||||
set -e
|
||||
|
||||
# This script waits for the Gitea admin token to be created,
|
||||
# exports it as an environment variable, and then executes the main container command.
|
||||
|
||||
TOKEN_FILE="/gitea-data/autogits_obs_staging_bot.token"
|
||||
|
||||
echo "OBS Staging Bot: Waiting for Gitea autogits_obs_staging_bot token at $TOKEN_FILE..."
|
||||
while [ ! -s "$TOKEN_FILE" ]; do
|
||||
sleep 2
|
||||
done
|
||||
|
||||
export GITEA_TOKEN=$(cat "$TOKEN_FILE" | tr -d '\n\r ')
|
||||
echo "OBS Staging Bot: GITEA_TOKEN exported."
|
||||
|
||||
# Execute the bot as the current user (root), using 'env' to pass required variables.
|
||||
echo "OBS Staging Bot: Executing bot..."
|
||||
|
||||
exe=$(which obs-staging-bot)
|
||||
exe=${exe:-/usr/local/bin/obs-staging-bot}
|
||||
|
||||
package=$(rpm -qa | grep autogits-obs-staging-bot) || :
|
||||
|
||||
echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
|
||||
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :
|
||||
|
||||
exec $exe "$@"
|
||||
136
integration/podman-compose.yml
Normal file
136
integration/podman-compose.yml
Normal file
@@ -0,0 +1,136 @@
|
||||
version: "3.8"
|
||||
|
||||
networks:
|
||||
gitea-network:
|
||||
driver: bridge
|
||||
|
||||
services:
|
||||
gitea:
|
||||
build: ./gitea
|
||||
container_name: gitea-test
|
||||
environment:
|
||||
- GITEA_WORK_DIR=/var/lib/gitea
|
||||
networks:
|
||||
- gitea-network
|
||||
ports:
|
||||
# Map the HTTP and SSH ports defined in your app.ini
|
||||
- "3000:3000"
|
||||
- "3022:3022"
|
||||
volumes:
|
||||
# Persist Gitea's data (repositories, sqlite db, etc.) to a local directory
|
||||
# The :z flag allows sharing between containers
|
||||
- ./gitea-data:/var/lib/gitea:z
|
||||
# Persist Gitea's logs to a local directory
|
||||
- ./gitea-logs:/var/log/gitea:Z
|
||||
restart: unless-stopped
|
||||
|
||||
rabbitmq:
|
||||
image: rabbitmq:3.13.7-management
|
||||
container_name: rabbitmq-test
|
||||
healthcheck:
|
||||
test: ["CMD", "rabbitmq-diagnostics", "check_running", "-q"]
|
||||
interval: 30s
|
||||
timeout: 30s
|
||||
retries: 3
|
||||
networks:
|
||||
- gitea-network
|
||||
ports:
|
||||
# AMQP protocol port with TLS
|
||||
- "5671:5671"
|
||||
# HTTP management UI
|
||||
- "15672:15672"
|
||||
volumes:
|
||||
# Persist RabbitMQ data
|
||||
- ./rabbitmq-data:/var/lib/rabbitmq:Z
|
||||
# Mount TLS certs
|
||||
- ./rabbitmq-config/certs:/etc/rabbitmq/certs:Z
|
||||
# Mount rabbitmq config
|
||||
- ./rabbitmq-config/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:Z
|
||||
# Mount exchange definitions
|
||||
- ./rabbitmq-config/definitions.json:/etc/rabbitmq/definitions.json:Z
|
||||
restart: unless-stopped
|
||||
|
||||
gitea-publisher:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: integration/gitea-events-rabbitmq-publisher/Dockerfile${GIWTF_IMAGE_SUFFIX}
|
||||
container_name: gitea-publisher
|
||||
networks:
|
||||
- gitea-network
|
||||
depends_on:
|
||||
gitea:
|
||||
condition: service_started
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- RABBITMQ_HOST=rabbitmq-test
|
||||
- RABBITMQ_USERNAME=gitea
|
||||
- RABBITMQ_PASSWORD=gitea
|
||||
- SSL_CERT_FILE=/usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
command: [ "-listen", "0.0.0.0:8002", "-topic-domain", "suse", "-debug" ]
|
||||
restart: unless-stopped
|
||||
|
||||
workflow-pr:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: integration/workflow-pr/Dockerfile${GIWTF_IMAGE_SUFFIX}
|
||||
container_name: workflow-pr
|
||||
networks:
|
||||
- gitea-network
|
||||
depends_on:
|
||||
gitea:
|
||||
condition: service_started
|
||||
rabbitmq:
|
||||
condition: service_healthy
|
||||
environment:
|
||||
- AMQP_USERNAME=gitea
|
||||
- AMQP_PASSWORD=gitea
|
||||
- SSL_CERT_FILE=/usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
volumes:
|
||||
- ./gitea-data:/var/lib/gitea:ro,z
|
||||
- ./workflow-pr/workflow-pr.json:/etc/workflow-pr.json:ro,z
|
||||
- ./workflow-pr-repos:/var/lib/workflow-pr/repos:Z
|
||||
command: [
|
||||
"-check-on-start",
|
||||
"-debug",
|
||||
"-gitea-url", "http://gitea-test:3000",
|
||||
"-url", "amqps://rabbitmq-test:5671",
|
||||
"-config", "/etc/workflow-pr.json",
|
||||
"-repo-path", "/var/lib/workflow-pr/repos"
|
||||
]
|
||||
restart: unless-stopped
|
||||
|
||||
mock-obs:
|
||||
build: ./mock-obs
|
||||
container_name: mock-obs
|
||||
networks:
|
||||
- gitea-network
|
||||
ports:
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- ./mock-obs/responses:/app/responses:z # Use :z for shared SELinux label
|
||||
restart: unless-stopped
|
||||
|
||||
obs-staging-bot:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: integration/obs-staging-bot/Dockerfile${GIWTF_IMAGE_SUFFIX}
|
||||
container_name: obs-staging-bot
|
||||
networks:
|
||||
- gitea-network
|
||||
depends_on:
|
||||
gitea:
|
||||
condition: service_started
|
||||
mock-obs:
|
||||
condition: service_started
|
||||
environment:
|
||||
- OBS_USER=mock
|
||||
- OBS_PASSWORD=mock-long-password
|
||||
volumes:
|
||||
- ./gitea-data:/gitea-data:ro,z
|
||||
command:
|
||||
- "-debug"
|
||||
- "-gitea-url=http://gitea-test:3000"
|
||||
- "-obs=http://mock-obs:8080"
|
||||
- "-obs-web=http://mock-obs:8080"
|
||||
restart: unless-stopped
|
||||
30
integration/rabbitmq-config/certs/cert.pem
Normal file
30
integration/rabbitmq-config/certs/cert.pem
Normal file
@@ -0,0 +1,30 @@
|
||||
-----BEGIN CERTIFICATE-----
|
||||
MIIFKzCCAxOgAwIBAgIUJsg/r0ZyIVxtAkrlZKOr4LvYEvMwDQYJKoZIhvcNAQEL
|
||||
BQAwGDEWMBQGA1UEAwwNcmFiYml0bXEtdGVzdDAeFw0yNjAxMjQxMjQyMjNaFw0z
|
||||
NjAxMjIxMjQyMjNaMBgxFjAUBgNVBAMMDXJhYmJpdG1xLXRlc3QwggIiMA0GCSqG
|
||||
SIb3DQEBAQUAA4ICDwAwggIKAoICAQC9OjTq4DgqVo0mRpS8DGRR6SFrSpb2bqnl
|
||||
YI7xSI3y67i/oP4weiZSawk2+euxhsN4FfOlsAgvpg4WyRQH5PwnXOA1Lxz51qp1
|
||||
t0VumE3B1RDheiBTE8loG1FvmikOiek2gzz76nK0R1sbKY1+/NVJpMs6dL6NzJXG
|
||||
N6aCpWTk7oeY+lW5bPBG0VRA7RUG80w9R9RDtqYc0SYUmm43tjjxPZ81rhCXFx/F
|
||||
v1kxnNTQJdATNrTn9SofymSfm42f4loOGyGBsqJYybKXOPDxrM1erBN5eCwTpJMS
|
||||
4J30aMSdQTzza2Z4wi2LR0vq/FU/ouqzlRp7+7tNJbVAsqhiUa2eeAVkFwZl9wRw
|
||||
lddY0W85U507nw5M3iQv2GTOhJRXwhWpzDUFQ0fT56hAY/V+VbF1iHGAVIz4XlUj
|
||||
gC21wuXz0xRdqP8cCd8UHLSbp8dmie161GeKVwO037aP+1hZJbm7ePsS5Na+qYG1
|
||||
LCy0GhfQn71BsYUaGJtfRcaMwIbqaNIYn+Y6S1FVjxDPXCxFXDrIcFvldmJYTyeK
|
||||
7KrkO2P1RbEiwYyPPUhthbb1Agi9ZutZsnadmPRk27t9bBjNnWaY2z17hijnzVVz
|
||||
jOHuPlpb7cSaagVzLTT0zrZ+ifnZWwdl0S2ZrjBAeVrkNt7DOCUqwBnuBqYiRZFt
|
||||
A1QicHxaEQIDAQABo20wazAdBgNVHQ4EFgQU3l25Ghab2k7UhwxftZ2vZ1HO9Sow
|
||||
HwYDVR0jBBgwFoAU3l25Ghab2k7UhwxftZ2vZ1HO9SowDwYDVR0TAQH/BAUwAwEB
|
||||
/zAYBgNVHREEETAPgg1yYWJiaXRtcS10ZXN0MA0GCSqGSIb3DQEBCwUAA4ICAQB9
|
||||
ilcsRqIvnyN25Oh668YC/xxyeNTIaIxjMLyJaMylBRjNwo1WfbdpXToaEXgot5gK
|
||||
5HGlu3OIBBwBryNAlBtf/usxzLzmkEsm1Dsn9sJNY1ZTkD8MO9yyOtLqBlqAsIse
|
||||
oPVjzSdjk1fP3uyoG/ZUVAFZHZD3/9BEsftfS13oUVxo7vYz1DSyUATT/4QTYMQB
|
||||
PytL6EKJ0dLyuy7rIkZVkaUi+P7GuDXj25Mi6Zkxaw2QnssSuoqy1bAMkzEyNFK5
|
||||
0wlNWEY8H3jRZuAz1T4AXb9sjeCgBKZoWXgmGbzleOophdzvlq66UGAWPWYFGp8Q
|
||||
4GJognovhKzSY9+3n+rMPLAXSao48SYDlyTOZeBo1DTluR5QjVd+NWbEdIsA6buQ
|
||||
a6uPTSVKsulm7hyUlEZp+SsYAtVoZx3jzKKjZXjnaxOfUFWx6pTxNXvxR7pQ/8Ls
|
||||
IfduGy4VjKVQdyuwCE7eVEPDK6d53WWs6itziuj7gfq8mHvZivIA65z05lTwqkvb
|
||||
1WS2aht+zacqVSYyNrK+/kJA2CST3ggc1EO73lRvbfO9LJZWMdO+f/tkXH4zkfmL
|
||||
A3JtJcLOWuv+ZrZvHMpKlBFNMySxE3IeGX+Ad9bGyhZvZULut95/QD7Xy4cPRZHF
|
||||
R3SRn0rn/BeTly+5fkEoFk+ttah8IbwzhduPyPIxng==
|
||||
-----END CERTIFICATE-----
|
||||
52
integration/rabbitmq-config/certs/key.pem
Normal file
52
integration/rabbitmq-config/certs/key.pem
Normal file
@@ -0,0 +1,52 @@
|
||||
-----BEGIN PRIVATE KEY-----
|
||||
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC9OjTq4DgqVo0m
|
||||
RpS8DGRR6SFrSpb2bqnlYI7xSI3y67i/oP4weiZSawk2+euxhsN4FfOlsAgvpg4W
|
||||
yRQH5PwnXOA1Lxz51qp1t0VumE3B1RDheiBTE8loG1FvmikOiek2gzz76nK0R1sb
|
||||
KY1+/NVJpMs6dL6NzJXGN6aCpWTk7oeY+lW5bPBG0VRA7RUG80w9R9RDtqYc0SYU
|
||||
mm43tjjxPZ81rhCXFx/Fv1kxnNTQJdATNrTn9SofymSfm42f4loOGyGBsqJYybKX
|
||||
OPDxrM1erBN5eCwTpJMS4J30aMSdQTzza2Z4wi2LR0vq/FU/ouqzlRp7+7tNJbVA
|
||||
sqhiUa2eeAVkFwZl9wRwlddY0W85U507nw5M3iQv2GTOhJRXwhWpzDUFQ0fT56hA
|
||||
Y/V+VbF1iHGAVIz4XlUjgC21wuXz0xRdqP8cCd8UHLSbp8dmie161GeKVwO037aP
|
||||
+1hZJbm7ePsS5Na+qYG1LCy0GhfQn71BsYUaGJtfRcaMwIbqaNIYn+Y6S1FVjxDP
|
||||
XCxFXDrIcFvldmJYTyeK7KrkO2P1RbEiwYyPPUhthbb1Agi9ZutZsnadmPRk27t9
|
||||
bBjNnWaY2z17hijnzVVzjOHuPlpb7cSaagVzLTT0zrZ+ifnZWwdl0S2ZrjBAeVrk
|
||||
Nt7DOCUqwBnuBqYiRZFtA1QicHxaEQIDAQABAoICAA+AWvDpzNgVDouV6R3NkxNN
|
||||
upXgPqUx9BuNETCtbal6i4AxR1l/zC9gwti82QTKQi2OeM74MHd8zjcqIkiyRsDP
|
||||
wDNDKIfEAONTT+4LLoWEN5WNDGRZ4Nw1LrLqiVX+ULtNPXvynRJtLQa43PVL74oQ
|
||||
pLBle23A1n0uNmcJ9w21B6ktysN9q+JVSCZodZpD6Jk1jus8JXgDXy/9Za2NMTV8
|
||||
A5ShbYz/ETSBJCSnERz7GARW7TN6V0jS6vLTSqMQJyn0KYbHNDr7TPTL7psRuaI5
|
||||
jP/cqxmx1/WKLo5k3cR3IW/cesDGQXZhMRQvNymXJkxvWMPS36lmfyZtbFNflw4Z
|
||||
9OD+2RKt5jFDJjG8fYiYoYBdLiTj2Wdvo4mbRPNkTL75o65riDkDCQuZhDXFBm3s
|
||||
B1aDv5y1AXrzNZ5JSikszKgbLNPYB0rI3unp6i0P1985w6dyel0MGG+ouaeiyrxS
|
||||
9IgJDnE4BJ79mEzHTXtbZ/+3aGAK/Y6mU8Pz2s6/+6ccT0miievsMS+si1KESF31
|
||||
WLnsMdcrJcxqcm7Ypo24G0yBJluSDKtD1cqQUGN1MKp+EEv1SCH+4csaa3ooRB0o
|
||||
YveySjqxtmhVpQuY3egCOaXhPmX7lgYwoe+G4UIkUMwPn20WMg+jFxgPASdh4lqE
|
||||
mzpePP7STvEZAr+rrLu1AoIBAQDmCEiKOsUTtJlX3awOIRtCkIqBxS1E6rpyjfxK
|
||||
A6+zpXnE++8MhIJ07+9bPdOshGjS3JbJ+hu+IocbNg++rjRArYQnJh8/qBZ2GB2v
|
||||
Ryfptsoxtk/xUsmOfchvk4tOjvDHZrJehUtGc+LzX/WUqpgtEk1Gnx7RGRuDNnqS
|
||||
Q1+yU4NubHwOHPswBBXOnVtopcAHFpKhbKRFOHOwMZN99qcWVIkv4J9c6emcPMLI
|
||||
I/QPIvwB6WmbLa0o3JNXlD4kPdqCgNW36KEFiW8m+4tgzF3HWYSAyIeBRFG7ouE6
|
||||
yk5hiptPKhZlTmTAkQSssCXksiTw1rsspFULZSRyaaaPunvVAoIBAQDSlrKu+B2h
|
||||
AJtxWy5MQDOiroqT3KDneIGXPYgH3/tiDmxy0CIEbSb5SqZ6zAmihs3dWWCmc1JH
|
||||
YObRrqIxu+qVi4K+Uz8l7WBrS7DkjZjajq+y/mrZYUNRoL2q9mnNqRNan7zxWDJc
|
||||
U4u2NH9P4LOz6ttE4OG9SC3/gZLoepA+ANZatu93749IT7z8ske0MVPP76jVI1Gl
|
||||
D7cPIlzcBUdJgNV8UOkxeqU3+S6Jn17Tkx5qMWND/2BCN4voQ4pfGWSkbaHlMLh1
|
||||
2SbVuR+HYPY3aPJeSY7MEPoc7d2SSVOcVDr2AQwSDSCCgIFZOZlawehUz9R51hK8
|
||||
LlaccFWXhS9NAoIBAEFZNRJf48DXW4DErq5M5WuhmFeJZnTfohwNDhEQvwdwCQnW
|
||||
8HBD7LO/veXTyKCH9SeCFyxF6z+2m181mn93Cc0d/h8JC3OQEuF1tGko88PHc+Vv
|
||||
f4J1HGFohlp8NeUZYnmjSSTlBR98qIqvRhr348daHa3kYmLQmSpLfcKzdSo542qp
|
||||
UwzHWuynHHLX7THrdIQO+5T0Qi6P/P2e9+GfApSra1W4oE1K/lyuPj+RRzJNo/3/
|
||||
C0tUTI8BKrKEoKq3D65nX0+hvKzQAE24xD25kSKi4aucTDKC8B04BngnJOE8+SYi
|
||||
NL6O6Lxz9joAyKMRoMDyn7Xs8WQNVa9TKEhImAkCggEBAMljmIm/egZIoF7thf8h
|
||||
vr+rD5eL/Myf776E95wgVTVW+dtqs71r7UOmYkM48VXeeO1f1hAYZO0h/Fs2GKJb
|
||||
RWGyQ1xkHBXXRsgVYJuR1kXdAqW4rNIqM8jSYdAnStOFB5849+YOJEsrEocy+TWY
|
||||
fAJpbTwXm4n6hxK8BZQR8fN5tYSXQbd+/5V1vBQlInFuYuqOFPWPizrBJp1wjUFU
|
||||
QvJGJON4NSo+UdaPlDPEl1jabtG7XWTfylxI5qE+RgvgKuEcfyDBUQZSntLw8Pf0
|
||||
gEJJOM92pPr+mVIlICoPucfcvW4ZXkO9DgP/hLOhY8jpe5fwERBa6xvPbMC6pP/8
|
||||
PFkCggEBAOLtvboBThe57QRphsKHmCtRJHmT4oZzhMYsE+5GMGYzPNWod1hSyfXn
|
||||
EB8iTmAFP5r7FdC10B8mMpACXuDdi2jbmlYOTU6xNTprSKtv8r8CvorWJdsQwRsy
|
||||
pZ7diSCeyi0z/sIx//ov0b3WD0E8BG/HWsFbX0p5xXpaljYEv5dK7xUiWgBW+15a
|
||||
N1AeVcPiXRDwhQMVcvVOvzgwKsw+Rpls/9W4hihcBHaiMcBUDFWxJtnf4ZAGAZS3
|
||||
/694MOYlmfgT/cDqF9oOsCdxM0w24kL0dcUM7zPk314ixAAfUwXaxisBhS2roJ88
|
||||
HsuK9JPSK/AS0IqUtKiq4LZ9ErixYF0=
|
||||
-----END PRIVATE KEY-----
|
||||
35
integration/rabbitmq-config/definitions.json
Executable file
35
integration/rabbitmq-config/definitions.json
Executable file
@@ -0,0 +1,35 @@
|
||||
{
|
||||
"users": [
|
||||
{
|
||||
"name": "gitea",
|
||||
"password_hash": "5IdZmMJhNb4otX/nz9Xtmkpj9khl6+5eAmXNs/oHYwQNO3jg",
|
||||
"hashing_algorithm": "rabbit_password_hashing_sha256",
|
||||
"tags": "administrator"
|
||||
}
|
||||
],
|
||||
"vhosts": [
|
||||
{
|
||||
"name": "/"
|
||||
}
|
||||
],
|
||||
"permissions": [
|
||||
{
|
||||
"user": "gitea",
|
||||
"vhost": "/",
|
||||
"configure": ".*",
|
||||
"write": ".*",
|
||||
"read": ".*"
|
||||
}
|
||||
],
|
||||
"exchanges": [
|
||||
{
|
||||
"name": "pubsub",
|
||||
"vhost": "/",
|
||||
"type": "topic",
|
||||
"durable": true,
|
||||
"auto_delete": false,
|
||||
"internal": false,
|
||||
"arguments": {}
|
||||
}
|
||||
]
|
||||
}
|
||||
83
integration/test-plan.md
Normal file
83
integration/test-plan.md
Normal file
@@ -0,0 +1,83 @@
|
||||
# Test Plan: workflow-pr Bot
|
||||
|
||||
## 1. Introduction
|
||||
|
||||
This document outlines the test plan for the `workflow-pr` bot. The bot is responsible for synchronizing pull requests between ProjectGit and PackageGit repositories, managing reviews, and handling merges. This test plan aims to ensure the bot's functionality, reliability, and performance.
|
||||
|
||||
## 2. Scope
|
||||
|
||||
### In Scope
|
||||
|
||||
* Pull Request synchronization (creation, update, closing).
|
||||
* Reviewer management (adding, re-adding, mandatory vs. advisory).
|
||||
* Merge management, including `ManualMergeOnly` and `ManualMergeProject` flags.
|
||||
* Configuration parsing (`workflow.config`).
|
||||
* Label management (`staging/Auto`, `review/Pending`, `review/Done`).
|
||||
* Maintainership and permissions handling.
|
||||
|
||||
### Out of Scope
|
||||
|
||||
* Package deletion requests (planned feature).
|
||||
* Underlying infrastructure (Gitea, RabbitMQ, OBS).
|
||||
* Performance and load testing.
|
||||
* Closing a PackageGit PR (currently disabled).
|
||||
|
||||
## 3. Test Objectives
|
||||
|
||||
* Verify that pull requests are correctly synchronized between ProjectGit and PackageGit.
|
||||
* Ensure that reviewers are correctly added to pull requests based on the configuration.
|
||||
* Validate that pull requests are merged only when all conditions are met.
|
||||
* Confirm that the bot correctly handles various configurations in `workflow.config`.
|
||||
* Verify that labels are correctly applied to pull requests.
|
||||
* Ensure that maintainership and permissions are correctly enforced.
|
||||
|
||||
## 4. Test Strategy
|
||||
|
||||
The testing will be conducted in a dedicated test environment that mimics the production environment. The strategy will involve a combination of:
|
||||
|
||||
* **Component Testing:** Testing individual components of the bot in isolation using unit tests written in Go.
|
||||
* **Integration Testing:** Testing the bot's interaction with Gitea, RabbitMQ, and a mock OBS server using `pytest`.
|
||||
* **End-to-End Testing:** Testing the complete workflow from creating a pull request to merging it using `pytest`.
|
||||
|
||||
### Test Automation
|
||||
|
||||
* **Unit Tests:** Go's built-in testing framework will be used to write unit tests for individual functions and methods.
|
||||
* **Integration and End-to-End Tests:** `pytest` will be used to write integration and end-to-end tests that use the Gitea API to create pull requests and verify the bot's behavior.
|
||||
|
||||
### Success Metrics
|
||||
|
||||
* **Test Coverage:** The goal is to achieve at least 80% test coverage for the bot's codebase.
|
||||
* **Bug Detection Rate:** The number of bugs found during the testing phase.
|
||||
* **Test Pass Rate:** The percentage of test cases that pass without any issues.
|
||||
|
||||
|
||||
## 5. Test Cases
|
||||
|
||||
| Test Case ID | Description | Steps to Reproduce | Expected Results | Priority |
|
||||
| :--- | :--- | :--- | :--- | :--- |
|
||||
| **TC-SYNC-001** | **Create ProjectGit PR from PackageGit PR** | 1. Create a new PR in a PackageGit repository. | 1. A new PR is created in the corresponding ProjectGit repository with the title "Forwarded PRs: <package_name>".<br>2. The ProjectGit PR description contains a link to the PackageGit PR (e.g., `PR: org/package_repo!pr_number`).<br>3. The package submodule in the ProjectGit PR points to the PackageGit PR's commit. | High |
|
||||
| **TC-SYNC-002** | **Update ProjectGit PR from PackageGit PR** | 1. Push a new commit to an existing PackageGit PR. | 1. The corresponding ProjectGit PR's head branch is updated with the new commit. | High |
|
||||
| **TC-SYNC-003** | **WIP Flag Synchronization** | 1. Mark a PackageGit PR as "Work In Progress".<br>2. Remove the WIP flag from the PackageGit PR. | 1. The corresponding ProjectGit PR is also marked as "Work In Progress".<br>2. The WIP flag on the ProjectGit PR is removed. | Medium |
|
||||
| **TC-SYNC-004** | **WIP Flag (multiple referenced package PRs)** | 1. Create a ProjectGit PR that references multiple PackageGit PRs.<br>2. Mark one of the PackageGit PRs as "Work In Progress".<br>3. Remove the "Work In Progress" flag from all PackageGit PRs. | 1. The ProjectGit PR is marked as "Work In Progress".<br>2. The "Work In Progress" flag is removed from the ProjectGit PR only after it has been removed from all associated PackageGit PRs. | Medium |
|
||||
| **TC-SYNC-005** | **NoProjectGitPR = true, edits disabled** | 1. Set `NoProjectGitPR = true` in `workflow.config`.<br>2. Create a PackageGit PR without "Allow edits from maintainers" enabled. <br>3. Push a new commit to the PackageGit PR. | 1. No ProjectGit PR is created.<br>2. The bot adds a warning comment to the PackageGit PR explaining that it cannot update the PR. | High |
|
||||
| **TC-SYNC-006** | **NoProjectGitPR = true, edits enabled** | 1. Set `NoProjectGitPR = true` in `workflow.config`.<br>2. Create a PackageGit PR with "Allow edits from maintainers" enabled.<br>3. Push a new commit to the PackageGit PR. | 1. No ProjectGit PR is created.<br>2. The submodule commit on the project PR is updated with the new commit from the PackageGit PR. | High |
|
||||
| **TC-COMMENT-001** | **Detect duplicate comments** | 1. Create a PackageGit PR.<br>2. Wait for the `workflow-pr` bot to act on the PR.<br>3. Edit the body of the PR to trigger the bot a second time. | 1. The bot should not post a duplicate comment. | High |
|
||||
| **TC-REVIEW-001** | **Add mandatory reviewers** | 1. Create a new PackageGit PR. | 1. All mandatory reviewers are added to both the PackageGit and ProjectGit PRs. | High |
|
||||
| **TC-REVIEW-002** | **Add advisory reviewers** | 1. Create a new PackageGit PR with advisory reviewers defined in the configuration. | 1. Advisory reviewers are added to the PR, but their approval is not required for merging. | Medium |
|
||||
| **TC-REVIEW-003** | **Re-add reviewers** | 1. Push a new commit to a PackageGit PR after it has been approved. | 1. The original reviewers are re-added to the PR. | Medium |
|
||||
| **TC-REVIEW-004** | **Package PR created by a maintainer** | 1. Create a PackageGit PR from the account of a package maintainer. | 1. No review is requested from other package maintainers. | High |
|
||||
| **TC-REVIEW-005** | **Package PR created by an external user (approve)** | 1. Create a PackageGit PR from the account of a user who is not a package maintainer.<br>2. One of the package maintainers approves the PR. | 1. All package maintainers are added as reviewers.<br>2. Once one maintainer approves the PR, the other maintainers are removed as reviewers. | High |
|
||||
| **TC-REVIEW-006** | **Package PR created by an external user (reject)** | 1. Create a PackageGit PR from the account of a user who is not a package maintainer.<br>2. One of the package maintainers rejects the PR. | 1. All package maintainers are added as reviewers.<br>2. Once one maintainer rejects the PR, the other maintainers are removed as reviewers. | High |
|
||||
| **TC-REVIEW-007** | **Package PR created by a maintainer with ReviewRequired=true** | 1. Set `ReviewRequired = true` in `workflow.config`.<br>2. Create a PackageGit PR from the account of a package maintainer. | 1. A review is requested from other package maintainers if available. | High |
|
||||
| **TC-MERGE-001** | **Automatic Merge** | 1. Create a PackageGit PR.<br>2. Ensure all mandatory reviews are completed on both project and package PRs. | 1. The PR is automatically merged. | High |
|
||||
| **TC-MERGE-002** | **ManualMergeOnly with Package Maintainer** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a package maintainer for that package. | 1. The PR is merged. | High |
|
||||
| **TC-MERGE-003** | **ManualMergeOnly with unauthorized user** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a user who is not a maintainer for that package. | 1. The PR is not merged. | High |
|
||||
| **TC-MERGE-004** | **ManualMergeOnly with multiple packages** | 1. Create a ProjectGit PR that references multiple PackageGit PRs with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on each package PR from the account of a package maintainer. | 1. The PR is merged only after "merge ok" is commented on all associated PackageGit PRs. | High |
|
||||
| **TC-MERGE-005** | **ManualMergeOnly with Project Maintainer** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a project maintainer. | 1. The PR is merged. | High |
|
||||
| **TC-MERGE-006** | **ManualMergeProject with Project Maintainer** | 1. Create a PackageGit PR with `ManualMergeProject` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the project PR from the account of a project maintainer. | 1. The PR is merged. | High |
|
||||
| **TC-MERGE-007** | **ManualMergeProject with unauthorized user** | 1. Create a PackageGit PR with `ManualMergeProject` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the project PR from the account of a package maintainer. | 1. The PR is not merged. | High |
|
||||
| **TC-CONFIG-001** | **Invalid Configuration** | 1. Provide an invalid `workflow.config` file. | 1. The bot reports an error and does not process any PRs. | High |
|
||||
| **TC-LABEL-001** | **Apply `staging/Auto` label** | 1. Create a new PackageGit PR. | 1. The `staging/Auto` label is applied to the ProjectGit PR. | High |
|
||||
| **TC-LABEL-002** | **Apply `review/Pending` label** | 1. Create a new PackageGit PR. | 1. The `review/Pending` label is applied to the ProjectGit PR when there are pending reviews. | Medium |
|
||||
| **TC-LABEL-003** | **Apply `review/Done` label** | 1. Ensure all mandatory reviews for a PR are completed. | 1. The `review/Done` label is applied to the ProjectGit PR when all mandatory reviews are completed. | Medium |
|
||||
|
||||
0
integration/tests/__init__.py
Normal file
0
integration/tests/__init__.py
Normal file
78
integration/tests/conftest.py
Normal file
78
integration/tests/conftest.py
Normal file
@@ -0,0 +1,78 @@
|
||||
"""
|
||||
This module contains pytest fixtures for setting up the test environment.
|
||||
"""
|
||||
|
||||
import pytest
|
||||
import requests
|
||||
import time
|
||||
import os
|
||||
|
||||
# Assuming GiteaAPIClient is in tests/lib/common_test_utils.py
|
||||
from tests.lib.common_test_utils import GiteaAPIClient
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
def gitea_env():
|
||||
"""
|
||||
Sets up the Gitea environment with dummy data and provides a GiteaAPIClient instance.
|
||||
"""
|
||||
gitea_url = "http://127.0.0.1:3000"
|
||||
|
||||
# Read admin token
|
||||
admin_token_path = "./gitea-data/admin.token" # Corrected path
|
||||
admin_token = None
|
||||
try:
|
||||
with open(admin_token_path, "r") as f:
|
||||
admin_token = f.read().strip()
|
||||
except FileNotFoundError:
|
||||
raise Exception(f"Admin token file not found at {admin_token_path}. Ensure it's generated and accessible.")
|
||||
|
||||
# Headers for authenticated requests
|
||||
auth_headers = {"Authorization": f"token {admin_token}", "Content-Type": "application/json"}
|
||||
|
||||
# Wait for Gitea to be available
|
||||
print(f"Waiting for Gitea at {gitea_url}...")
|
||||
max_retries = 30
|
||||
for i in range(max_retries):
|
||||
try:
|
||||
# Check a specific API endpoint that indicates readiness
|
||||
response = requests.get(f"{gitea_url}/api/v1/version", headers=auth_headers, timeout=5)
|
||||
if response.status_code == 200:
|
||||
print("Gitea API is available.")
|
||||
break
|
||||
except requests.exceptions.ConnectionError:
|
||||
pass
|
||||
print(f"Gitea not ready ({response.status_code if 'response' in locals() else 'ConnectionError'}), retrying in 5 seconds... ({i+1}/{max_retries})")
|
||||
time.sleep(5)
|
||||
else:
|
||||
raise Exception("Gitea did not become available within the expected time.")
|
||||
|
||||
client = GiteaAPIClient(base_url=gitea_url, token=admin_token)
|
||||
|
||||
# Setup dummy data
|
||||
print("--- Starting Gitea Dummy Data Setup from Pytest Fixture ---")
|
||||
client.create_org("products")
|
||||
client.create_org("pool")
|
||||
|
||||
client.create_repo("products", "SLFO")
|
||||
client.create_repo("pool", "pkgA")
|
||||
client.create_repo("pool", "pkgB")
|
||||
|
||||
# The add_submodules method also creates workflow.config and staging.config
|
||||
client.add_submodules("products", "SLFO")
|
||||
|
||||
client.add_collaborator("products", "SLFO", "autogits_obs_staging_bot", "write")
|
||||
client.add_collaborator("products", "SLFO", "workflow-pr", "write")
|
||||
client.add_collaborator("pool", "pkgA", "workflow-pr", "write")
|
||||
client.add_collaborator("pool", "pkgB", "workflow-pr", "write")
|
||||
|
||||
client.update_repo_settings("products", "SLFO")
|
||||
client.update_repo_settings("pool", "pkgA")
|
||||
client.update_repo_settings("pool", "pkgB")
|
||||
print("--- Gitea Dummy Data Setup Complete ---")
|
||||
time.sleep(5) # Add a small delay for Gitea to fully process changes
|
||||
|
||||
yield client
|
||||
|
||||
# Teardown (optional, depending on test strategy)
|
||||
# For now, we'll leave resources for inspection. If a clean slate is needed for each test,
|
||||
# this fixture's scope would be 'function' and teardown logic would be added here.
|
||||
23
integration/tests/data/build_result.xml.template
Normal file
23
integration/tests/data/build_result.xml.template
Normal file
@@ -0,0 +1,23 @@
|
||||
<resultlist state="0fef640bfb56c3e76fcfb698b19b59c0">
|
||||
<result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="aarch64" code="unpublished" state="unpublished">
|
||||
<scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
|
||||
<scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
|
||||
<status package="openjpeg2" code="succeeded"/>
|
||||
</result>
|
||||
<result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="ppc64le" code="unpublished" state="unpublished">
|
||||
<scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
|
||||
<scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
|
||||
<status package="openjpeg2" code="succeeded"/>
|
||||
</result>
|
||||
<result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="x86_64" code="unpublished" state="unpublished">
|
||||
<scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
|
||||
<scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
|
||||
<status package="openjpeg2" code="succeeded"/>
|
||||
</result>
|
||||
<result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="s390x" code="unpublished" state="unpublished">
|
||||
<scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
|
||||
<scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
|
||||
<status package="openjpeg2" code="succeeded"/>
|
||||
</result>
|
||||
</resultlist>
|
||||
|
||||
@@ -0,0 +1,18 @@
|
||||
<project name="openSUSE:Leap:16.0:PullRequest">
|
||||
<title>Leap 16.0 PullRequest area</title>
|
||||
<description>Base project to define the pull request builds</description>
|
||||
<person userid="autogits_obs_staging_bot" role="maintainer"/>
|
||||
<person userid="maxlin_factory" role="maintainer"/>
|
||||
<group groupid="maintenance-opensuse.org" role="maintainer"/>
|
||||
<debuginfo>
|
||||
<enable/>
|
||||
</debuginfo>
|
||||
<repository name="standard">
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<arch>x86_64</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
</project>
|
||||
59
integration/tests/data/source_openSUSE_Leap_16.0__meta
Normal file
59
integration/tests/data/source_openSUSE_Leap_16.0__meta
Normal file
@@ -0,0 +1,59 @@
|
||||
<project name="openSUSE:Leap:16.0">
|
||||
<title>openSUSE Leap 16.0 based on SLFO</title>
|
||||
<description>Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)</description>
|
||||
<link project="openSUSE:Backports:SLE-16.0"/>
|
||||
<scmsync>http://gitea-test:3000/products/SLFO#main</scmsync>
|
||||
<person userid="dimstar_suse" role="maintainer"/>
|
||||
<person userid="lkocman-factory" role="maintainer"/>
|
||||
<person userid="maxlin_factory" role="maintainer"/>
|
||||
<person userid="factory-auto" role="reviewer"/>
|
||||
<person userid="licensedigger" role="reviewer"/>
|
||||
<group groupid="autobuild-team" role="maintainer"/>
|
||||
<group groupid="factory-maintainers" role="maintainer"/>
|
||||
<group groupid="maintenance-opensuse.org" role="maintainer"/>
|
||||
<group groupid="factory-staging" role="reviewer"/>
|
||||
<build>
|
||||
<disable repository="ports"/>
|
||||
</build>
|
||||
<debuginfo>
|
||||
<enable/>
|
||||
</debuginfo>
|
||||
<repository name="standard" rebuild="local">
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>local</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
<repository name="product">
|
||||
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="product" trigger="manual"/>
|
||||
<path project="openSUSE:Leap:16.0:NonFree" repository="standard"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="images"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>local</arch>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
<repository name="ports">
|
||||
<arch>armv7l</arch>
|
||||
</repository>
|
||||
<repository name="images">
|
||||
<releasetarget project="openSUSE:Leap:16.0:ToTest" repository="images" trigger="manual"/>
|
||||
<path project="openSUSE:Leap:16.0" repository="standard"/>
|
||||
<path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
|
||||
<path project="SUSE:SLFO:1.2" repository="standard"/>
|
||||
<arch>i586</arch>
|
||||
<arch>x86_64</arch>
|
||||
<arch>aarch64</arch>
|
||||
<arch>ppc64le</arch>
|
||||
<arch>s390x</arch>
|
||||
</repository>
|
||||
</project>
|
||||
301
integration/tests/lib/common_test_utils.py
Normal file
301
integration/tests/lib/common_test_utils.py
Normal file
@@ -0,0 +1,301 @@
|
||||
import os
|
||||
import time
|
||||
import pytest
|
||||
import requests
|
||||
import json
|
||||
import xml.etree.ElementTree as ET
|
||||
from pathlib import Path
|
||||
import base64
|
||||
|
||||
TEST_DATA_DIR = Path(__file__).parent.parent / "data"
|
||||
BUILD_RESULT_TEMPLATE = TEST_DATA_DIR / "build_result.xml.template"
|
||||
MOCK_RESPONSES_DIR = Path(__file__).parent.parent.parent / "mock-obs" / "responses"
|
||||
MOCK_BUILD_RESULT_FILE = (
|
||||
MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0:PullRequest:*__result"
|
||||
)
|
||||
MOCK_BUILD_RESULT_FILE1 = MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0__result"
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def mock_build_result():
|
||||
"""
|
||||
Fixture to create a mock build result file from the template.
|
||||
Returns a factory function that the test can call with parameters.
|
||||
"""
|
||||
|
||||
def _create_result_file(package_name: str, code: str):
|
||||
tree = ET.parse(BUILD_RESULT_TEMPLATE)
|
||||
root = tree.getroot()
|
||||
for status_tag in root.findall(".//status"):
|
||||
status_tag.set("package", package_name)
|
||||
status_tag.set("code", code)
|
||||
|
||||
MOCK_RESPONSES_DIR.mkdir(exist_ok=True)
|
||||
tree.write(MOCK_BUILD_RESULT_FILE)
|
||||
tree.write(MOCK_BUILD_RESULT_FILE1)
|
||||
return str(MOCK_BUILD_RESULT_FILE)
|
||||
|
||||
yield _create_result_file
|
||||
|
||||
if MOCK_BUILD_RESULT_FILE.exists():
|
||||
MOCK_BUILD_RESULT_FILE.unlink()
|
||||
MOCK_BUILD_RESULT_FILE1.unlink()
|
||||
|
||||
|
||||
class GiteaAPIClient:
|
||||
def __init__(self, base_url, token):
|
||||
self.base_url = base_url
|
||||
self.headers = {"Authorization": f"token {token}", "Content-Type": "application/json"}
|
||||
|
||||
def _request(self, method, path, **kwargs):
|
||||
url = f"{self.base_url}/api/v1/{path}"
|
||||
response = requests.request(method, url, headers=self.headers, **kwargs)
|
||||
try:
|
||||
response.raise_for_status()
|
||||
except requests.exceptions.HTTPError as e:
|
||||
print(f"HTTPError in _request: {e}")
|
||||
print(f"Response Content: {e.response.text}")
|
||||
raise
|
||||
return response
|
||||
|
||||
def create_org(self, org_name):
|
||||
print(f"--- Checking organization: {org_name} ---")
|
||||
try:
|
||||
self._request("GET", f"orgs/{org_name}")
|
||||
print(f"Organization '{org_name}' already exists.")
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
print(f"Creating organization '{org_name}'...")
|
||||
data = {"username": org_name, "full_name": org_name}
|
||||
self._request("POST", "orgs", json=data)
|
||||
print(f"Organization '{org_name}' created.")
|
||||
else:
|
||||
raise
|
||||
|
||||
def create_repo(self, org_name, repo_name):
|
||||
print(f"--- Checking repository: {org_name}/{repo_name} ---")
|
||||
try:
|
||||
self._request("GET", f"repos/{org_name}/{repo_name}")
|
||||
print(f"Repository '{org_name}/{repo_name}' already exists.")
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
print(f"Creating repository '{org_name}/{repo_name}'...")
|
||||
data = {
|
||||
"name": repo_name,
|
||||
"auto_init": True,
|
||||
"default_branch": "main",
|
||||
"gitignores": "Go",
|
||||
"license": "MIT",
|
||||
"private": False,
|
||||
"readme": "Default"
|
||||
}
|
||||
self._request("POST", f"orgs/{org_name}/repos", json=data)
|
||||
print(f"Repository '{org_name}/{repo_name}' created with a README.")
|
||||
time.sleep(1) # Added delay to allow Git operations to become available
|
||||
else:
|
||||
raise
|
||||
|
||||
def add_collaborator(self, org_name, repo_name, collaborator_name, permission="write"):
|
||||
print(f"--- Adding {collaborator_name} as a collaborator to {org_name}/{repo_name} with '{permission}' permission ---")
|
||||
data = {"permission": permission}
|
||||
# Gitea API returns 204 No Content on success and doesn't fail if already present.
|
||||
self._request("PUT", f"repos/{org_name}/{repo_name}/collaborators/{collaborator_name}", json=data)
|
||||
print(f"Attempted to add {collaborator_name} to {org_name}/{repo_name}.")
|
||||
|
||||
def add_submodules(self, org_name, repo_name):
|
||||
print(f"--- Adding submodules to {org_name}/{repo_name} using diffpatch ---")
|
||||
parent_repo_path = f"repos/{org_name}/{repo_name}"
|
||||
|
||||
try:
|
||||
self._request("GET", f"{parent_repo_path}/contents/.gitmodules")
|
||||
print("Submodules appear to be already added. Skipping.")
|
||||
return
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code != 404:
|
||||
raise
|
||||
|
||||
# Get latest commit SHAs for the submodules
|
||||
pkg_a_sha = self._request("GET", "repos/pool/pkgA/branches/main").json()["commit"]["id"]
|
||||
pkg_b_sha = self._request("GET", "repos/pool/pkgB/branches/main").json()["commit"]["id"]
|
||||
|
||||
if not pkg_a_sha or not pkg_b_sha:
|
||||
raise Exception("Error: Could not get submodule commit SHAs. Cannot apply patch.")
|
||||
|
||||
diff_content = f"""diff --git a/.gitmodules b/.gitmodules
|
||||
new file mode 100644
|
||||
index 0000000..f1838bd
|
||||
--- /dev/null
|
||||
+++ b/.gitmodules
|
||||
@@ -0,0 +1,6 @@
|
||||
+[submodule "pkgA"]
|
||||
+ path = pkgA
|
||||
+ url = ../../pool/pkgA.git
|
||||
+[submodule "pkgB"]
|
||||
+ path = pkgB
|
||||
+ url = ../../pool/pkgB.git
|
||||
diff --git a/pkgA b/pkgA
|
||||
new file mode 160000
|
||||
index 0000000..{pkg_a_sha}
|
||||
--- /dev/null
|
||||
+++ b/pkgA
|
||||
@@ -0,0 +1 @@
|
||||
+Subproject commit {pkg_a_sha}
|
||||
diff --git a/pkgB b/pkgB
|
||||
new file mode 160000
|
||||
index 0000000..{pkg_b_sha}
|
||||
--- /dev/null
|
||||
+++ b/pkgB
|
||||
@@ -0,0 +1 @@
|
||||
+Subproject commit {pkg_b_sha}
|
||||
diff --git a/workflow.config b/workflow.config
|
||||
new file mode 100644
|
||||
--- /dev/null
|
||||
+++ b/workflow.config
|
||||
@@ -0,0 +7 @@
|
||||
+{{
|
||||
+ "Workflows": ["pr"],
|
||||
+ "GitProjectName": "products/SLFO#main",
|
||||
+ "Organization": "pool",
|
||||
+ "Branch": "main",
|
||||
+ "ManualMergeProject": true,
|
||||
+ "Reviewers": [ "-autogits_obs_staging_bot" ]
|
||||
+}}
|
||||
diff --git a/staging.config b/staging.config
|
||||
new file mode 100644
|
||||
--- /dev/null
|
||||
+++ b/staging.config
|
||||
@@ -0,0 +3 @@
|
||||
+{{
|
||||
+ "ObsProject": "openSUSE:Leap:16.0",
|
||||
+ "StagingProject": "openSUSE:Leap:16.0:PullRequest"
|
||||
+}}
|
||||
"""
|
||||
message = "Add pkgA and pkgB as submodules and config files"
|
||||
data = {
|
||||
"branch": "main",
|
||||
"content": diff_content,
|
||||
"message": message
|
||||
}
|
||||
print(f"Applying submodule patch to {org_name}/{repo_name}...")
|
||||
self._request("POST", f"{parent_repo_path}/diffpatch", json=data)
|
||||
print("Submodule patch applied.")
|
||||
|
||||
def update_repo_settings(self, org_name, repo_name):
|
||||
print(f"--- Updating repository settings for: {org_name}/{repo_name} ---")
|
||||
repo_data = self._request("GET", f"repos/{org_name}/{repo_name}").json()
|
||||
|
||||
# Ensure these are boolean values, not string
|
||||
repo_data["allow_manual_merge"] = True
|
||||
repo_data["autodetect_manual_merge"] = True
|
||||
|
||||
self._request("PATCH", f"repos/{org_name}/{repo_name}", json=repo_data)
|
||||
print(f"Repository settings for '{org_name}/{repo_name}' updated.")
|
||||
|
||||
|
||||
def create_gitea_pr(self, repo_full_name: str, diff_content: str, title: str):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/pulls"
|
||||
base_branch = "main"
|
||||
|
||||
# Create a new branch for the PR
|
||||
new_branch_name = f"pr-branch-{int(time.time())}"
|
||||
|
||||
# Get the latest commit SHA of the base branch
|
||||
base_commit_sha = self._request("GET", f"repos/{owner}/{repo}/branches/{base_branch}").json()["commit"]["id"]
|
||||
|
||||
# Create the new branch
|
||||
self._request("POST", f"repos/{owner}/{repo}/branches", json={
|
||||
"new_branch_name": new_branch_name,
|
||||
"old_ref": base_commit_sha # Use the commit SHA directly
|
||||
})
|
||||
|
||||
# Create a new file or modify an existing one in the new branch
|
||||
file_path = f"test-file-{int(time.time())}.txt"
|
||||
file_content = "This is a test file for the PR."
|
||||
self._request("POST", f"repos/{owner}/{repo}/contents/{file_path}", json={
|
||||
"content": base64.b64encode(file_content.encode('utf-8')).decode('ascii'),
|
||||
"message": "Add test file",
|
||||
"branch": new_branch_name
|
||||
})
|
||||
|
||||
# Now create the PR
|
||||
data = {
|
||||
"head": new_branch_name, # Use the newly created branch as head
|
||||
"base": base_branch,
|
||||
"title": title,
|
||||
"body": "Test Pull Request"
|
||||
}
|
||||
response = self._request("POST", url, json=data)
|
||||
return response.json()
|
||||
|
||||
def modify_gitea_pr(self, repo_full_name: str, pr_number: int, diff_content: str, message: str):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
|
||||
# Get PR details to find the head branch
|
||||
pr_details = self._request("GET", f"repos/{owner}/{repo}/pulls/{pr_number}").json()
|
||||
head_branch = pr_details["head"]["ref"]
|
||||
|
||||
file_path = f"modified-file-{int(time.time())}.txt"
|
||||
file_content = "This is a modified test file for the PR."
|
||||
|
||||
self._request("POST", f"repos/{owner}/{repo}/contents/{file_path}", json={
|
||||
"content": base64.b64encode(file_content.encode('utf-8')).decode('ascii'),
|
||||
"message": message,
|
||||
"branch": head_branch
|
||||
})
|
||||
|
||||
def update_gitea_pr_properties(self, repo_full_name: str, pr_number: int, **kwargs):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/pulls/{pr_number}"
|
||||
response = self._request("PATCH", url, json=kwargs)
|
||||
return response.json()
|
||||
|
||||
def get_timeline_events(self, repo_full_name: str, pr_number: int):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/issues/{pr_number}/timeline"
|
||||
|
||||
# Retry logic for timeline events
|
||||
for i in range(10): # Try up to 10 times
|
||||
try:
|
||||
response = self._request("GET", url)
|
||||
timeline_events = response.json()
|
||||
if timeline_events: # Check if timeline_events list is not empty
|
||||
return timeline_events
|
||||
print(f"Attempt {i+1}: Timeline for PR {pr_number} is empty. Retrying in 3 seconds...")
|
||||
time.sleep(3)
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
print(f"Attempt {i+1}: Timeline for PR {pr_number} not found yet. Retrying in 3 seconds...")
|
||||
time.sleep(3)
|
||||
else:
|
||||
raise # Re-raise other HTTP errors
|
||||
raise Exception(f"Failed to retrieve timeline for PR {pr_number} after multiple retries.")
|
||||
|
||||
def get_comments(self, repo_full_name: str, pr_number: int):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/issues/{pr_number}/comments"
|
||||
|
||||
# Retry logic for comments
|
||||
for i in range(10): # Try up to 10 times
|
||||
try:
|
||||
response = self._request("GET", url)
|
||||
comments = response.json()
|
||||
print(f"Attempt {i+1}: Comments for PR {pr_number} received: {comments}") # Added debug print
|
||||
if comments: # Check if comments list is not empty
|
||||
return comments
|
||||
print(f"Attempt {i+1}: Comments for PR {pr_number} are empty. Retrying in 3 seconds...")
|
||||
time.sleep(3)
|
||||
except requests.exceptions.HTTPError as e:
|
||||
if e.response.status_code == 404:
|
||||
print(f"Attempt {i+1}: Comments for PR {pr_number} not found yet. Retrying in 3 seconds...")
|
||||
time.sleep(3)
|
||||
else:
|
||||
raise # Re-raise other HTTP errors
|
||||
raise Exception(f"Failed to retrieve comments for PR {pr_number} after multiple retries.")
|
||||
|
||||
def get_pr_details(self, repo_full_name: str, pr_number: int):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/pulls/{pr_number}"
|
||||
response = self._request("GET", url)
|
||||
return response.json()
|
||||
|
||||
153
integration/tests/test_pr_workflow.py
Executable file
153
integration/tests/test_pr_workflow.py
Executable file
@@ -0,0 +1,153 @@
|
||||
import pytest
|
||||
import re
|
||||
import time
|
||||
import subprocess
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from tests.lib.common_test_utils import (
|
||||
GiteaAPIClient,
|
||||
mock_build_result,
|
||||
)
|
||||
|
||||
# =============================================================================
|
||||
# TEST CASES
|
||||
# =============================================================================
|
||||
|
||||
|
||||
def test_pr_workflow_succeeded(gitea_env, mock_build_result):
|
||||
"""End-to-end test for a successful PR workflow."""
|
||||
diff = "diff --git a/test.txt b/test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
|
||||
pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR - should succeed")
|
||||
initial_pr_number = pr["number"]
|
||||
|
||||
compose_dir = Path(__file__).parent.parent
|
||||
|
||||
forwarded_pr_number = None
|
||||
print(
|
||||
f"Polling pool/pkgA PR #{initial_pr_number} timeline for forwarded PR event..."
|
||||
)
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
timeline_events = gitea_env.get_timeline_events("pool/pkgA", initial_pr_number)
|
||||
for event in timeline_events:
|
||||
if event.get("type") == "pull_ref":
|
||||
if not (ref_issue := event.get("ref_issue")):
|
||||
continue
|
||||
url_to_check = ref_issue.get("html_url", "")
|
||||
match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
|
||||
if match:
|
||||
forwarded_pr_number = match.group(1)
|
||||
break
|
||||
if forwarded_pr_number:
|
||||
break
|
||||
assert (
|
||||
forwarded_pr_number is not None
|
||||
), "Workflow bot did not create a pull_ref event on the timeline."
|
||||
print(f"Found forwarded PR: products/SLFO #{forwarded_pr_number}")
|
||||
|
||||
print(f"Polling products/SLFO PR #{forwarded_pr_number} for reviewer assignment...")
|
||||
reviewer_added = False
|
||||
for _ in range(15):
|
||||
time.sleep(1)
|
||||
pr_details = gitea_env.get_pr_details("products/SLFO", forwarded_pr_number)
|
||||
if any(
|
||||
r.get("login") == "autogits_obs_staging_bot"
|
||||
for r in pr_details.get("requested_reviewers", [])
|
||||
):
|
||||
reviewer_added = True
|
||||
break
|
||||
assert reviewer_added, "Staging bot was not added as a reviewer."
|
||||
print("Staging bot has been added as a reviewer.")
|
||||
|
||||
mock_build_result(package_name="pkgA", code="succeeded")
|
||||
|
||||
print("Restarting obs-staging-bot...")
|
||||
subprocess.run(
|
||||
["podman-compose", "restart", "obs-staging-bot"],
|
||||
cwd=compose_dir,
|
||||
check=True,
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
print(f"Polling products/SLFO PR #{forwarded_pr_number} for final status...")
|
||||
status_comment_found = False
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
timeline_events = gitea_env.get_timeline_events("products/SLFO", forwarded_pr_number)
|
||||
for event in timeline_events:
|
||||
print(event.get("body", "not a body"))
|
||||
if event.get("body") and "successful" in event["body"]:
|
||||
status_comment_found = True
|
||||
break
|
||||
if status_comment_found:
|
||||
break
|
||||
assert status_comment_found, "Staging bot did not post a 'successful' comment."
|
||||
|
||||
|
||||
def test_pr_workflow_failed(gitea_env, mock_build_result):
|
||||
"""End-to-end test for a failed PR workflow."""
|
||||
diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
|
||||
pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR - should fail")
|
||||
initial_pr_number = pr["number"]
|
||||
|
||||
compose_dir = Path(__file__).parent.parent
|
||||
|
||||
forwarded_pr_number = None
|
||||
print(
|
||||
f"Polling pool/pkgA PR #{initial_pr_number} timeline for forwarded PR event..."
|
||||
)
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
timeline_events = gitea_env.get_timeline_events("pool/pkgA", initial_pr_number)
|
||||
for event in timeline_events:
|
||||
if event.get("type") == "pull_ref":
|
||||
if not (ref_issue := event.get("ref_issue")):
|
||||
continue
|
||||
url_to_check = ref_issue.get("html_url", "")
|
||||
match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
|
||||
if match:
|
||||
forwarded_pr_number = match.group(1)
|
||||
break
|
||||
if forwarded_pr_number:
|
||||
break
|
||||
assert (
|
||||
forwarded_pr_number is not None
|
||||
), "Workflow bot did not create a pull_ref event on the timeline."
|
||||
print(f"Found forwarded PR: products/SLFO #{forwarded_pr_number}")
|
||||
|
||||
print(f"Polling products/SLFO PR #{forwarded_pr_number} for reviewer assignment...")
|
||||
reviewer_added = False
|
||||
for _ in range(15):
|
||||
time.sleep(1)
|
||||
pr_details = gitea_env.get_pr_details("products/SLFO", forwarded_pr_number)
|
||||
if any(
|
||||
r.get("login") == "autogits_obs_staging_bot"
|
||||
for r in pr_details.get("requested_reviewers", [])
|
||||
):
|
||||
reviewer_added = True
|
||||
break
|
||||
assert reviewer_added, "Staging bot was not added as a reviewer."
|
||||
print("Staging bot has been added as a reviewer.")
|
||||
|
||||
mock_build_result(package_name="pkgA", code="failed")
|
||||
|
||||
print("Restarting obs-staging-bot...")
|
||||
subprocess.run(
|
||||
["podman-compose", "restart", "obs-staging-bot"],
|
||||
cwd=compose_dir,
|
||||
check=True,
|
||||
capture_output=True,
|
||||
)
|
||||
|
||||
print(f"Polling products/SLFO PR #{forwarded_pr_number} for final status...")
|
||||
status_comment_found = False
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
timeline_events = gitea_env.get_timeline_events("products/SLFO", forwarded_pr_number)
|
||||
for event in timeline_events:
|
||||
if event.get("body") and "failed" in event["body"]:
|
||||
status_comment_found = True
|
||||
break
|
||||
if status_comment_found:
|
||||
break
|
||||
assert status_comment_found, "Staging bot did not post a 'failed' comment."
|
||||
117
integration/tests/workflow_pr_sync_test.py
Executable file
117
integration/tests/workflow_pr_sync_test.py
Executable file
@@ -0,0 +1,117 @@
|
||||
import pytest
|
||||
import re
|
||||
import time
|
||||
import subprocess
|
||||
import requests
|
||||
from pathlib import Path
|
||||
from tests.lib.common_test_utils import (
|
||||
GiteaAPIClient,
|
||||
)
|
||||
|
||||
# =============================================================================
|
||||
# TEST CASES
|
||||
# =============================================================================
|
||||
|
||||
pytest.pr = None
|
||||
pytest.pr_details = None
|
||||
pytest.initial_pr_number = None
|
||||
pytest.forwarded_pr_number = None
|
||||
|
||||
|
||||
@pytest.mark.dependency()
|
||||
def test_001_project_pr(gitea_env):
|
||||
"""Forwarded PR correct title"""
|
||||
diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
|
||||
pytest.pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR")
|
||||
pytest.initial_pr_number = pytest.pr["number"]
|
||||
time.sleep(5) # Give Gitea some time to process the PR and make the timeline available
|
||||
|
||||
compose_dir = Path(__file__).parent.parent
|
||||
|
||||
pytest.forwarded_pr_number = None
|
||||
print(
|
||||
f"Polling pool/pkgA PR #{pytest.initial_pr_number} timeline for forwarded PR event..."
|
||||
)
|
||||
# Instead of polling timeline, check if forwarded PR exists directly
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
timeline_events = gitea_env.get_timeline_events("pool/pkgA", pytest.initial_pr_number)
|
||||
for event in timeline_events:
|
||||
if event.get("type") == "pull_ref":
|
||||
if not (ref_issue := event.get("ref_issue")):
|
||||
continue
|
||||
url_to_check = ref_issue.get("html_url", "")
|
||||
match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
|
||||
if match:
|
||||
pytest.forwarded_pr_number = match.group(1)
|
||||
break
|
||||
if pytest.forwarded_pr_number:
|
||||
break
|
||||
assert (
|
||||
pytest.forwarded_pr_number is not None
|
||||
), "Workflow bot did not create a forwarded PR."
|
||||
pytest.pr_details = gitea_env.get_pr_details("products/SLFO", pytest.forwarded_pr_number)
|
||||
assert (
|
||||
pytest.pr_details["title"] == "Forwarded PRs: pkgA"
|
||||
), "Forwarded PR correct title"
|
||||
|
||||
|
||||
@pytest.mark.dependency(depends=["test_001_project_pr"])
|
||||
def test_002_updated_project_pr(gitea_env):
|
||||
"""Forwarded PR head is updated"""
|
||||
diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100444\nindex 0000000..e69de21\n"
|
||||
gitea_env.modify_gitea_pr("pool/pkgA", pytest.initial_pr_number, diff, "Tweaks")
|
||||
sha_old = pytest.pr_details["head"]["sha"]
|
||||
|
||||
sha_changed = False
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
new_pr_details = gitea_env.get_pr_details("products/SLFO", pytest.forwarded_pr_number)
|
||||
sha_new = new_pr_details["head"]["sha"]
|
||||
if sha_new != sha_old:
|
||||
print(f"Sha changed from {sha_old} to {sha_new}")
|
||||
sha_changed = True
|
||||
break
|
||||
|
||||
assert sha_changed, "Forwarded PR has sha updated"
|
||||
|
||||
|
||||
@pytest.mark.dependency(depends=["test_001_project_pr"])
|
||||
def test_003_wip(gitea_env):
|
||||
"""WIP flag set for PR"""
|
||||
# 1. set WIP flag in PR f"pool/pkgA#{pytest.initial_pr_number}"
|
||||
initial_pr_details = gitea_env.get_pr_details("pool/pkgA", pytest.initial_pr_number)
|
||||
wip_title = "WIP: " + initial_pr_details["title"]
|
||||
|
||||
gitea_env.update_gitea_pr_properties("pool/pkgA", pytest.initial_pr_number, title=wip_title)
|
||||
# 2. in loop check whether WIP flag is set for PR f"products/SLFO #{pytest.forwarded_pr_number}"
|
||||
wip_flag_set = False
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
forwarded_pr_details = gitea_env.get_pr_details(
|
||||
"products/SLFO", pytest.forwarded_pr_number
|
||||
)
|
||||
if "WIP: " in forwarded_pr_details["title"]:
|
||||
wip_flag_set = True
|
||||
break
|
||||
|
||||
assert wip_flag_set, "WIP flag was not set in the forwarded PR."
|
||||
|
||||
# Remove WIP flag from PR f"pool/pkgA#{pytest.initial_pr_number}"
|
||||
initial_pr_details = gitea_env.get_pr_details("pool/pkgA", pytest.initial_pr_number)
|
||||
non_wip_title = initial_pr_details["title"].replace("WIP: ", "")
|
||||
gitea_env.update_gitea_pr_properties(
|
||||
"pool/pkgA", pytest.initial_pr_number, title=non_wip_title
|
||||
)
|
||||
|
||||
# In loop check whether WIP flag is removed for PR f"products/SLFO #{pytest.forwarded_pr_number}"
|
||||
wip_flag_removed = False
|
||||
for _ in range(20):
|
||||
time.sleep(1)
|
||||
forwarded_pr_details = gitea_env.get_pr_details(
|
||||
"products/SLFO", pytest.forwarded_pr_number
|
||||
)
|
||||
if "WIP: " not in forwarded_pr_details["title"]:
|
||||
wip_flag_removed = True
|
||||
break
|
||||
assert wip_flag_removed, "WIP flag was not removed from the forwarded PR."
|
||||
1
integration/workflow-pr/Dockerfile
Symbolic link
1
integration/workflow-pr/Dockerfile
Symbolic link
@@ -0,0 +1 @@
|
||||
Dockerfile.package
|
||||
17
integration/workflow-pr/Dockerfile.local
Normal file
17
integration/workflow-pr/Dockerfile.local
Normal file
@@ -0,0 +1,17 @@
|
||||
# Use the same base image as the Gitea container
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
# Add the custom CA to the trust store
|
||||
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
# Install git and ssh
|
||||
RUN zypper -n in git-core openssh-clients binutils
|
||||
|
||||
# Copy the pre-built binary into the container
|
||||
COPY workflow-pr/workflow-pr /usr/local/bin/workflow-pr
|
||||
COPY integration/workflow-pr/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +4755 /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Set the entrypoint for the container
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
18
integration/workflow-pr/Dockerfile.package
Normal file
18
integration/workflow-pr/Dockerfile.package
Normal file
@@ -0,0 +1,18 @@
|
||||
# Use the same base image as the Gitea container
|
||||
FROM registry.suse.com/bci/bci-base:15.7
|
||||
|
||||
# Add the custom CA to the trust store
|
||||
COPY rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
|
||||
RUN update-ca-certificates
|
||||
|
||||
RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
|
||||
RUN zypper --gpg-auto-import-keys ref
|
||||
|
||||
# Install git and ssh
|
||||
RUN zypper -n in git-core openssh-clients autogits-workflow-pr binutils
|
||||
|
||||
COPY integration/workflow-pr/entrypoint.sh /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +4755 /usr/local/bin/entrypoint.sh
|
||||
|
||||
# Set the entrypoint for the container
|
||||
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
|
||||
66
integration/workflow-pr/entrypoint.sh
Normal file
66
integration/workflow-pr/entrypoint.sh
Normal file
@@ -0,0 +1,66 @@
|
||||
#!/bin/bash
|
||||
TOKEN_FILE="/var/lib/gitea/workflow-pr.token"
|
||||
|
||||
# Wait for the token file to be created by the gitea setup script
|
||||
echo "Waiting for $TOKEN_FILE..."
|
||||
while [ ! -s "$TOKEN_FILE" ]; do
|
||||
sleep 2
|
||||
done
|
||||
|
||||
# Read token and trim whitespace/newlines
|
||||
GITEA_TOKEN=$(cat "$TOKEN_FILE" | tr -d '\n\r ' )
|
||||
|
||||
if [ -z "$GITEA_TOKEN" ]; then
|
||||
echo "Error: Token file $TOKEN_FILE is empty after trimming."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
export GITEA_TOKEN
|
||||
echo "GITEA_TOKEN exported (length: ${#GITEA_TOKEN})"
|
||||
|
||||
# Wait for the dummy data to be created by the gitea setup script
|
||||
echo "Waiting for workflow.config in products/SLFO..."
|
||||
API_URL="http://gitea-test:3000/api/v1/repos/products/SLFO/contents/workflow.config"
|
||||
HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")
|
||||
|
||||
while [ "$HTTP_STATUS" != "200" ]; do
|
||||
echo "workflow.config not found yet (HTTP Status: $HTTP_STATUS). Retrying in 5s..."
|
||||
sleep 5
|
||||
HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")
|
||||
done
|
||||
|
||||
# Wait for the shared SSH key to be generated by the gitea setup script
|
||||
echo "Waiting for /var/lib/gitea/ssh-keys/id_ed25519..."
|
||||
while [ ! -f /var/lib/gitea/ssh-keys/id_ed25519 ]; do
|
||||
sleep 2
|
||||
done
|
||||
|
||||
export AUTOGITS_IDENTITY_FILE="/root/.ssh/id_ed25519"
|
||||
|
||||
# Pre-populate known_hosts with Gitea's SSH host key
|
||||
echo "Preparing SSH environment in /root/.ssh..."
|
||||
mkdir -p /root/.ssh
|
||||
chmod 700 /root/.ssh
|
||||
|
||||
# Copy the private key to the standard location and set permissions
|
||||
cp /var/lib/gitea/ssh-keys/id_ed25519 /root/.ssh/id_ed25519
|
||||
chmod 600 /root/.ssh/id_ed25519
|
||||
|
||||
echo "Scanning Gitea SSH host key..."
|
||||
# We try multiple times because Gitea might still be starting its SSH server
|
||||
for i in {1..10}; do
|
||||
ssh-keyscan -p 3022 gitea-test >> /root/.ssh/known_hosts 2>/dev/null && break
|
||||
echo "Retrying ssh-keyscan in 2s..."
|
||||
sleep 2
|
||||
done
|
||||
chmod 644 /root/.ssh/known_hosts
|
||||
|
||||
exe=$(which workflow-pr)
|
||||
exe=${exe:-/usr/local/bin/workflow-pr}
|
||||
|
||||
package=$(rpm -qa | grep autogits-workflow-pr) || :
|
||||
|
||||
echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
|
||||
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :
|
||||
|
||||
exec "$exe" "$@"
|
||||
3
integration/workflow-pr/workflow-pr.json
Normal file
3
integration/workflow-pr/workflow-pr.json
Normal file
@@ -0,0 +1,3 @@
|
||||
[
|
||||
"products/SLFO#main"
|
||||
]
|
||||
@@ -4,11 +4,15 @@ OBS Staging Bot
|
||||
Build a PR against a ProjectGit, if review is requested.
|
||||
|
||||
|
||||
Areas of Responsibility
|
||||
-----------------------
|
||||
Main Tasks
|
||||
----------
|
||||
|
||||
* Monitors Notification API in Gitea for review requests
|
||||
* Reviews Package build results in OBS for all changed packages in ProjectGit PR
|
||||
* A build in OBS is initiated when a review for this bot is requested.
|
||||
* The overall build status is reported:
|
||||
* Build successful
|
||||
* Build failed
|
||||
* It checks the build status only for the involved packages compared to the last state of the project for all architectures and all flavors.
|
||||
* It adds an svg with detailed building status.
|
||||
|
||||
|
||||
Target Usage
|
||||
@@ -21,22 +25,56 @@ Configuration File
|
||||
------------------
|
||||
|
||||
Bot reads `staging.config` from the project git or the PR to the project git.
|
||||
It's a JSON file with following syntax
|
||||
It's a JSON file with following syntax:
|
||||
|
||||
```
|
||||
```json
|
||||
{
|
||||
"ObsProject": "home:foo:project",
|
||||
"StagingProject": "home:foo:project:staging",
|
||||
"QA": [
|
||||
{
|
||||
"Name": "ProjectBuild",
|
||||
"Origin": "home:foo:product:images"
|
||||
}
|
||||
]
|
||||
"ObsProject": "SUSE:SLFO:1.2",
|
||||
"StagingProject": "SUSE:SLFO:1.2:PullRequest",
|
||||
"QA": [
|
||||
{
|
||||
"Name": "SLES",
|
||||
"Origin": "SUSE:SLFO:Products:SLES:16.0",
|
||||
"Label": "BootstrapRing",
|
||||
"BuildDisableRepos": ["product"]
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
* ObsProject: (**required**) Project where the base project is built. Builds in this project will be used to compare to builds based on sources from the PR
|
||||
* StagingProject: template project that will be used as template for the staging project. Omitting this will use the ObsProject repositories to create the staging. Staging project will be created under the template, or in the bot's home directory if not specified.
|
||||
* QA: set of projects to build ontop of the binaries built in staging.
|
||||
| Field name | Details | Mandatory | Type | Allowed Values | Default |
|
||||
| ----- | ----- | ----- | ----- | ----- | ----- |
|
||||
| *ObsProject* | Product OBS project. Builds in this project will be used to compare to builds based on sources from the PR. | yes | string | `[a-zA-Z0-9-_:]+` | |
|
||||
| *StagingProject* | Used both as base project and prefix for all OBS staging projects. Upon being added as a reviewer to a PrjGit PR, this bot automatically generates an OBS project named *StagingProject:<PR_Number>*. It must be a sub-project of the *ObsProject*. | yes | string | `[a-zA-Z0-9-_:]+` | |
|
||||
| *QA* | Crucial for generating a product build (such as an ISO or FTP tree) that incorporates the packages. | no | array of objects | | |
|
||||
| *QA > Name* | Suffix for the QA OBS staging project. The project is named *StagingProject:<PR_Number>:Name*. | no | string | | |
|
||||
| *QA > Origin* | OBS reference project | no | string | | |
|
||||
| *QA > Label* | Setup the project only when the given gitea label is set on pull request | no | string | | |
|
||||
| *QA > BuildDisableRepos* | The names of OBS repositories to build-disable, if any. | no | array of strings | | [] |
|
||||
|
||||
|
||||
Details
|
||||
-------
|
||||
|
||||
* **OBS staging projects are deleted** when the relative PrjGit PR is closed or merged.
|
||||
|
||||
* **PrjGit PR - staging project**
|
||||
* The OBS staging project utilizes an **scmsync** tag, configured with the `onlybuild` flag, to exclusively build packages associated with this specific PrjGit PR.
|
||||
* The **build config** is inherited from the PrjGit PR config file (even if unchanged).
|
||||
* The **project meta** creates a standard repository following the StagingProject as a project path.
|
||||
* The base *StagingProject* has the macro **FromScratch:** set in its config, which prevents inheriting the configuration from the included project paths.
|
||||
* The bot copies the project maintainers from *StagingProject* to the specific staging project (*StagingProject:<PR_Number>*).
|
||||
* The bot reports “Build successful” only if the build is successful for all repositories and all architectures.
|
||||
|
||||
* **PrjGit PR - QA staging project**
|
||||
* The QA staging project is meant for building the product; the relative build config is inherited from the `QA > Origin` project.
|
||||
* In this case, the **scmsync** tag is inherited from the `QA > Origin` project.
|
||||
* It is desirable in some cases to avoid building some specific build service repositories when not needed. In this case, `QA > BuildDisableRepos` can be specified.
|
||||
These repositories would be disabled in the project meta when generating the QA project.
|
||||
* QA projects can build on each other. In this case it is important that the order to setup is correct
|
||||
in the staging.config file.
|
||||
* Based on Label settings QA projects can get created or removed. The staging bot is also checking that these
|
||||
projects build successfully.
|
||||
* It is possible to include the sources from the staging project also in the QA project. Define a template using
|
||||
a project link pointing to the project defined as "StagingProject". You must *not* use scmsync directly in the
|
||||
same project then, but you can use it indirectly via a second project link
|
||||
|
||||
@@ -19,6 +19,7 @@ package main
|
||||
*/
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"flag"
|
||||
@@ -109,161 +110,110 @@ const (
|
||||
BuildStatusSummaryUnknown = 4
|
||||
)
|
||||
|
||||
func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatusSummary {
|
||||
if _, finished := refProject.BuildResultSummary(); !finished {
|
||||
common.LogDebug("refProject not finished building??")
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
type DisableFlag struct {
|
||||
XMLName string `xml:"disable"`
|
||||
Name string `xml:"repository,attr"`
|
||||
}
|
||||
|
||||
func ProcessBuildStatus(project *common.BuildResultList) BuildStatusSummary {
|
||||
if _, finished := project.BuildResultSummary(); !finished {
|
||||
common.LogDebug("Still building...")
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
// the repositories should be setup equally between the projects. We
|
||||
// need to verify that packages that are building in `refProject` are not
|
||||
// failing in the `project`
|
||||
BuildResultSorter := func(a, b *common.BuildResult) int {
|
||||
if c := strings.Compare(a.Repository, b.Repository); c != 0 {
|
||||
return c
|
||||
}
|
||||
if c := strings.Compare(a.Arch, b.Arch); c != 0 {
|
||||
return c
|
||||
}
|
||||
|
||||
panic("Should not happen -- BuiltResultSorter equal repos?")
|
||||
}
|
||||
slices.SortFunc(project.Result, BuildResultSorter)
|
||||
if refProject == nil {
|
||||
// just return if buid finished and have some successes, since new package
|
||||
common.LogInfo("New package. Only need some success...")
|
||||
SomeSuccess := false
|
||||
for i := 0; i < len(project.Result); i++ {
|
||||
repoRes := project.Result[i]
|
||||
repoResStatus, ok := common.ObsRepoStatusDetails[repoRes.Code]
|
||||
if !ok {
|
||||
common.LogDebug("cannot find code:", repoRes.Code)
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
if !repoResStatus.Finished {
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
for _, pkg := range repoRes.Status {
|
||||
pkgStatus, ok := common.ObsBuildStatusDetails[pkg.Code]
|
||||
if !ok {
|
||||
common.LogInfo("Unknown package build status:", pkg.Code, "for", pkg.Package)
|
||||
common.LogDebug("Details:", pkg.Details)
|
||||
}
|
||||
|
||||
if pkgStatus.Success {
|
||||
SomeSuccess = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if SomeSuccess {
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
return BuildStatusSummaryFailed
|
||||
}
|
||||
|
||||
slices.SortFunc(refProject.Result, BuildResultSorter)
|
||||
|
||||
common.LogDebug("comparing results", len(project.Result), "vs. ref", len(refProject.Result))
|
||||
SomeSuccess := false
|
||||
common.LogDebug("build results", len(project.Result))
|
||||
for i := 0; i < len(project.Result); i++ {
|
||||
common.LogDebug("searching for", project.Result[i].Repository, "/", project.Result[i].Arch)
|
||||
j := 0
|
||||
found:
|
||||
for ; j < len(refProject.Result); j++ {
|
||||
if project.Result[i].Repository != refProject.Result[j].Repository ||
|
||||
project.Result[i].Arch != refProject.Result[j].Arch {
|
||||
continue
|
||||
}
|
||||
|
||||
for j := 0; j < len(project.Result); j++ {
|
||||
common.LogDebug(" found match for @ idx:", j)
|
||||
res, success := ProcessRepoBuildStatus(project.Result[i].Status, refProject.Result[j].Status)
|
||||
res := ProcessRepoBuildStatus(project.Result[i].Status)
|
||||
switch res {
|
||||
case BuildStatusSummarySuccess:
|
||||
SomeSuccess = SomeSuccess || success
|
||||
break found
|
||||
case BuildStatusSummaryFailed:
|
||||
return BuildStatusSummaryFailed
|
||||
default:
|
||||
return res
|
||||
}
|
||||
}
|
||||
|
||||
if j >= len(refProject.Result) {
|
||||
common.LogDebug("Cannot find results...")
|
||||
common.LogDebug(project.Result[i])
|
||||
common.LogDebug(refProject.Result)
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
}
|
||||
|
||||
if SomeSuccess {
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
return BuildStatusSummaryFailed
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func ProcessRepoBuildStatus(results, ref []*common.PackageBuildStatus) (status BuildStatusSummary, SomeSuccess bool) {
|
||||
PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
|
||||
return strings.Compare(a.Package, b.Package)
|
||||
}
|
||||
func ProcessRepoBuildStatus(results []*common.PackageBuildStatus) (status BuildStatusSummary) {
|
||||
|
||||
PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
|
||||
return strings.Compare(a.Package, b.Package)
|
||||
}
|
||||
|
||||
common.LogDebug("******** REF: ")
|
||||
data, _ := xml.MarshalIndent(ref, "", " ")
|
||||
common.LogDebug(string(data))
|
||||
common.LogDebug("******* RESULTS: ")
|
||||
data, _ = xml.MarshalIndent(results, "", " ")
|
||||
data, _ := xml.MarshalIndent(results, "", " ")
|
||||
common.LogDebug(string(data))
|
||||
common.LogDebug("*******")
|
||||
|
||||
// compare build result
|
||||
slices.SortFunc(results, PackageBuildStatusSorter)
|
||||
slices.SortFunc(ref, PackageBuildStatusSorter)
|
||||
|
||||
j := 0
|
||||
SomeSuccess = false
|
||||
for i := 0; i < len(results); i++ {
|
||||
res, ok := common.ObsBuildStatusDetails[results[i].Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown package result code:", results[i].Code, "for package:", results[i].Package)
|
||||
return BuildStatusSummaryUnknown, SomeSuccess
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
|
||||
if !res.Finished {
|
||||
return BuildStatusSummaryBuilding, SomeSuccess
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
if !res.Success {
|
||||
// not failed if reference project also failed for same package here
|
||||
for ; j < len(results) && strings.Compare(results[i].Package, ref[j].Package) < 0; j++ {
|
||||
}
|
||||
|
||||
if j < len(results) && results[i].Package == ref[j].Package {
|
||||
refRes, ok := common.ObsBuildStatusDetails[ref[j].Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown ref package result code:", ref[j].Code, "package:", ref[j].Package)
|
||||
return BuildStatusSummaryUnknown, SomeSuccess
|
||||
}
|
||||
|
||||
if !refRes.Finished {
|
||||
common.LogDebug("Not finished building in reference project?")
|
||||
}
|
||||
|
||||
if refRes.Success {
|
||||
return BuildStatusSummaryFailed, SomeSuccess
|
||||
}
|
||||
}
|
||||
} else {
|
||||
SomeSuccess = true
|
||||
return BuildStatusSummaryFailed
|
||||
}
|
||||
}
|
||||
|
||||
return BuildStatusSummarySuccess, SomeSuccess
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func GetPackageBuildStatus(project *common.BuildResultList, packageName string) (bool, BuildStatusSummary) {
|
||||
var packageStatuses []*common.PackageBuildStatus
|
||||
|
||||
// Collect all statuses for the package
|
||||
for _, result := range project.Result {
|
||||
for _, pkgStatus := range result.Status {
|
||||
if pkgStatus.Package == packageName {
|
||||
packageStatuses = append(packageStatuses, pkgStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(packageStatuses) == 0 {
|
||||
return true, BuildStatusSummaryUnknown // true for 'missing'
|
||||
}
|
||||
|
||||
// Check for any failures
|
||||
for _, pkgStatus := range packageStatuses {
|
||||
res, ok := common.ObsBuildStatusDetails[pkgStatus.Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown package result code:", pkgStatus.Code, "for package:", pkgStatus.Package)
|
||||
return false, BuildStatusSummaryUnknown
|
||||
}
|
||||
if !res.Success {
|
||||
return false, BuildStatusSummaryFailed
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any unfinished builds
|
||||
for _, pkgStatus := range packageStatuses {
|
||||
res, _ := common.ObsBuildStatusDetails[pkgStatus.Code]
|
||||
// 'ok' is already checked in the loop above
|
||||
if !res.Finished {
|
||||
return false, BuildStatusSummaryBuilding
|
||||
}
|
||||
}
|
||||
|
||||
// If we got here, all are finished and successful
|
||||
return false, BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingPrj, buildPrj string, stagingMasterPrj string) (*common.ProjectMeta, error) {
|
||||
@@ -322,9 +272,9 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
common.LogError("error fetching project meta for", buildPrj, ". Err:", err)
|
||||
return nil, err
|
||||
}
|
||||
common.LogInfo("Meta: ", meta)
|
||||
|
||||
// generate new project with paths pointinig back to original repos
|
||||
// disable publishing
|
||||
|
||||
meta.Name = stagingPrj
|
||||
meta.Description = fmt.Sprintf(`Pull request build job PR#%d to branch %s of %s/%s`,
|
||||
@@ -339,7 +289,10 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
|
||||
urlPkg := make([]string, 0, len(modifiedOrNew))
|
||||
for _, pkg := range modifiedOrNew {
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(pkg))
|
||||
// FIXME: skip manifest subdirectories itself
|
||||
// strip any leading directory name and just hand over last directory as package name
|
||||
onlybuilds := strings.Split(pkg, "/")
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(onlybuilds[len(onlybuilds)-1]))
|
||||
}
|
||||
meta.ScmSync = pr.Head.Repo.CloneURL + "?" + strings.Join(urlPkg, "&") + "#" + pr.Head.Sha
|
||||
if len(meta.ScmSync) >= 65535 {
|
||||
@@ -377,28 +330,97 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
// stagingProject:$buildProject
|
||||
// ^- stagingProject:$buildProject:$subProjectName (based on templateProject)
|
||||
|
||||
func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject, templateProject, subProjectName string) error {
|
||||
func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject, templateProject, subProjectName string, buildDisableRepos []string) error {
|
||||
common.LogDebug("Setup QA sub projects")
|
||||
common.LogDebug("reading templateProject ", templateProject)
|
||||
templateMeta, err := ObsClient.GetProjectMeta(templateProject)
|
||||
if err != nil {
|
||||
common.LogError("error fetching template project meta for", templateProject, ":", err)
|
||||
return err
|
||||
}
|
||||
// patch baseMeta to become the new project
|
||||
common.LogDebug("upcoming project name ", stagingProject, ":", subProjectName)
|
||||
templateMeta.Name = stagingProject + ":" + subProjectName
|
||||
// freeze tag for now
|
||||
if len(templateMeta.ScmSync) > 0 {
|
||||
repository, err := url.Parse(templateMeta.ScmSync)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
common.LogDebug("getting data for ", repository.EscapedPath())
|
||||
split := strings.Split(repository.EscapedPath(), "/")
|
||||
org, repo := split[1], split[2]
|
||||
|
||||
common.LogDebug("getting commit for ", org, " repo ", repo, " fragment ", repository.Fragment)
|
||||
branch, err := gitea.GetCommit(org, repo, repository.Fragment)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// set expanded commit url
|
||||
repository.Fragment = branch.SHA
|
||||
templateMeta.ScmSync = repository.String()
|
||||
common.LogDebug("Setting scmsync url to ", templateMeta.ScmSync)
|
||||
}
|
||||
// Build-disable repositories if asked
|
||||
if len(buildDisableRepos) > 0 {
|
||||
toDisable := make([]DisableFlag, len(buildDisableRepos))
|
||||
for idx, repositoryName := range buildDisableRepos {
|
||||
toDisable[idx] = DisableFlag{Name: repositoryName}
|
||||
}
|
||||
|
||||
output, err := xml.Marshal(toDisable)
|
||||
if err != nil {
|
||||
common.LogError("error while marshalling, skipping BuildDisableRepos: ", err)
|
||||
} else {
|
||||
templateMeta.BuildFlags.Contents += string(output)
|
||||
}
|
||||
}
|
||||
|
||||
// include sources from submission project when link points to staging project
|
||||
for idx, l := range templateMeta.Link {
|
||||
if l.Project == stagingConfig.StagingProject {
|
||||
templateMeta.Link[idx].Project = stagingProject
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup ReleaseTarget and modify affected path entries
|
||||
for idx, r := range templateMeta.Repositories {
|
||||
templateMeta.Repositories[idx].ReleaseTargets = nil
|
||||
|
||||
for pidx, path := range r.Paths {
|
||||
// Check for path building against code stream
|
||||
common.LogDebug(" checking in ", templateMeta.Name)
|
||||
common.LogDebug(" stagingProject ", stagingProject)
|
||||
common.LogDebug(" checking for ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
common.LogDebug(" path.Project ", path.Project)
|
||||
common.LogDebug(" stagingConfig.ObsProject ", stagingConfig.ObsProject)
|
||||
common.LogDebug(" stagingConfig.StagingProject ", stagingConfig.StagingProject)
|
||||
common.LogDebug(" templateProject ", templateProject)
|
||||
if path.Project == stagingConfig.ObsProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = stagingProject
|
||||
}
|
||||
} else
|
||||
// Check for path building against a repo in template project itself
|
||||
if path.Project == templateProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = templateMeta.Name
|
||||
} else
|
||||
// Check for path prefixes against a template project inside of template project area
|
||||
if strings.HasPrefix(path.Project, stagingConfig.StagingProject + ":") {
|
||||
newProjectName := stagingProject
|
||||
// find project name
|
||||
for _, setup := range stagingConfig.QA {
|
||||
if setup.Origin == path.Project {
|
||||
common.LogDebug(" Match:", setup.Origin)
|
||||
newProjectName = newProjectName + ":" + setup.Name
|
||||
common.LogDebug(" New:", newProjectName)
|
||||
break
|
||||
}
|
||||
}
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = newProjectName
|
||||
common.LogDebug(" Matched prefix")
|
||||
}
|
||||
common.LogDebug(" Path using project ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -406,6 +428,8 @@ func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, git
|
||||
err = ObsClient.SetProjectMeta(templateMeta)
|
||||
if err != nil {
|
||||
common.LogError("cannot create project:", templateMeta.Name, err)
|
||||
x, _ := xml.MarshalIndent(templateMeta, "", " ")
|
||||
common.LogError(string(x))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@@ -512,7 +536,7 @@ func FetchOurLatestActionableReview(gitea common.Gitea, org, repo string, id int
|
||||
}
|
||||
|
||||
func ParseNotificationToPR(thread *models.NotificationThread) (org string, repo string, num int64, err error) {
|
||||
rx := regexp.MustCompile(`^https://src\.(?:open)?suse\.(?:org|de)/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
rx := regexp.MustCompile(`^.*/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
notification := thread.Subject
|
||||
match := rx.FindStringSubmatch(notification.URL)
|
||||
if match == nil {
|
||||
@@ -661,6 +685,64 @@ func SetStatus(gitea common.Gitea, org, repo, hash string, status *models.Commit
|
||||
return err
|
||||
}
|
||||
|
||||
func commentOnPackagePR(gitea common.Gitea, org string, repo string, prNum int64, msg string) {
|
||||
if IsDryRun {
|
||||
common.LogInfo("Would comment on package PR %s/%s#%d: %s", org, repo, prNum, msg)
|
||||
return
|
||||
}
|
||||
|
||||
pr, err := gitea.GetPullRequest(org, repo, prNum)
|
||||
if err != nil {
|
||||
common.LogError("Failed to get package PR %s/%s#%d: %v", org, repo, prNum, err)
|
||||
return
|
||||
}
|
||||
|
||||
err = gitea.AddComment(pr, msg)
|
||||
if err != nil {
|
||||
common.LogError("Failed to comment on package PR %s/%s#%d: %v", org, repo, prNum, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Create and remove QA projects
|
||||
func ProcessQaProjects(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject string) []string {
|
||||
usedQAprojects := make([]string, 0)
|
||||
prLabelNames := make(map[string]int)
|
||||
for _, label := range pr.Labels {
|
||||
prLabelNames[label.Name] = 1
|
||||
}
|
||||
msg := ""
|
||||
for _, setup := range stagingConfig.QA {
|
||||
QAproject := stagingProject + ":" + setup.Name
|
||||
if len(setup.Label) > 0 {
|
||||
if _, ok := prLabelNames[setup.Label]; !ok {
|
||||
if !IsDryRun {
|
||||
// blindly remove, will fail when not existing
|
||||
ObsClient.DeleteProject(QAproject)
|
||||
}
|
||||
common.LogInfo("QA project ", setup.Name, "has no matching Label")
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
usedQAprojects = append(usedQAprojects, QAproject)
|
||||
// check for existens first, no error, but no meta is a 404
|
||||
if meta, err := ObsClient.GetProjectMeta(QAproject); meta == nil && err == nil {
|
||||
common.LogInfo("Create QA project ", QAproject)
|
||||
CreateQASubProject(stagingConfig, git, gitea, pr,
|
||||
stagingProject,
|
||||
setup.Origin,
|
||||
setup.Name,
|
||||
setup.BuildDisableRepos)
|
||||
msg = msg + "QA Project added: " + ObsWebHost + "/project/show/" +
|
||||
QAproject + "\n"
|
||||
}
|
||||
}
|
||||
if len(msg) > 1 {
|
||||
gitea.AddComment(pr, msg)
|
||||
}
|
||||
return usedQAprojects
|
||||
}
|
||||
|
||||
func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, error) {
|
||||
dir, err := os.MkdirTemp(os.TempDir(), BotName)
|
||||
common.PanicOnError(err)
|
||||
@@ -738,11 +820,12 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
meta, err := ObsClient.GetProjectMeta(stagingConfig.ObsProject)
|
||||
if err != nil || meta == nil {
|
||||
common.LogError("Cannot find reference project meta:", stagingConfig.ObsProject, err)
|
||||
if !IsDryRun {
|
||||
if !IsDryRun && err == nil {
|
||||
common.LogError("Reference project is absent:", stagingConfig.ObsProject, err)
|
||||
_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Cannot fetch reference project meta")
|
||||
return true, err
|
||||
}
|
||||
return true, nil
|
||||
return true, err
|
||||
}
|
||||
|
||||
if metaUrl, err := url.Parse(meta.ScmSync); err != nil {
|
||||
@@ -774,6 +857,7 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
l := len(stagingConfig.ObsProject)
|
||||
if l >= len(stagingConfig.StagingProject) || stagingConfig.ObsProject != stagingConfig.StagingProject[0:l] {
|
||||
common.LogError("StagingProject (", stagingConfig.StagingProject, ") is not child of target project", stagingConfig.ObsProject)
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -892,35 +976,49 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
msg = "Build is started in " + ObsWebHost + "/project/show/" +
|
||||
stagingProject + " .\n"
|
||||
|
||||
if len(stagingConfig.QA) > 0 {
|
||||
msg = msg + "\nAdditional QA builds: \n"
|
||||
}
|
||||
SetStatus(gitea, org, repo, pr.Head.Sha, status)
|
||||
for _, setup := range stagingConfig.QA {
|
||||
CreateQASubProject(stagingConfig, git, gitea, pr,
|
||||
stagingProject,
|
||||
setup.Origin,
|
||||
setup.Name)
|
||||
msg = msg + ObsWebHost + "/project/show/" +
|
||||
stagingProject + ":" + setup.Name + "\n"
|
||||
}
|
||||
}
|
||||
if change != RequestModificationNoChange && !IsDryRun {
|
||||
gitea.AddComment(pr, msg)
|
||||
}
|
||||
|
||||
baseResult, err := ObsClient.LastBuildResults(stagingConfig.ObsProject, modifiedPackages...)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching ref project status for", stagingConfig.ObsProject, ":", err)
|
||||
}
|
||||
stagingResult, err := ObsClient.BuildStatus(stagingProject)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching stage project status for", stagingProject, ":", err)
|
||||
}
|
||||
buildStatus := ProcessBuildStatus(stagingResult, baseResult)
|
||||
|
||||
_, packagePRs := common.ExtractDescriptionAndPRs(bufio.NewScanner(strings.NewReader(pr.Body)))
|
||||
|
||||
// always update QA projects because Labels can change
|
||||
qaProjects := ProcessQaProjects(stagingConfig, git, gitea, pr, stagingProject)
|
||||
|
||||
done := false
|
||||
switch buildStatus {
|
||||
overallBuildStatus := ProcessBuildStatus(stagingResult)
|
||||
commentSuffix := ""
|
||||
if len(qaProjects) > 0 && overallBuildStatus == BuildStatusSummarySuccess {
|
||||
seperator := " in "
|
||||
for _, qaProject := range qaProjects {
|
||||
qaResult, err := ObsClient.BuildStatus(qaProject)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching stage project status for", qaProject, ":", err)
|
||||
}
|
||||
qaBuildStatus := ProcessBuildStatus(qaResult)
|
||||
if qaBuildStatus != BuildStatusSummarySuccess {
|
||||
// either still building or in failed state
|
||||
overallBuildStatus = qaBuildStatus
|
||||
commentSuffix = commentSuffix + seperator + qaProject
|
||||
seperator = ", "
|
||||
}
|
||||
if qaBuildStatus == BuildStatusSummaryFailed {
|
||||
// main project was successful, but QA project, adapt the link to QA project
|
||||
// and change commit state to fail
|
||||
status.Status = common.CommitStatus_Fail
|
||||
status.TargetURL = ObsWebHost + "/project/show/" + qaProject
|
||||
SetStatus(gitea, org, repo, pr.Head.Sha, status)
|
||||
}
|
||||
}
|
||||
}
|
||||
switch overallBuildStatus {
|
||||
case BuildStatusSummarySuccess:
|
||||
status.Status = common.CommitStatus_Success
|
||||
done = true
|
||||
@@ -940,7 +1038,44 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
}
|
||||
}
|
||||
}
|
||||
common.LogInfo("Build status:", buildStatus)
|
||||
|
||||
if overallBuildStatus == BuildStatusSummarySuccess || overallBuildStatus == BuildStatusSummaryFailed {
|
||||
// avoid commenting while build is in progress
|
||||
missingPkgs := []string{}
|
||||
|
||||
for _, packagePR := range packagePRs {
|
||||
missing, packageBuildStatus := GetPackageBuildStatus(stagingResult, packagePR.Repo)
|
||||
if missing {
|
||||
missingPkgs = append(missingPkgs, packagePR.Repo)
|
||||
continue
|
||||
}
|
||||
var msg string
|
||||
switch packageBuildStatus {
|
||||
case BuildStatusSummarySuccess:
|
||||
msg = fmt.Sprintf("Build successful, for more information go in %s/project/show/%s.\n", ObsWebHost, stagingProject)
|
||||
case BuildStatusSummaryFailed:
|
||||
msg = fmt.Sprintf("Build failed, for more information go in %s/project/show/%s.\n", ObsWebHost, stagingProject)
|
||||
default:
|
||||
continue
|
||||
}
|
||||
commentOnPackagePR(gitea, packagePR.Org, packagePR.Repo, packagePR.Num, msg)
|
||||
}
|
||||
|
||||
if len(missingPkgs) > 0 {
|
||||
overallBuildStatus = BuildStatusSummaryFailed
|
||||
msg := "The following packages were not found in the staging project:\n"
|
||||
for _, pkg := range missingPkgs {
|
||||
msg = msg + " - " + pkg + "\n"
|
||||
}
|
||||
common.LogInfo(msg)
|
||||
err := gitea.AddComment(pr, msg)
|
||||
if err != nil {
|
||||
common.LogError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
common.LogInfo("Build status:", overallBuildStatus)
|
||||
if !IsDryRun {
|
||||
if err = SetStatus(gitea, org, repo, pr.Head.Sha, status); err != nil {
|
||||
return false, err
|
||||
@@ -1005,6 +1140,7 @@ func PollWorkNotifications(giteaUrl string) {
|
||||
|
||||
var ListPullNotificationsOnly bool
|
||||
var GiteaUrl string
|
||||
var ObsApiHost string
|
||||
var ObsWebHost string
|
||||
var IsDryRun bool
|
||||
var ProcessPROnly string
|
||||
@@ -1027,8 +1163,8 @@ func main() {
|
||||
flag.BoolVar(&ListPullNotificationsOnly, "list-notifications-only", false, "Only lists notifications without acting on them")
|
||||
ProcessPROnly := flag.String("pr", "", "Process only specific PR and ignore the rest. Use for debugging")
|
||||
buildRoot := flag.String("build-root", "", "Default build location for staging projects. Default is bot's home project")
|
||||
flag.StringVar(&GiteaUrl, "gitea-url", "https://src.opensuse.org", "Gitea instance")
|
||||
obsApiHost := flag.String("obs", "https://api.opensuse.org", "API for OBS instance")
|
||||
flag.StringVar(&GiteaUrl, "gitea-url", "", "Gitea instance")
|
||||
flag.StringVar(&ObsApiHost, "obs", "", "API for OBS instance")
|
||||
flag.StringVar(&ObsWebHost, "obs-web", "", "Web OBS instance, if not derived from the obs config")
|
||||
flag.BoolVar(&IsDryRun, "dry", false, "Dry-run, don't actually create any build projects or review changes")
|
||||
debug := flag.Bool("debug", false, "Turns on debug logging")
|
||||
@@ -1040,18 +1176,34 @@ func main() {
|
||||
common.SetLoggingLevel(common.LogLevelInfo)
|
||||
}
|
||||
|
||||
if len(GiteaUrl) == 0 {
|
||||
GiteaUrl = os.Getenv(common.GiteaHostEnv)
|
||||
}
|
||||
if len(GiteaUrl) == 0 {
|
||||
GiteaUrl = "https://src.opensuse.org"
|
||||
}
|
||||
if len(ObsApiHost) == 0 {
|
||||
ObsApiHost = os.Getenv(common.ObsApiEnv)
|
||||
}
|
||||
if len(ObsApiHost) == 0 {
|
||||
ObsApiHost = "https://api.opensuse.org"
|
||||
}
|
||||
if len(ObsWebHost) == 0 {
|
||||
ObsWebHost = ObsWebHostFromApiHost(*obsApiHost)
|
||||
ObsWebHost = os.Getenv(common.ObsWebEnv)
|
||||
}
|
||||
if len(ObsWebHost) == 0 {
|
||||
ObsWebHost = "https://build.opensuse.org"
|
||||
}
|
||||
|
||||
common.LogDebug("OBS Gitea Host:", GiteaUrl)
|
||||
common.LogDebug("OBS Web Host:", ObsWebHost)
|
||||
common.LogDebug("OBS API Host:", *obsApiHost)
|
||||
common.LogDebug("OBS API Host:", ObsApiHost)
|
||||
|
||||
common.PanicOnErrorWithMsg(common.RequireGiteaSecretToken(), "Cannot find GITEA_TOKEN")
|
||||
common.PanicOnErrorWithMsg(common.RequireObsSecretToken(), "Cannot find OBS_USER and OBS_PASSWORD")
|
||||
|
||||
var err error
|
||||
if ObsClient, err = common.NewObsClient(*obsApiHost); err != nil {
|
||||
if ObsClient, err = common.NewObsClient(ObsApiHost); err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,7 +1,10 @@
|
||||
OBS Status Service
|
||||
==================
|
||||
|
||||
Reports build status of OBS service as an easily to produce SVG
|
||||
Reports build status of OBS service as an easily to produce SVG. Repository
|
||||
results (build results) are cached for 10 seconds and repository listing
|
||||
for OBS instance are cached for 5 minutes -- new repositories take up to
|
||||
5 minutes to be visible.
|
||||
|
||||
Requests for individual build results:
|
||||
|
||||
@@ -17,19 +20,31 @@ Get requests for / will also return 404 statu normally. If the Backend redis
|
||||
server is not available, it will return 500
|
||||
|
||||
|
||||
By default, SVG output is generated, suitable for inclusion. But JSON and XML
|
||||
output is possible by setting `Accept:` request header
|
||||
|
||||
| Accept Request Header | Output format
|
||||
|------------------------|---------------------
|
||||
| | SVG image
|
||||
| application/json | JSON data
|
||||
| application/obs+xml | XML output
|
||||
|
||||
|
||||
Areas of Responsibility
|
||||
-----------------------
|
||||
|
||||
* Monitors RabbitMQ interface for notification of OBS package and project status
|
||||
* Produces SVG output based on GET request
|
||||
* Cache results (sqlite) and periodically update results from OBS (in case of messages are missing)
|
||||
* Fetch and cache internal data from OBS and present it in usable format:
|
||||
+ Generate SVG output for specific OBS project or package
|
||||
+ Generate JSON/XML output for automated processing
|
||||
* Low-overhead
|
||||
|
||||
|
||||
Target Usage
|
||||
------------
|
||||
|
||||
* README.md of package git or project git
|
||||
* inside README.md of package git or project git
|
||||
* comment section of a Gitea PR
|
||||
* automated build result processing
|
||||
|
||||
Running
|
||||
-------
|
||||
@@ -42,3 +57,4 @@ Default parameters can be changed by env variables
|
||||
| `OBS_STATUS_SERVICE_LISTEN` | [::1]:8080 | Listening address and port
|
||||
| `OBS_STATUS_SERVICE_CERT` | /run/obs-status-service.pem | Location of certificate file for service
|
||||
| `OBS_STATUS_SERVICE_KEY` | /run/obs-status-service.pem | Location of key file for service
|
||||
| `REDIS` | | OBS's Redis instance URL
|
||||
|
||||
BIN
obs-status-service/factory.results.json.bz2
LFS
Normal file
BIN
obs-status-service/factory.results.json.bz2
LFS
Normal file
Binary file not shown.
BIN
obs-status-service/gcc15.results.json.bz2
LFS
Normal file
BIN
obs-status-service/gcc15.results.json.bz2
LFS
Normal file
Binary file not shown.
@@ -20,6 +20,7 @@ package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html"
|
||||
@@ -104,10 +105,10 @@ func ProjectStatusSummarySvg(res []*common.BuildResult) []byte {
|
||||
|
||||
func LinkToBuildlog(R *common.BuildResult, S *common.PackageBuildStatus) string {
|
||||
if R != nil && S != nil {
|
||||
switch S.Code {
|
||||
case "succeeded", "failed", "building":
|
||||
//switch S.Code {
|
||||
//case "succeeded", "failed", "building":
|
||||
return "/buildlog/" + url.PathEscape(R.Project) + "/" + url.PathEscape(S.Package) + "/" + url.PathEscape(R.Repository) + "/" + url.PathEscape(R.Arch)
|
||||
}
|
||||
//}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -169,6 +170,7 @@ func BuildStatusSvg(repo *common.BuildResult, status *common.PackageBuildStatus)
|
||||
buildStatus, ok := common.ObsBuildStatusDetails[status.Code]
|
||||
if !ok {
|
||||
buildStatus = common.ObsBuildStatusDetails["error"]
|
||||
common.LogError("Cannot find detail for status.Code", status.Code)
|
||||
}
|
||||
fillColor := "#480" // orange
|
||||
textColor := "#888"
|
||||
@@ -205,6 +207,17 @@ func WriteJson(data any, res http.ResponseWriter) {
|
||||
}
|
||||
}
|
||||
|
||||
func WriteXml(data any, res http.ResponseWriter) {
|
||||
if xmlData, err := xml.MarshalIndent(data, "", " "); err != nil {
|
||||
res.WriteHeader(500)
|
||||
} else {
|
||||
res.Header().Add("size", fmt.Sprint(len(xmlData)))
|
||||
res.Write([]byte("<resultlist>"))
|
||||
res.Write(xmlData)
|
||||
res.Write([]byte("</resultlist>"))
|
||||
}
|
||||
}
|
||||
|
||||
var ObsUrl *string
|
||||
|
||||
func main() {
|
||||
@@ -265,7 +278,7 @@ func main() {
|
||||
http.HandleFunc("GET /status/{Project}", func(res http.ResponseWriter, req *http.Request) {
|
||||
mime := ParseMimeHeader(req)
|
||||
obsPrj := req.PathValue("Project")
|
||||
common.LogInfo(" GET /status/" + obsPrj, "[" + mime.MimeType() + "]")
|
||||
common.LogInfo(" GET /status/"+obsPrj, "["+mime.MimeType()+"]")
|
||||
|
||||
status := FindAndUpdateProjectResults(obsPrj)
|
||||
if len(status) == 0 {
|
||||
@@ -279,17 +292,21 @@ func main() {
|
||||
res.Write(svg)
|
||||
} else if mime.IsJson() {
|
||||
WriteJson(status, res)
|
||||
} else if mime.IsXml() {
|
||||
WriteXml(status, res)
|
||||
}
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}/{Package}", func(res http.ResponseWriter, req *http.Request) {
|
||||
mime := ParseMimeHeader(req)
|
||||
obsPrj := req.PathValue("Project")
|
||||
obsPkg := req.PathValue("Package")
|
||||
common.LogInfo(" GET /status/" + obsPrj + "/" + obsPkg, "[" + mime.MimeType() + "]")
|
||||
common.LogInfo(" GET /status/"+obsPrj+"/"+obsPkg, "["+mime.MimeType()+"]")
|
||||
|
||||
status := slices.Clone(FindAndUpdateProjectResults(obsPrj))
|
||||
for _, s := range status {
|
||||
s.Status = slices.DeleteFunc(slices.Clone(s.Status), DeleteExceptPkg(obsPkg))
|
||||
for i, s := range status {
|
||||
f := *s
|
||||
f.Status = slices.DeleteFunc(slices.Clone(s.Status), DeleteExceptPkg(obsPkg))
|
||||
status[i] = &f
|
||||
}
|
||||
if len(status) == 0 {
|
||||
res.WriteHeader(404)
|
||||
@@ -304,6 +321,8 @@ func main() {
|
||||
res.Write(svg)
|
||||
} else if mime.IsJson() {
|
||||
WriteJson(status, res)
|
||||
} else if mime.IsXml() {
|
||||
WriteXml(status, res)
|
||||
}
|
||||
|
||||
})
|
||||
@@ -312,11 +331,13 @@ func main() {
|
||||
obsPrj := req.PathValue("Project")
|
||||
obsPkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
common.LogInfo(" GET /status/" + obsPrj + "/" + obsPkg, "[" + mime.MimeType() + "]")
|
||||
common.LogInfo(" GET /status/"+obsPrj+"/"+obsPkg, "["+mime.MimeType()+"]")
|
||||
|
||||
status := slices.Clone(FindAndUpdateRepoResults(obsPrj, repo))
|
||||
for _, s := range status {
|
||||
s.Status = slices.DeleteFunc(slices.Clone(s.Status), DeleteExceptPkg(obsPkg))
|
||||
for i, s := range status {
|
||||
f := *s
|
||||
f.Status = slices.DeleteFunc(slices.Clone(s.Status), DeleteExceptPkg(obsPkg))
|
||||
status[i] = &f
|
||||
}
|
||||
if len(status) == 0 {
|
||||
res.WriteHeader(404)
|
||||
@@ -330,6 +351,8 @@ func main() {
|
||||
res.Write(svg)
|
||||
} else if mime.IsJson() {
|
||||
WriteJson(status, res)
|
||||
} else if mime.IsXml() {
|
||||
WriteXml(status, res)
|
||||
}
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}/{Package}/{Repository}/{Arch}", func(res http.ResponseWriter, req *http.Request) {
|
||||
@@ -338,7 +361,7 @@ func main() {
|
||||
pkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
arch := req.PathValue("Arch")
|
||||
common.LogInfo(" GET /status/" + prj + "/" + pkg + "/" + repo + "/" + arch, "[" + mime.MimeType() + "]")
|
||||
common.LogInfo(" GET /status/"+prj+"/"+pkg+"/"+repo+"/"+arch, "["+mime.MimeType()+"]")
|
||||
|
||||
res.Header().Add("content-type", mime.MimeHeader)
|
||||
for _, r := range FindAndUpdateRepoResults(prj, repo) {
|
||||
@@ -349,6 +372,8 @@ func main() {
|
||||
res.Write(BuildStatusSvg(r, status))
|
||||
} else if mime.IsJson() {
|
||||
WriteJson(status, res)
|
||||
} else if mime.IsXml() {
|
||||
WriteXml(status, res)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"compress/bzip2"
|
||||
"encoding/json"
|
||||
"io"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -82,3 +85,36 @@ func TestStatusSvg(t *testing.T) {
|
||||
os.WriteFile("testpackage.svg", PackageStatusSummarySvg("pkg2", data), 0o777)
|
||||
os.WriteFile("testproject.svg", ProjectStatusSummarySvg(data), 0o777)
|
||||
}
|
||||
|
||||
func TestFactoryResults(t *testing.T) {
|
||||
data, err := os.Open("factory.results.json.bz2")
|
||||
if err != nil {
|
||||
t.Fatal("Openning factory.results.json.bz2 failed:", err)
|
||||
}
|
||||
UncompressedData, err := io.ReadAll(bzip2.NewReader(data))
|
||||
if err != nil {
|
||||
t.Fatal("Reading factory.results.json.bz2 failed:", err)
|
||||
}
|
||||
|
||||
var results []*common.BuildResult
|
||||
if err := json.Unmarshal(UncompressedData, &results); err != nil {
|
||||
t.Fatal("Failed parsing test data", err)
|
||||
}
|
||||
|
||||
// add tests here
|
||||
tests := []struct {
|
||||
name string
|
||||
}{
|
||||
// add test data here
|
||||
{
|
||||
name: "First test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.name, func(t *testing.T) {
|
||||
// and test code here
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -11,12 +11,14 @@ type MimeHeader struct {
|
||||
|
||||
const (
|
||||
JsonMime = "application/json"
|
||||
XmlMime = "application/obs+xml"
|
||||
SvgMime = "image/svg+xml"
|
||||
)
|
||||
|
||||
var AcceptedStatusMimes []string = []string{
|
||||
SvgMime,
|
||||
JsonMime,
|
||||
XmlMime,
|
||||
}
|
||||
|
||||
func ParseMimeHeader(req *http.Request) *MimeHeader {
|
||||
@@ -41,6 +43,10 @@ func (m *MimeHeader) IsJson() bool {
|
||||
return m.MimeHeader == JsonMime
|
||||
}
|
||||
|
||||
func (m *MimeHeader) IsXml() bool {
|
||||
return m.MimeHeader == XmlMime
|
||||
}
|
||||
|
||||
func (m *MimeHeader) IsSvg() bool {
|
||||
return m.MimeHeader == SvgMime
|
||||
}
|
||||
@@ -48,6 +54,8 @@ func (m *MimeHeader) IsSvg() bool {
|
||||
func (m *MimeHeader) MimeType() string {
|
||||
if m.IsJson() {
|
||||
return JsonMime
|
||||
} else if m.IsXml() {
|
||||
return XmlMime
|
||||
}
|
||||
|
||||
return SvgMime // default
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"html"
|
||||
"net/url"
|
||||
"slices"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type SvgWriter struct {
|
||||
@@ -133,7 +134,7 @@ func (svg *SvgWriter) WritePackageStatus(loglink, arch, status, detail string) {
|
||||
}
|
||||
|
||||
func (svg *SvgWriter) WriteProjectStatus(project, repo, arch, status string, count int) {
|
||||
u, err := url.Parse(*ObsUrl + "/project/monitor/" + url.PathEscape(project) + "?defaults=0&" + url.QueryEscape(status) + "=1&arch_" + url.QueryEscape(arch) + "=1&repo_" + url.QueryEscape(repo) + "=1")
|
||||
u, err := url.Parse(*ObsUrl + "/project/monitor/" + url.PathEscape(project) + "?defaults=0&" + url.QueryEscape(status) + "=1&arch_" + url.QueryEscape(arch) + "=1&repo_" + url.QueryEscape(strings.ReplaceAll(repo, ".", "_")) + "=1")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
15
systemd/group-review@.service
Normal file
15
systemd/group-review@.service
Normal file
@@ -0,0 +1,15 @@
|
||||
[Unit]
|
||||
Description=Group Review bot for %i
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
ExecStart=/usr/bin/group-review %i
|
||||
EnvironmentFile=-/etc/default/group-review/%i.env
|
||||
DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
23
systemd/workflow-direct@.service
Normal file
23
systemd/workflow-direct@.service
Normal file
@@ -0,0 +1,23 @@
|
||||
[Unit]
|
||||
Description=WorkflowDirect git bot for %i
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
ExecStart=/usr/bin/workflow-direct
|
||||
EnvironmentFile=-/etc/default/%i/workflow-direct.env
|
||||
#DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
# DynamicUser does not work as we cannot seem to be able to put SSH keyfiles into the temp home that are readable by SSH
|
||||
# Also, systemd override is needed away to assign User to run this. This should be dependent per instance.
|
||||
ProtectHome=no
|
||||
PrivateTmp=yes
|
||||
# RuntimeDirectory=%i
|
||||
# SLES 15 doesn't have HOME set for dynamic users, so we improvise
|
||||
# BindReadOnlyPaths=/etc/default/%i/known_hosts:/etc/ssh/ssh_known_hosts /etc/default/%i/config.json:%t/%i/config.json /etc/default/%i/id_ed25519 /etc/default/%i/id_ed25519.pub
|
||||
# WorkingDirectory=%t/%i
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
23
systemd/workflow-pr@.service
Normal file
23
systemd/workflow-pr@.service
Normal file
@@ -0,0 +1,23 @@
|
||||
[Unit]
|
||||
Description=WorkflowPR git bot for %i
|
||||
After=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=exec
|
||||
ExecStart=/usr/bin/workflow-pr
|
||||
EnvironmentFile=-/etc/default/%i/workflow-pr.env
|
||||
#DynamicUser=yes
|
||||
NoNewPrivileges=yes
|
||||
ProtectSystem=strict
|
||||
# DynamicUser does not work as we cannot seem to be able to put SSH keyfiles into the temp home that are readable by SSH
|
||||
# Also, systemd override is needed away to assign User to run this. This should be dependent per instance.
|
||||
ProtectHome=no
|
||||
PrivateTmp=yes
|
||||
# RuntimeDirectory=%i
|
||||
# SLES 15 doesn't have HOME set for dynamic users, so we improvise
|
||||
# BindReadOnlyPaths=/etc/default/%i/known_hosts:/etc/ssh/ssh_known_hosts /etc/default/%i/config.json:%t/%i/config.json /etc/default/%i/id_ed25519 /etc/default/%i/id_ed25519.pub
|
||||
# WorkingDirectory=%t/%i
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
||||
98
utils/maintainer-update/main.go
Normal file
98
utils/maintainer-update/main.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func WriteNewMaintainershipFile(m *common.MaintainershipMap, filename string) {
|
||||
f, err := os.Create(filename + ".new")
|
||||
common.PanicOnError(err)
|
||||
common.PanicOnError(m.WriteMaintainershipFile(f))
|
||||
common.PanicOnError(f.Sync())
|
||||
common.PanicOnError(f.Close())
|
||||
common.PanicOnError(os.Rename(filename+".new", filename))
|
||||
}
|
||||
|
||||
func run() error {
|
||||
pkg := flag.String("package", "", "Package to modify")
|
||||
rm := flag.Bool("rm", false, "Remove maintainer from package")
|
||||
add := flag.Bool("add", false, "Add maintainer to package")
|
||||
lint := flag.Bool("lint-only", false, "Reformat entire _maintainership.json only")
|
||||
flag.Parse()
|
||||
|
||||
if (*add == *rm) && !*lint {
|
||||
return fmt.Errorf("Need to either add or remove a maintainer, or lint")
|
||||
}
|
||||
|
||||
filename := common.MaintainershipFile
|
||||
if *lint {
|
||||
if len(flag.Args()) > 0 {
|
||||
filename = flag.Arg(0)
|
||||
}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m, err := common.ParseMaintainershipData(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse JSON: %w", err)
|
||||
}
|
||||
|
||||
if *lint {
|
||||
m.Raw = nil // forces a rewrite
|
||||
} else {
|
||||
users := flag.Args()
|
||||
if len(users) > 0 {
|
||||
maintainers, ok := m.Data[*pkg]
|
||||
if !ok && !*add {
|
||||
return fmt.Errorf("No package %s and not adding one.", *pkg)
|
||||
}
|
||||
|
||||
if *add {
|
||||
for _, u := range users {
|
||||
if !slices.Contains(maintainers, u) {
|
||||
maintainers = append(maintainers, u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if *rm {
|
||||
newMaintainers := make([]string, 0, len(maintainers))
|
||||
for _, m := range maintainers {
|
||||
if !slices.Contains(users, m) {
|
||||
newMaintainers = append(newMaintainers, m)
|
||||
}
|
||||
}
|
||||
maintainers = newMaintainers
|
||||
}
|
||||
|
||||
if len(maintainers) > 0 {
|
||||
slices.Sort(maintainers)
|
||||
m.Data[*pkg] = maintainers
|
||||
} else {
|
||||
delete(m.Data, *pkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WriteNewMaintainershipFile(m, filename)
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
common.LogError(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
242
utils/maintainer-update/main_test.go
Normal file
242
utils/maintainer-update/main_test.go
Normal file
@@ -0,0 +1,242 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if os.Getenv("BE_MAIN") == "1" {
|
||||
main()
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inData string
|
||||
expectedOut string
|
||||
params []string
|
||||
expectedError string
|
||||
isDir bool
|
||||
}{
|
||||
{
|
||||
name: "add user to existing package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "add user to new package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg2", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"], "pkg2": ["user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "no-op with no users",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-add"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "add existing user",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "remove user from package",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "remove last user from package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user1"},
|
||||
expectedOut: `{}`,
|
||||
},
|
||||
{
|
||||
name: "remove non-existent user",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint only unsorted",
|
||||
inData: `{"pkg1": ["user2", "user1"]}`,
|
||||
params: []string{"-lint-only"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint only no changes",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-lint-only"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "no file",
|
||||
params: []string{"-add"},
|
||||
expectedError: "no such file or directory",
|
||||
},
|
||||
{
|
||||
name: "invalid json",
|
||||
inData: `{"pkg1": ["user1"`,
|
||||
params: []string{"-add"},
|
||||
expectedError: "Failed to parse JSON",
|
||||
},
|
||||
{
|
||||
name: "add",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user3"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2", "user3"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint specific file",
|
||||
inData: `{"pkg1": ["user2", "user1"]}`,
|
||||
params: []string{"-lint-only", "other.json"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "add user to package when it was not there before",
|
||||
inData: `{}`,
|
||||
params: []string{"-package", "newpkg", "-add", "user1"},
|
||||
expectedOut: `{"newpkg": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "unreadable file (is a directory)",
|
||||
isDir: true,
|
||||
params: []string{"-rm"},
|
||||
expectedError: "is a directory",
|
||||
},
|
||||
{
|
||||
name: "remove user from non-existent package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg2", "-rm", "user2"},
|
||||
expectedError: "No package pkg2 and not adding one.",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
oldWd, _ := os.Getwd()
|
||||
_ = os.Chdir(dir)
|
||||
defer os.Chdir(oldWd)
|
||||
|
||||
targetFile := common.MaintainershipFile
|
||||
if tt.name == "lint specific file" {
|
||||
targetFile = "other.json"
|
||||
}
|
||||
|
||||
if tt.isDir {
|
||||
_ = os.Mkdir(targetFile, 0755)
|
||||
} else if tt.inData != "" {
|
||||
_ = os.WriteFile(targetFile, []byte(tt.inData), 0644)
|
||||
}
|
||||
|
||||
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
|
||||
|
||||
os.Args = append([]string{"cmd"}, tt.params...)
|
||||
err := run()
|
||||
|
||||
if tt.expectedError != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error containing %q, but got none", tt.expectedError)
|
||||
}
|
||||
if !strings.Contains(err.Error(), tt.expectedError) {
|
||||
t.Fatalf("expected error containing %q, got %q", tt.expectedError, err.Error())
|
||||
}
|
||||
} else if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if tt.expectedOut != "" {
|
||||
data, _ := os.ReadFile(targetFile)
|
||||
var got, expected map[string][]string
|
||||
_ = json.Unmarshal(data, &got)
|
||||
_ = json.Unmarshal([]byte(tt.expectedOut), &expected)
|
||||
|
||||
if len(got) == 0 && len(expected) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Fatalf("expected %v, got %v", expected, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMainRecursive(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inData string
|
||||
expectedOut string
|
||||
params []string
|
||||
expectExit bool
|
||||
}{
|
||||
{
|
||||
name: "test main() via recursive call",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "test main() failure",
|
||||
params: []string{"-package", "pkg1"},
|
||||
expectExit: true,
|
||||
},
|
||||
}
|
||||
|
||||
exe, _ := os.Executable()
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
oldWd, _ := os.Getwd()
|
||||
_ = os.Chdir(dir)
|
||||
defer os.Chdir(oldWd)
|
||||
|
||||
if tt.inData != "" {
|
||||
_ = os.WriteFile(common.MaintainershipFile, []byte(tt.inData), 0644)
|
||||
}
|
||||
|
||||
cmd := exec.Command(exe, append([]string{"-test.run=None"}, tt.params...)...)
|
||||
cmd.Env = append(os.Environ(), "BE_MAIN=1")
|
||||
out, runErr := cmd.CombinedOutput()
|
||||
|
||||
if tt.expectExit {
|
||||
if runErr == nil {
|
||||
t.Fatalf("expected exit with error, but it succeeded")
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if runErr != nil {
|
||||
t.Fatalf("unexpected error: %v: %s", runErr, string(out))
|
||||
}
|
||||
|
||||
if tt.expectedOut != "" {
|
||||
data, _ := os.ReadFile(common.MaintainershipFile)
|
||||
var got, expected map[string][]string
|
||||
_ = json.Unmarshal(data, &got)
|
||||
_ = json.Unmarshal([]byte(tt.expectedOut), &expected)
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Fatalf("expected %v, got %v", expected, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
12
vendor/go.uber.org/mock/AUTHORS
generated
vendored
Normal file
12
vendor/go.uber.org/mock/AUTHORS
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
# This is the official list of GoMock authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alex Reece <awreece@gmail.com>
|
||||
Google Inc.
|
||||
202
vendor/go.uber.org/mock/LICENSE
generated
vendored
Normal file
202
vendor/go.uber.org/mock/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
506
vendor/go.uber.org/mock/gomock/call.go
generated
vendored
Normal file
506
vendor/go.uber.org/mock/gomock/call.go
generated
vendored
Normal file
@@ -0,0 +1,506 @@
|
||||
// Copyright 2010 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gomock
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Call represents an expected call to a mock.
|
||||
type Call struct {
|
||||
t TestHelper // for triggering test failures on invalid call setup
|
||||
|
||||
receiver any // the receiver of the method call
|
||||
method string // the name of the method
|
||||
methodType reflect.Type // the type of the method
|
||||
args []Matcher // the args
|
||||
origin string // file and line number of call setup
|
||||
|
||||
preReqs []*Call // prerequisite calls
|
||||
|
||||
// Expectations
|
||||
minCalls, maxCalls int
|
||||
|
||||
numCalls int // actual number made
|
||||
|
||||
// actions are called when this Call is called. Each action gets the args and
|
||||
// can set the return values by returning a non-nil slice. Actions run in the
|
||||
// order they are created.
|
||||
actions []func([]any) []any
|
||||
}
|
||||
|
||||
// newCall creates a *Call. It requires the method type in order to support
|
||||
// unexported methods.
|
||||
func newCall(t TestHelper, receiver any, method string, methodType reflect.Type, args ...any) *Call {
|
||||
t.Helper()
|
||||
|
||||
// TODO: check arity, types.
|
||||
mArgs := make([]Matcher, len(args))
|
||||
for i, arg := range args {
|
||||
if m, ok := arg.(Matcher); ok {
|
||||
mArgs[i] = m
|
||||
} else if arg == nil {
|
||||
// Handle nil specially so that passing a nil interface value
|
||||
// will match the typed nils of concrete args.
|
||||
mArgs[i] = Nil()
|
||||
} else {
|
||||
mArgs[i] = Eq(arg)
|
||||
}
|
||||
}
|
||||
|
||||
// callerInfo's skip should be updated if the number of calls between the user's test
|
||||
// and this line changes, i.e. this code is wrapped in another anonymous function.
|
||||
// 0 is us, 1 is RecordCallWithMethodType(), 2 is the generated recorder, and 3 is the user's test.
|
||||
origin := callerInfo(3)
|
||||
actions := []func([]any) []any{func([]any) []any {
|
||||
// Synthesize the zero value for each of the return args' types.
|
||||
rets := make([]any, methodType.NumOut())
|
||||
for i := 0; i < methodType.NumOut(); i++ {
|
||||
rets[i] = reflect.Zero(methodType.Out(i)).Interface()
|
||||
}
|
||||
return rets
|
||||
}}
|
||||
return &Call{
|
||||
t: t, receiver: receiver, method: method, methodType: methodType,
|
||||
args: mArgs, origin: origin, minCalls: 1, maxCalls: 1, actions: actions,
|
||||
}
|
||||
}
|
||||
|
||||
// AnyTimes allows the expectation to be called 0 or more times
|
||||
func (c *Call) AnyTimes() *Call {
|
||||
c.minCalls, c.maxCalls = 0, 1e8 // close enough to infinity
|
||||
return c
|
||||
}
|
||||
|
||||
// MinTimes requires the call to occur at least n times. If AnyTimes or MaxTimes have not been called or if MaxTimes
|
||||
// was previously called with 1, MinTimes also sets the maximum number of calls to infinity.
|
||||
func (c *Call) MinTimes(n int) *Call {
|
||||
c.minCalls = n
|
||||
if c.maxCalls == 1 {
|
||||
c.maxCalls = 1e8
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// MaxTimes limits the number of calls to n times. If AnyTimes or MinTimes have not been called or if MinTimes was
|
||||
// previously called with 1, MaxTimes also sets the minimum number of calls to 0.
|
||||
func (c *Call) MaxTimes(n int) *Call {
|
||||
c.maxCalls = n
|
||||
if c.minCalls == 1 {
|
||||
c.minCalls = 0
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// DoAndReturn declares the action to run when the call is matched.
|
||||
// The return values from this function are returned by the mocked function.
|
||||
// It takes an any argument to support n-arity functions.
|
||||
// The anonymous function must match the function signature mocked method.
|
||||
func (c *Call) DoAndReturn(f any) *Call {
|
||||
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []any) []any {
|
||||
c.t.Helper()
|
||||
ft := v.Type()
|
||||
if c.methodType.NumIn() != ft.NumIn() {
|
||||
if ft.IsVariadic() {
|
||||
c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.",
|
||||
c.receiver, c.method)
|
||||
} else {
|
||||
c.t.Fatalf("wrong number of arguments in DoAndReturn func for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
vArgs := make([]reflect.Value, len(args))
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vArgs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vArgs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
vRets := v.Call(vArgs)
|
||||
rets := make([]any, len(vRets))
|
||||
for i, ret := range vRets {
|
||||
rets[i] = ret.Interface()
|
||||
}
|
||||
return rets
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// Do declares the action to run when the call is matched. The function's
|
||||
// return values are ignored to retain backward compatibility. To use the
|
||||
// return values call DoAndReturn.
|
||||
// It takes an any argument to support n-arity functions.
|
||||
// The anonymous function must match the function signature mocked method.
|
||||
func (c *Call) Do(f any) *Call {
|
||||
// TODO: Check arity and types here, rather than dying badly elsewhere.
|
||||
v := reflect.ValueOf(f)
|
||||
|
||||
c.addAction(func(args []any) []any {
|
||||
c.t.Helper()
|
||||
ft := v.Type()
|
||||
if c.methodType.NumIn() != ft.NumIn() {
|
||||
if ft.IsVariadic() {
|
||||
c.t.Fatalf("wrong number of arguments in Do func for %T.%v The function signature must match the mocked method, a variadic function cannot be used.",
|
||||
c.receiver, c.method)
|
||||
} else {
|
||||
c.t.Fatalf("wrong number of arguments in Do func for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, ft.NumIn(), c.methodType.NumIn(), c.origin)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
vArgs := make([]reflect.Value, len(args))
|
||||
for i := 0; i < len(args); i++ {
|
||||
if args[i] != nil {
|
||||
vArgs[i] = reflect.ValueOf(args[i])
|
||||
} else {
|
||||
// Use the zero value for the arg.
|
||||
vArgs[i] = reflect.Zero(ft.In(i))
|
||||
}
|
||||
}
|
||||
v.Call(vArgs)
|
||||
return nil
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// Return declares the values to be returned by the mocked function call.
|
||||
func (c *Call) Return(rets ...any) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
mt := c.methodType
|
||||
if len(rets) != mt.NumOut() {
|
||||
c.t.Fatalf("wrong number of arguments to Return for %T.%v: got %d, want %d [%s]",
|
||||
c.receiver, c.method, len(rets), mt.NumOut(), c.origin)
|
||||
}
|
||||
for i, ret := range rets {
|
||||
if got, want := reflect.TypeOf(ret), mt.Out(i); got == want {
|
||||
// Identical types; nothing to do.
|
||||
} else if got == nil {
|
||||
// Nil needs special handling.
|
||||
switch want.Kind() {
|
||||
case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
// ok
|
||||
default:
|
||||
c.t.Fatalf("argument %d to Return for %T.%v is nil, but %v is not nillable [%s]",
|
||||
i, c.receiver, c.method, want, c.origin)
|
||||
}
|
||||
} else if got.AssignableTo(want) {
|
||||
// Assignable type relation. Make the assignment now so that the generated code
|
||||
// can return the values with a type assertion.
|
||||
v := reflect.New(want).Elem()
|
||||
v.Set(reflect.ValueOf(ret))
|
||||
rets[i] = v.Interface()
|
||||
} else {
|
||||
c.t.Fatalf("wrong type of argument %d to Return for %T.%v: %v is not assignable to %v [%s]",
|
||||
i, c.receiver, c.method, got, want, c.origin)
|
||||
}
|
||||
}
|
||||
|
||||
c.addAction(func([]any) []any {
|
||||
return rets
|
||||
})
|
||||
|
||||
return c
|
||||
}
|
||||
|
||||
// Times declares the exact number of times a function call is expected to be executed.
|
||||
func (c *Call) Times(n int) *Call {
|
||||
c.minCalls, c.maxCalls = n, n
|
||||
return c
|
||||
}
|
||||
|
||||
// SetArg declares an action that will set the nth argument's value,
|
||||
// indirected through a pointer. Or, in the case of a slice and map, SetArg
|
||||
// will copy value's elements/key-value pairs into the nth argument.
|
||||
func (c *Call) SetArg(n int, value any) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
mt := c.methodType
|
||||
// TODO: This will break on variadic methods.
|
||||
// We will need to check those at invocation time.
|
||||
if n < 0 || n >= mt.NumIn() {
|
||||
c.t.Fatalf("SetArg(%d, ...) called for a method with %d args [%s]",
|
||||
n, mt.NumIn(), c.origin)
|
||||
}
|
||||
// Permit setting argument through an interface.
|
||||
// In the interface case, we don't (nay, can't) check the type here.
|
||||
at := mt.In(n)
|
||||
switch at.Kind() {
|
||||
case reflect.Ptr:
|
||||
dt := at.Elem()
|
||||
if vt := reflect.TypeOf(value); !vt.AssignableTo(dt) {
|
||||
c.t.Fatalf("SetArg(%d, ...) argument is a %v, not assignable to %v [%s]",
|
||||
n, vt, dt, c.origin)
|
||||
}
|
||||
case reflect.Interface, reflect.Slice, reflect.Map:
|
||||
// nothing to do
|
||||
default:
|
||||
c.t.Fatalf("SetArg(%d, ...) referring to argument of non-pointer non-interface non-slice non-map type %v [%s]",
|
||||
n, at, c.origin)
|
||||
}
|
||||
|
||||
c.addAction(func(args []any) []any {
|
||||
v := reflect.ValueOf(value)
|
||||
switch reflect.TypeOf(args[n]).Kind() {
|
||||
case reflect.Slice:
|
||||
setSlice(args[n], v)
|
||||
case reflect.Map:
|
||||
setMap(args[n], v)
|
||||
default:
|
||||
reflect.ValueOf(args[n]).Elem().Set(v)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return c
|
||||
}
|
||||
|
||||
// isPreReq returns true if other is a direct or indirect prerequisite to c.
|
||||
func (c *Call) isPreReq(other *Call) bool {
|
||||
for _, preReq := range c.preReqs {
|
||||
if other == preReq || preReq.isPreReq(other) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// After declares that the call may only match after preReq has been exhausted.
|
||||
func (c *Call) After(preReq *Call) *Call {
|
||||
c.t.Helper()
|
||||
|
||||
if c == preReq {
|
||||
c.t.Fatalf("A call isn't allowed to be its own prerequisite")
|
||||
}
|
||||
if preReq.isPreReq(c) {
|
||||
c.t.Fatalf("Loop in call order: %v is a prerequisite to %v (possibly indirectly).", c, preReq)
|
||||
}
|
||||
|
||||
c.preReqs = append(c.preReqs, preReq)
|
||||
return c
|
||||
}
|
||||
|
||||
// Returns true if the minimum number of calls have been made.
|
||||
func (c *Call) satisfied() bool {
|
||||
return c.numCalls >= c.minCalls
|
||||
}
|
||||
|
||||
// Returns true if the maximum number of calls have been made.
|
||||
func (c *Call) exhausted() bool {
|
||||
return c.numCalls >= c.maxCalls
|
||||
}
|
||||
|
||||
func (c *Call) String() string {
|
||||
args := make([]string, len(c.args))
|
||||
for i, arg := range c.args {
|
||||
args[i] = arg.String()
|
||||
}
|
||||
arguments := strings.Join(args, ", ")
|
||||
return fmt.Sprintf("%T.%v(%s) %s", c.receiver, c.method, arguments, c.origin)
|
||||
}
|
||||
|
||||
// Tests if the given call matches the expected call.
|
||||
// If yes, returns nil. If no, returns error with message explaining why it does not match.
|
||||
func (c *Call) matches(args []any) error {
|
||||
if !c.methodType.IsVariadic() {
|
||||
if len(args) != len(c.args) {
|
||||
return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||
c.origin, len(args), len(c.args))
|
||||
}
|
||||
|
||||
for i, m := range c.args {
|
||||
if !m.Matches(args[i]) {
|
||||
return fmt.Errorf(
|
||||
"expected call at %s doesn't match the argument at index %d.\nGot: %v\nWant: %v",
|
||||
c.origin, i, formatGottenArg(m, args[i]), m,
|
||||
)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if len(c.args) < c.methodType.NumIn()-1 {
|
||||
return fmt.Errorf("expected call at %s has the wrong number of matchers. Got: %d, want: %d",
|
||||
c.origin, len(c.args), c.methodType.NumIn()-1)
|
||||
}
|
||||
if len(c.args) != c.methodType.NumIn() && len(args) != len(c.args) {
|
||||
return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: %d",
|
||||
c.origin, len(args), len(c.args))
|
||||
}
|
||||
if len(args) < len(c.args)-1 {
|
||||
return fmt.Errorf("expected call at %s has the wrong number of arguments. Got: %d, want: greater than or equal to %d",
|
||||
c.origin, len(args), len(c.args)-1)
|
||||
}
|
||||
|
||||
for i, m := range c.args {
|
||||
if i < c.methodType.NumIn()-1 {
|
||||
// Non-variadic args
|
||||
if !m.Matches(args[i]) {
|
||||
return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), formatGottenArg(m, args[i]), m)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// The last arg has a possibility of a variadic argument, so let it branch
|
||||
|
||||
// sample: Foo(a int, b int, c ...int)
|
||||
if i < len(c.args) && i < len(args) {
|
||||
if m.Matches(args[i]) {
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC)
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB)
|
||||
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// The number of actual args don't match the number of matchers,
|
||||
// or the last matcher is a slice and the last arg is not.
|
||||
// If this function still matches it is because the last matcher
|
||||
// matches all the remaining arguments or the lack of any.
|
||||
// Convert the remaining arguments, if any, into a slice of the
|
||||
// expected type.
|
||||
vArgsType := c.methodType.In(c.methodType.NumIn() - 1)
|
||||
vArgs := reflect.MakeSlice(vArgsType, 0, len(args)-i)
|
||||
for _, arg := range args[i:] {
|
||||
vArgs = reflect.Append(vArgs, reflect.ValueOf(arg))
|
||||
}
|
||||
if m.Matches(vArgs.Interface()) {
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, someSliceMatcher)
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, gomock.Any())
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, someEmptySliceMatcher)
|
||||
break
|
||||
}
|
||||
// Wrong number of matchers or not match. Fail.
|
||||
// Got Foo(a, b) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c, d) want Foo(matcherA, matcherB, matcherC, matcherD, matcherE)
|
||||
// Got Foo(a, b, c, d, e) want Foo(matcherA, matcherB, matcherC, matcherD)
|
||||
// Got Foo(a, b, c) want Foo(matcherA, matcherB)
|
||||
|
||||
return fmt.Errorf("expected call at %s doesn't match the argument at index %s.\nGot: %v\nWant: %v",
|
||||
c.origin, strconv.Itoa(i), formatGottenArg(m, args[i:]), c.args[i])
|
||||
}
|
||||
}
|
||||
|
||||
// Check that all prerequisite calls have been satisfied.
|
||||
for _, preReqCall := range c.preReqs {
|
||||
if !preReqCall.satisfied() {
|
||||
return fmt.Errorf("expected call at %s doesn't have a prerequisite call satisfied:\n%v\nshould be called before:\n%v",
|
||||
c.origin, preReqCall, c)
|
||||
}
|
||||
}
|
||||
|
||||
// Check that the call is not exhausted.
|
||||
if c.exhausted() {
|
||||
return fmt.Errorf("expected call at %s has already been called the max number of times", c.origin)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// dropPrereqs tells the expected Call to not re-check prerequisite calls any
|
||||
// longer, and to return its current set.
|
||||
func (c *Call) dropPrereqs() (preReqs []*Call) {
|
||||
preReqs = c.preReqs
|
||||
c.preReqs = nil
|
||||
return
|
||||
}
|
||||
|
||||
func (c *Call) call() []func([]any) []any {
|
||||
c.numCalls++
|
||||
return c.actions
|
||||
}
|
||||
|
||||
// InOrder declares that the given calls should occur in order.
|
||||
// It panics if the type of any of the arguments isn't *Call or a generated
|
||||
// mock with an embedded *Call.
|
||||
func InOrder(args ...any) {
|
||||
calls := make([]*Call, 0, len(args))
|
||||
for i := 0; i < len(args); i++ {
|
||||
if call := getCall(args[i]); call != nil {
|
||||
calls = append(calls, call)
|
||||
continue
|
||||
}
|
||||
panic(fmt.Sprintf(
|
||||
"invalid argument at position %d of type %T, InOrder expects *gomock.Call or generated mock types with an embedded *gomock.Call",
|
||||
i,
|
||||
args[i],
|
||||
))
|
||||
}
|
||||
for i := 1; i < len(calls); i++ {
|
||||
calls[i].After(calls[i-1])
|
||||
}
|
||||
}
|
||||
|
||||
// getCall checks if the parameter is a *Call or a generated struct
|
||||
// that wraps a *Call and returns the *Call pointer - if neither, it returns nil.
|
||||
func getCall(arg any) *Call {
|
||||
if call, ok := arg.(*Call); ok {
|
||||
return call
|
||||
}
|
||||
t := reflect.ValueOf(arg)
|
||||
if t.Kind() != reflect.Ptr && t.Kind() != reflect.Interface {
|
||||
return nil
|
||||
}
|
||||
t = t.Elem()
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
if !f.CanInterface() {
|
||||
continue
|
||||
}
|
||||
if call, ok := f.Interface().(*Call); ok {
|
||||
return call
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setSlice(arg any, v reflect.Value) {
|
||||
va := reflect.ValueOf(arg)
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
va.Index(i).Set(v.Index(i))
|
||||
}
|
||||
}
|
||||
|
||||
func setMap(arg any, v reflect.Value) {
|
||||
va := reflect.ValueOf(arg)
|
||||
for _, e := range va.MapKeys() {
|
||||
va.SetMapIndex(e, reflect.Value{})
|
||||
}
|
||||
for _, e := range v.MapKeys() {
|
||||
va.SetMapIndex(e, v.MapIndex(e))
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Call) addAction(action func([]any) []any) {
|
||||
c.actions = append(c.actions, action)
|
||||
}
|
||||
|
||||
func formatGottenArg(m Matcher, arg any) string {
|
||||
got := fmt.Sprintf("%v (%T)", arg, arg)
|
||||
if gs, ok := m.(GotFormatter); ok {
|
||||
got = gs.Got(arg)
|
||||
}
|
||||
return got
|
||||
}
|
||||
164
vendor/go.uber.org/mock/gomock/callset.go
generated
vendored
Normal file
164
vendor/go.uber.org/mock/gomock/callset.go
generated
vendored
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2011 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gomock
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// callSet represents a set of expected calls, indexed by receiver and method
// name.
type callSet struct {
	// Calls that are still expected.
	expected map[callSetKey][]*Call
	// expectedMu guards expected and exhausted; a pointer so the lock is
	// shared across the value-receiver methods of callSet.
	expectedMu *sync.Mutex
	// Calls that have been exhausted.
	exhausted map[callSetKey][]*Call
	// when set to true, existing call expectations are overridden when new call expectations are made
	allowOverride bool
}
|
||||
|
||||
// callSetKey is the key in the maps in callSet
type callSetKey struct {
	receiver any    // the mock object the expectation was registered on
	fname    string // the expected method's name
}
|
||||
|
||||
func newCallSet() *callSet {
|
||||
return &callSet{
|
||||
expected: make(map[callSetKey][]*Call),
|
||||
expectedMu: &sync.Mutex{},
|
||||
exhausted: make(map[callSetKey][]*Call),
|
||||
}
|
||||
}
|
||||
|
||||
func newOverridableCallSet() *callSet {
|
||||
return &callSet{
|
||||
expected: make(map[callSetKey][]*Call),
|
||||
expectedMu: &sync.Mutex{},
|
||||
exhausted: make(map[callSetKey][]*Call),
|
||||
allowOverride: true,
|
||||
}
|
||||
}
|
||||
|
||||
// Add adds a new expected call.
|
||||
func (cs callSet) Add(call *Call) {
|
||||
key := callSetKey{call.receiver, call.method}
|
||||
|
||||
cs.expectedMu.Lock()
|
||||
defer cs.expectedMu.Unlock()
|
||||
|
||||
m := cs.expected
|
||||
if call.exhausted() {
|
||||
m = cs.exhausted
|
||||
}
|
||||
if cs.allowOverride {
|
||||
m[key] = make([]*Call, 0)
|
||||
}
|
||||
|
||||
m[key] = append(m[key], call)
|
||||
}
|
||||
|
||||
// Remove removes an expected call.
|
||||
func (cs callSet) Remove(call *Call) {
|
||||
key := callSetKey{call.receiver, call.method}
|
||||
|
||||
cs.expectedMu.Lock()
|
||||
defer cs.expectedMu.Unlock()
|
||||
|
||||
calls := cs.expected[key]
|
||||
for i, c := range calls {
|
||||
if c == call {
|
||||
// maintain order for remaining calls
|
||||
cs.expected[key] = append(calls[:i], calls[i+1:]...)
|
||||
cs.exhausted[key] = append(cs.exhausted[key], call)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// FindMatch searches for a matching call. Returns error with explanation message if no call matched.
|
||||
func (cs callSet) FindMatch(receiver any, method string, args []any) (*Call, error) {
|
||||
key := callSetKey{receiver, method}
|
||||
|
||||
cs.expectedMu.Lock()
|
||||
defer cs.expectedMu.Unlock()
|
||||
|
||||
// Search through the expected calls.
|
||||
expected := cs.expected[key]
|
||||
var callsErrors bytes.Buffer
|
||||
for _, call := range expected {
|
||||
err := call.matches(args)
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||
} else {
|
||||
return call, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If we haven't found a match then search through the exhausted calls so we
|
||||
// get useful error messages.
|
||||
exhausted := cs.exhausted[key]
|
||||
for _, call := range exhausted {
|
||||
if err := call.matches(args); err != nil {
|
||||
_, _ = fmt.Fprintf(&callsErrors, "\n%v", err)
|
||||
continue
|
||||
}
|
||||
_, _ = fmt.Fprintf(
|
||||
&callsErrors, "all expected calls for method %q have been exhausted", method,
|
||||
)
|
||||
}
|
||||
|
||||
if len(expected)+len(exhausted) == 0 {
|
||||
_, _ = fmt.Fprintf(&callsErrors, "there are no expected calls of the method %q for that receiver", method)
|
||||
}
|
||||
|
||||
return nil, errors.New(callsErrors.String())
|
||||
}
|
||||
|
||||
// Failures returns the calls that are not satisfied.
|
||||
func (cs callSet) Failures() []*Call {
|
||||
cs.expectedMu.Lock()
|
||||
defer cs.expectedMu.Unlock()
|
||||
|
||||
failures := make([]*Call, 0, len(cs.expected))
|
||||
for _, calls := range cs.expected {
|
||||
for _, call := range calls {
|
||||
if !call.satisfied() {
|
||||
failures = append(failures, call)
|
||||
}
|
||||
}
|
||||
}
|
||||
return failures
|
||||
}
|
||||
|
||||
// Satisfied returns true in case all expected calls in this callSet are satisfied.
|
||||
func (cs callSet) Satisfied() bool {
|
||||
cs.expectedMu.Lock()
|
||||
defer cs.expectedMu.Unlock()
|
||||
|
||||
for _, calls := range cs.expected {
|
||||
for _, call := range calls {
|
||||
if !call.satisfied() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
326
vendor/go.uber.org/mock/gomock/controller.go
generated
vendored
Normal file
326
vendor/go.uber.org/mock/gomock/controller.go
generated
vendored
Normal file
@@ -0,0 +1,326 @@
|
||||
// Copyright 2010 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gomock
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// A TestReporter is something that can be used to report test failures. It
// is satisfied by the standard library's *testing.T.
type TestReporter interface {
	// Errorf reports a failure.
	Errorf(format string, args ...any)
	// Fatalf reports a failure that must stop the test.
	Fatalf(format string, args ...any)
}
|
||||
|
||||
// TestHelper is a TestReporter that has the Helper method. It is satisfied
// by the standard library's *testing.T.
type TestHelper interface {
	TestReporter
	// Helper marks the caller as a test helper so failure locations point at
	// user code.
	Helper()
}
|
||||
|
||||
// cleanuper is used to check if TestHelper also has the `Cleanup` method. A
// common pattern is to pass in a `*testing.T` to
// `NewController(t TestReporter)`. In Go 1.14+, `*testing.T` has a cleanup
// method. This can be utilized to call `Finish()` so the caller of this library
// does not have to.
type cleanuper interface {
	Cleanup(func())
}
|
||||
|
||||
// A Controller represents the top-level control of a mock ecosystem. It
// defines the scope and lifetime of mock objects, as well as their
// expectations. It is safe to call Controller's methods from multiple
// goroutines. Each test should create a new Controller.
//
//	func TestFoo(t *testing.T) {
//		ctrl := gomock.NewController(t)
//		// ..
//	}
//
//	func TestBar(t *testing.T) {
//		t.Run("Sub-Test-1", func(st *testing.T) {
//			ctrl := gomock.NewController(st)
//			// ..
//		})
//		t.Run("Sub-Test-2", func(st *testing.T) {
//			ctrl := gomock.NewController(st)
//			// ..
//		})
//	}
type Controller struct {
	// T should only be called within a generated mock. It is not intended to
	// be used in user code and may be changed in future versions. T is the
	// TestReporter passed in when creating the Controller via NewController.
	// If the TestReporter does not implement a TestHelper it will be wrapped
	// with a nopTestHelper.
	T TestHelper
	// mu guards expectedCalls and finished.
	mu sync.Mutex
	// expectedCalls holds the expectations registered via RecordCall*.
	expectedCalls *callSet
	// finished is set once finish has run; guards against double Finish.
	finished bool
}
|
||||
|
||||
// NewController returns a new Controller. It is the preferred way to create a Controller.
|
||||
//
|
||||
// Passing [*testing.T] registers cleanup function to automatically call [Controller.Finish]
|
||||
// when the test and all its subtests complete.
|
||||
func NewController(t TestReporter, opts ...ControllerOption) *Controller {
|
||||
h, ok := t.(TestHelper)
|
||||
if !ok {
|
||||
h = &nopTestHelper{t}
|
||||
}
|
||||
ctrl := &Controller{
|
||||
T: h,
|
||||
expectedCalls: newCallSet(),
|
||||
}
|
||||
for _, opt := range opts {
|
||||
opt.apply(ctrl)
|
||||
}
|
||||
if c, ok := isCleanuper(ctrl.T); ok {
|
||||
c.Cleanup(func() {
|
||||
ctrl.T.Helper()
|
||||
ctrl.finish(true, nil)
|
||||
})
|
||||
}
|
||||
|
||||
return ctrl
|
||||
}
|
||||
|
||||
// ControllerOption configures how a Controller should behave. Options are
// applied in order by NewController.
type ControllerOption interface {
	apply(*Controller)
}
|
||||
|
||||
// overridableExpectationsOption is the ControllerOption produced by
// WithOverridableExpectations.
type overridableExpectationsOption struct{}

// WithOverridableExpectations allows for overridable call expectations
// i.e., subsequent call expectations override existing call expectations
func WithOverridableExpectations() overridableExpectationsOption {
	return overridableExpectationsOption{}
}

// apply swaps the controller's call set for an overridable one, so a newly
// registered expectation replaces earlier ones for the same receiver/method.
func (o overridableExpectationsOption) apply(ctrl *Controller) {
	ctrl.expectedCalls = newOverridableCallSet()
}
|
||||
|
||||
// cancelReporter wraps a TestHelper and additionally cancels a context on
// any fatal failure (used by WithContext).
type cancelReporter struct {
	t      TestHelper
	cancel func()
}

// Errorf forwards a non-fatal failure to the wrapped helper.
func (r *cancelReporter) Errorf(format string, args ...any) {
	r.t.Errorf(format, args...)
}

// Fatalf cancels the context and reports the fatal failure. cancel is
// deferred because Fatalf is not expected to return normally.
func (r *cancelReporter) Fatalf(format string, args ...any) {
	defer r.cancel()
	r.t.Fatalf(format, args...)
}

// Helper forwards to the wrapped helper.
func (r *cancelReporter) Helper() {
	r.t.Helper()
}
|
||||
|
||||
// WithContext returns a new Controller and a Context, which is cancelled on any
|
||||
// fatal failure.
|
||||
func WithContext(ctx context.Context, t TestReporter) (*Controller, context.Context) {
|
||||
h, ok := t.(TestHelper)
|
||||
if !ok {
|
||||
h = &nopTestHelper{t: t}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
return NewController(&cancelReporter{t: h, cancel: cancel}), ctx
|
||||
}
|
||||
|
||||
type nopTestHelper struct {
|
||||
t TestReporter
|
||||
}
|
||||
|
||||
func (h *nopTestHelper) Errorf(format string, args ...any) {
|
||||
h.t.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (h *nopTestHelper) Fatalf(format string, args ...any) {
|
||||
h.t.Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (h nopTestHelper) Helper() {}
|
||||
|
||||
// RecordCall is called by a mock. It should not be called by user code.
|
||||
func (ctrl *Controller) RecordCall(receiver any, method string, args ...any) *Call {
|
||||
ctrl.T.Helper()
|
||||
|
||||
recv := reflect.ValueOf(receiver)
|
||||
for i := 0; i < recv.Type().NumMethod(); i++ {
|
||||
if recv.Type().Method(i).Name == method {
|
||||
return ctrl.RecordCallWithMethodType(receiver, method, recv.Method(i).Type(), args...)
|
||||
}
|
||||
}
|
||||
ctrl.T.Fatalf("gomock: failed finding method %s on %T", method, receiver)
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// RecordCallWithMethodType is called by a mock. It should not be called by user code.
|
||||
func (ctrl *Controller) RecordCallWithMethodType(receiver any, method string, methodType reflect.Type, args ...any) *Call {
|
||||
ctrl.T.Helper()
|
||||
|
||||
call := newCall(ctrl.T, receiver, method, methodType, args...)
|
||||
|
||||
ctrl.mu.Lock()
|
||||
defer ctrl.mu.Unlock()
|
||||
ctrl.expectedCalls.Add(call)
|
||||
|
||||
return call
|
||||
}
|
||||
|
||||
// Call is called by a mock. It should not be called by user code.
// It finds the matching expectation, runs its actions, and returns the
// values produced by the last action that returned a non-nil slice.
func (ctrl *Controller) Call(receiver any, method string, args ...any) []any {
	ctrl.T.Helper()

	// Nest this code so we can use defer to make sure the lock is released.
	// NOTE(review): the closure depth here is load-bearing — see the
	// callerInfo(3) comment below before restructuring.
	actions := func() []func([]any) []any {
		ctrl.T.Helper()
		ctrl.mu.Lock()
		defer ctrl.mu.Unlock()

		expected, err := ctrl.expectedCalls.FindMatch(receiver, method, args)
		if err != nil {
			// callerInfo's skip should be updated if the number of calls between the user's test
			// and this line changes, i.e. this code is wrapped in another anonymous function.
			// 0 is us, 1 is controller.Call(), 2 is the generated mock, and 3 is the user's test.
			origin := callerInfo(3)
			stringArgs := make([]string, len(args))
			for i, arg := range args {
				stringArgs[i] = getString(arg)
			}
			ctrl.T.Fatalf("Unexpected call to %T.%v(%v) at %s because: %s", receiver, method, stringArgs, origin, err)
		}

		// Two things happen here:
		// * the matching call no longer needs to check prerequisite calls,
		// * and the prerequisite calls are no longer expected, so remove them.
		preReqCalls := expected.dropPrereqs()
		for _, preReqCall := range preReqCalls {
			ctrl.expectedCalls.Remove(preReqCall)
		}

		actions := expected.call()
		if expected.exhausted() {
			ctrl.expectedCalls.Remove(expected)
		}
		return actions
	}()

	// Run the actions after the lock is released; the last action to return
	// a non-nil slice supplies the call's return values.
	var rets []any
	for _, action := range actions {
		if r := action(args); r != nil {
			rets = r
		}
	}

	return rets
}
|
||||
|
||||
// Finish checks to see if all the methods that were expected to be called were called.
// It is not idempotent and therefore can only be invoked once.
//
// Note: If you pass a *testing.T into [NewController], you no longer
// need to call ctrl.Finish() in your test methods.
func (ctrl *Controller) Finish() {
	// If we're currently panicking, probably because this is a deferred call.
	// This must be recovered in the deferred function.
	// (recover only works directly inside a deferred frame, so it cannot be
	// moved into finish.)
	err := recover()
	ctrl.finish(false, err)
}
|
||||
|
||||
// Satisfied returns whether all expected calls bound to this Controller have been satisfied.
// Calling Finish is then guaranteed to not fail due to missing calls.
func (ctrl *Controller) Satisfied() bool {
	ctrl.mu.Lock()
	defer ctrl.mu.Unlock()
	return ctrl.expectedCalls.Satisfied()
}
|
||||
|
||||
func (ctrl *Controller) finish(cleanup bool, panicErr any) {
|
||||
ctrl.T.Helper()
|
||||
|
||||
ctrl.mu.Lock()
|
||||
defer ctrl.mu.Unlock()
|
||||
|
||||
if ctrl.finished {
|
||||
if _, ok := isCleanuper(ctrl.T); !ok {
|
||||
ctrl.T.Fatalf("Controller.Finish was called more than once. It has to be called exactly once.")
|
||||
}
|
||||
return
|
||||
}
|
||||
ctrl.finished = true
|
||||
|
||||
// Short-circuit, pass through the panic.
|
||||
if panicErr != nil {
|
||||
panic(panicErr)
|
||||
}
|
||||
|
||||
// Check that all remaining expected calls are satisfied.
|
||||
failures := ctrl.expectedCalls.Failures()
|
||||
for _, call := range failures {
|
||||
ctrl.T.Errorf("missing call(s) to %v", call)
|
||||
}
|
||||
if len(failures) != 0 {
|
||||
if !cleanup {
|
||||
ctrl.T.Fatalf("aborting test due to missing call(s)")
|
||||
return
|
||||
}
|
||||
ctrl.T.Errorf("aborting test due to missing call(s)")
|
||||
}
|
||||
}
|
||||
|
||||
// callerInfo returns the file:line of the call site. skip is the number
|
||||
// of stack frames to skip when reporting. 0 is callerInfo's call site.
|
||||
func callerInfo(skip int) string {
|
||||
if _, file, line, ok := runtime.Caller(skip + 1); ok {
|
||||
return fmt.Sprintf("%s:%d", file, line)
|
||||
}
|
||||
return "unknown file"
|
||||
}
|
||||
|
||||
// isCleanuper checks it if t's base TestReporter has a Cleanup method.
|
||||
func isCleanuper(t TestReporter) (cleanuper, bool) {
|
||||
tr := unwrapTestReporter(t)
|
||||
c, ok := tr.(cleanuper)
|
||||
return c, ok
|
||||
}
|
||||
|
||||
// unwrapTestReporter unwraps TestReporter to the base implementation.
|
||||
func unwrapTestReporter(t TestReporter) TestReporter {
|
||||
tr := t
|
||||
switch nt := t.(type) {
|
||||
case *cancelReporter:
|
||||
tr = nt.t
|
||||
if h, check := tr.(*nopTestHelper); check {
|
||||
tr = h.t
|
||||
}
|
||||
case *nopTestHelper:
|
||||
tr = nt.t
|
||||
default:
|
||||
// not wrapped
|
||||
}
|
||||
return tr
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user