Compare commits
43 Commits
new-packag...mergemodes
| Author | SHA | Date |
|---|---|---|
|  | db70452cbc |  |
|  | 53eebb75f7 |  |
|  | 9f9a4660e9 |  |
|  | cb2f17a287 |  |
|  | 3125df4d6a |  |
|  | 06600813b4 |  |
|  | 3b510182d6 |  |
|  | d1bcc222ce |  |
|  | b632952f62 |  |
|  | 1b90299d94 |  |
|  | 708add1017 |  |
|  | 712349d638 |  |
|  | ba5a42dd29 |  |
|  | 53cf2c8bad |  |
|  | 868c28cd5a |  |
|  | 962c4b2562 |  |
|  | 57cb251dbc |  |
|  | 75c4fada50 |  |
|  | 7d13e586ac |  |
|  | 7729b845b0 |  |
|  | c662b2fdbf |  |
|  | 4cedb37da4 |  |
|  | fe519628c8 |  |
|  | ff18828692 |  |
|  | 6337ef7e50 |  |
|  | e9992d2e99 |  |
|  | aac218fc6d |  |
|  | 139f40fce3 |  |
|  | c44d34fdbe |  |
|  | 23be3df1fb |  |
|  | 68b67c6975 |  |
|  | 478a3a140a |  |
|  | df4da87bfd |  |
|  | b19d301d95 |  |
|  | 9532aa897c |  |
|  | f942909ac7 |  |
|  | 7f98298b89 |  |
|  | f0b053ca07 |  |
|  | 844ec8a87b |  |
|  | 6ee8fcc597 |  |
|  | 1220799e57 |  |
|  | 86a176a785 |  |
|  | bb9e9a08e5 |  |
.gitignore (vendored): 5 lines changed
@@ -1,2 +1,7 @@
*.osc
*.conf
/integration/gitea-data
/integration/gitea-logs
/integration/rabbitmq-data
/integration/workflow-pr-repos
__pycache__/
Makefile (new file): 4 lines changed
@@ -0,0 +1,4 @@
MODULES := devel-importer utils/hujson utils/maintainer-update gitea-events-rabbitmq-publisher gitea_status_proxy group-review obs-forward-bot obs-staging-bot obs-status-service workflow-direct workflow-pr

build:
	for m in $(MODULES); do go build -C $$m -buildmode=pie || exit 1 ; done
@@ -129,6 +129,9 @@ go build \
go build \
	-C utils/hujson \
	-buildmode=pie
go build \
	-C utils/maintainer-update \
	-buildmode=pie
go build \
	-C gitea-events-rabbitmq-publisher \
	-buildmode=pie
@@ -160,6 +163,7 @@ go test -C group-review -v
go test -C obs-staging-bot -v
go test -C obs-status-service -v
go test -C workflow-direct -v
go test -C utils/maintainer-update
# TODO build fails
#go test -C workflow-pr -v

@@ -178,7 +182,9 @@ install -D -m0644 systemd/obs-status-service.service
install -D -m0755 workflow-direct/workflow-direct %{buildroot}%{_bindir}/workflow-direct
install -D -m0644 systemd/workflow-direct@.service %{buildroot}%{_unitdir}/workflow-direct@.service
install -D -m0755 workflow-pr/workflow-pr %{buildroot}%{_bindir}/workflow-pr
install -D -m0644 systemd/workflow-pr@.service %{buildroot}%{_unitdir}/workflow-pr@.service
install -D -m0755 utils/hujson/hujson %{buildroot}%{_bindir}/hujson
install -D -m0755 utils/maintainer-update/maintainer-update %{buildroot}%{_bindir}/maintainer-update

%pre gitea-events-rabbitmq-publisher
%service_add_pre gitea-events-rabbitmq-publisher.service
@@ -228,17 +234,29 @@ install -D -m0755 utils/hujson/hujson
%postun obs-status-service
%service_del_postun obs-status-service.service

%pre workflow-direct
%service_add_pre workflow-direct.service

%post workflow-direct
%service_add_post workflow-direct.service

%preun workflow-direct
%service_del_preun workflow-direct.service

%postun workflow-direct
%service_del_postun workflow-direct.service

%pre workflow-pr
%service_add_pre workflow-direct@.service
%service_add_pre workflow-pr.service

%post workflow-pr
%service_add_post workflow-direct@.service
%service_add_post workflow-pr.service

%preun workflow-pr
%service_del_preun workflow-direct@.service
%service_del_preun workflow-pr.service

%postun workflow-pr
%service_del_postun workflow-direct@.service
%service_del_postun workflow-pr.service

%files devel-importer
%license COPYING
@@ -285,6 +303,7 @@ install -D -m0755 utils/hujson/hujson
%files utils
%license COPYING
%{_bindir}/hujson
%{_bindir}/maintainer-update

%files workflow-direct
%license COPYING
@@ -296,4 +315,5 @@ install -D -m0755 utils/hujson/hujson
%license COPYING
%doc workflow-pr/README.md
%{_bindir}/workflow-pr
%{_unitdir}/workflow-pr@.service
@@ -39,6 +39,10 @@ const (

	Permission_ForceMerge = "force-merge"
	Permission_Group      = "release-engineering"

	MergeModeFF      = "ff-only"
	MergeModeReplace = "replace"
	MergeModeDevel   = "devel"
)

type ConfigFile struct {
@@ -52,8 +56,9 @@ type ReviewGroup struct {
}

type QAConfig struct {
	Name              string
	Origin            string
	Label             string   // requires this gitea label to be set, otherwise skipped
	BuildDisableRepos []string // which repos to build disable in the new project
}

@@ -87,7 +92,8 @@ type AutogitConfig struct {
	Committers []string // group in addition to Reviewers and Maintainers that can order the bot around, mostly as a helper for factory-maintainers
	Subdirs    []string // list of directories to sort submodules into. Needed b/c _manifest cannot list non-existent directories

	Labels    map[string]string // list of tags, if not default, to apply
	MergeMode string            // project merge mode

	NoProjectGitPR  bool // do not automatically create project git PRs, just assign reviewers and assume something else creates the ProjectGit PR
	ManualMergeOnly bool // only merge with "Merge OK" comment by Project Maintainers and/or Package Maintainers and/or reviewers
@@ -182,6 +188,17 @@ func ReadWorkflowConfig(gitea GiteaFileContentAndRepoFetcher, git_project string
		}
	}
	config.GitProjectName = config.GitProjectName + "#" + branch

	// verify merge modes
	switch config.MergeMode {
	case MergeModeFF, MergeModeDevel, MergeModeReplace:
		break // good results
	case "":
		config.MergeMode = MergeModeFF
	default:
		return nil, fmt.Errorf("Unsupported merge mode in %s: %s", git_project, config.MergeMode)
	}

	return config, nil
}
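
For reference, `MergeMode` is a new optional key in a project's `workflow.config`: per the validation above, an empty or absent value falls back to `ff-only`, and anything other than the three constants is rejected with an error. A minimal sketch of the file (all other fields elided; the values are exactly the ones exercised by the parser tests below):

```json
{
    "MergeMode": "replace"
}
```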

@@ -342,3 +342,67 @@ func TestConfigPermissions(t *testing.T) {
		})
	}
}

func TestConfigMergeModeParser(t *testing.T) {
	tests := []struct {
		name      string
		json      string
		mergeMode string
		wantErr   bool
	}{
		{
			name:      "empty",
			json:      "{}",
			mergeMode: common.MergeModeFF,
		},
		{
			name:      "ff-only",
			json:      `{"MergeMode": "ff-only"}`,
			mergeMode: common.MergeModeFF,
		},
		{
			name:      "replace",
			json:      `{"MergeMode": "replace"}`,
			mergeMode: common.MergeModeReplace,
		},
		{
			name:      "devel",
			json:      `{"MergeMode": "devel"}`,
			mergeMode: common.MergeModeDevel,
		},
		{
			name:    "unsupported",
			json:    `{"MergeMode": "invalid"}`,
			wantErr: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			repo := models.Repository{
				DefaultBranch: "master",
			}

			ctl := gomock.NewController(t)
			gitea := mock_common.NewMockGiteaFileContentAndRepoFetcher(ctl)
			gitea.EXPECT().GetRepositoryFileContent("foo", "bar", "", "workflow.config").Return([]byte(test.json), "abc", nil)
			gitea.EXPECT().GetRepository("foo", "bar").Return(&repo, nil)

			config, err := common.ReadWorkflowConfig(gitea, "foo/bar")
			if test.wantErr {
				if err == nil {
					t.Fatal("Expected error, got nil")
				}
				return
			}

			if err != nil {
				t.Fatal(err)
			}

			if config.MergeMode != test.mergeMode {
				t.Errorf("Expected MergeMode %s, got %s", test.mergeMode, config.MergeMode)
			}
		})
	}
}
@@ -20,10 +20,13 @@ package common

const (
	GiteaTokenEnv    = "GITEA_TOKEN"
	GiteaHostEnv     = "GITEA_HOST"
	ObsUserEnv       = "OBS_USER"
	ObsPasswordEnv   = "OBS_PASSWORD"
	ObsSshkeyEnv     = "OBS_SSHKEY"
	ObsSshkeyFileEnv = "OBS_SSHKEYFILE"
	ObsApiEnv        = "OBS_API"
	ObsWebEnv        = "OBS_WEB"

	DefaultGitPrj = "_ObsPrj"
	PrjLinksFile  = "links.json"
@@ -822,6 +822,7 @@ func (gitea *GiteaTransport) ResetTimelineCache(org, repo string, idx int64) {
	Cache, IsCached := giteaTimelineCache[prID]
	if IsCached {
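		// backdate lastCheck so the cached timeline is treated as stale and refetched on the next query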
		Cache.lastCheck = Cache.lastCheck.Add(-time.Hour)
		giteaTimelineCache[prID] = Cache
	}
}

@@ -1,10 +1,12 @@
package common

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"slices"
	"strings"

	"src.opensuse.org/autogits/common/gitea-generated/client/repository"
	"src.opensuse.org/autogits/common/gitea-generated/models"
@@ -27,11 +29,13 @@ type MaintainershipMap struct {
	IsDir        bool
	Config       *AutogitConfig
	FetchPackage func(string) ([]byte, error)
	Raw          []byte
}

-func parseMaintainershipData(data []byte) (*MaintainershipMap, error) {
+func ParseMaintainershipData(data []byte) (*MaintainershipMap, error) {
	maintainers := &MaintainershipMap{
		Data: make(map[string][]string),
		Raw:  data,
	}
	if err := json.Unmarshal(data, &maintainers.Data); err != nil {
		return nil, err
@@ -62,7 +66,7 @@ func FetchProjectMaintainershipData(gitea GiteaMaintainershipReader, config *Aut
		}
	}

-	m, err := parseMaintainershipData(data)
+	m, err := ParseMaintainershipData(data)
	if m != nil {
		m.Config = config
		m.IsDir = dir
@@ -84,6 +88,8 @@ func (data *MaintainershipMap) ListProjectMaintainers(groups []*ReviewGroup) []s
		return nil
	}

	m = slices.Clone(m)

	// expands groups
	for _, g := range groups {
		m = g.ExpandMaintainers(m)
@@ -120,6 +126,7 @@ func (data *MaintainershipMap) ListPackageMaintainers(pkg string, groups []*Revi
			}
		}
	}
	pkgMaintainers = slices.Clone(pkgMaintainers)
	prjMaintainers := data.ListProjectMaintainers(nil)

prjMaintainer:
@@ -171,13 +178,135 @@ func (data *MaintainershipMap) IsApproved(pkg string, reviews []*models.PullRevi
	return false
}

func (data *MaintainershipMap) modifyInplace(writer io.StringWriter) error {
	var original map[string][]string
	if err := json.Unmarshal(data.Raw, &original); err != nil {
		return err
	}

	dec := json.NewDecoder(bytes.NewReader(data.Raw))
	_, err := dec.Token()
	if err != nil {
		return err
	}

	output := ""
	lastPos := 0
	modified := false

	type entry struct {
		key      string
		valStart int
		valEnd   int
	}
	var entries []entry

	for dec.More() {
		kToken, _ := dec.Token()
		key := kToken.(string)
		var raw json.RawMessage
		dec.Decode(&raw)
		valEnd := int(dec.InputOffset())
		valStart := valEnd - len(raw)
		entries = append(entries, entry{key, valStart, valEnd})
	}

	changed := make(map[string]bool)
	for k, v := range data.Data {
		if ov, ok := original[k]; !ok || !slices.Equal(v, ov) {
			changed[k] = true
		}
	}
	for k := range original {
		if _, ok := data.Data[k]; !ok {
			changed[k] = true
		}
	}

	if len(changed) == 0 {
		_, err = writer.WriteString(string(data.Raw))
		return err
	}

	for _, e := range entries {
		if v, ok := data.Data[e.key]; ok {
			prefix := string(data.Raw[lastPos:e.valStart])
			if modified && strings.TrimSpace(output) == "{" {
				if commaIdx := strings.Index(prefix, ","); commaIdx != -1 {
					if quoteIdx := strings.Index(prefix, "\""); quoteIdx == -1 || commaIdx < quoteIdx {
						prefix = prefix[:commaIdx] + prefix[commaIdx+1:]
					}
				}
			}
			output += prefix
			if changed[e.key] {
				slices.Sort(v)
				newVal, _ := json.Marshal(v)
				output += string(newVal)
				modified = true
			} else {
				output += string(data.Raw[e.valStart:e.valEnd])
			}
		} else {
			// Deleted
			modified = true
		}
		lastPos = e.valEnd
	}
	output += string(data.Raw[lastPos:])

	// Handle additions (simplistic: at the end)
	for k, v := range data.Data {
		if _, ok := original[k]; !ok {
			slices.Sort(v)
			newVal, _ := json.Marshal(v)
			keyStr, _ := json.Marshal(k)

			// Insert before closing brace
			if idx := strings.LastIndex(output, "}"); idx != -1 {
				prefix := output[:idx]
				suffix := output[idx:]

				trimmedPrefix := strings.TrimRight(prefix, " \n\r\t")
				if !strings.HasSuffix(trimmedPrefix, "{") && !strings.HasSuffix(trimmedPrefix, ",") {
					// find the actual position of the last non-whitespace character in prefix
					lastCharIdx := strings.LastIndexAny(prefix, "]}0123456789\"")
					if lastCharIdx != -1 {
						prefix = prefix[:lastCharIdx+1] + "," + prefix[lastCharIdx+1:]
					}
				}

				insertion := fmt.Sprintf(" %s: %s", string(keyStr), string(newVal))
				if !strings.HasSuffix(prefix, "\n") {
					insertion = "\n" + insertion
				}
				output = prefix + insertion + "\n" + suffix
				modified = true
			}
		}
	}

	if modified {
		_, err := writer.WriteString(output)
		return err
	}
	_, err = writer.WriteString(string(data.Raw))
	return err
}
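
The in-place rewrite above hinges on `json.Decoder.InputOffset`: while walking the top-level object it records, for every key, the exact byte span of its value inside `data.Raw`, so unchanged entries are copied through verbatim and only changed values are re-marshalled. A standalone sketch of that offset-tracking idea (hypothetical input, not from the repository):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"alpha": [1, 2], "beta": []}`)

	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.Token() // consume the opening '{'

	for dec.More() {
		key, _ := dec.Token() // the key token
		var val json.RawMessage
		dec.Decode(&val) // captures the value bytes verbatim
		end := int(dec.InputOffset())
		start := end - len(val)
		fmt.Printf("%q -> raw[%d:%d] = %s\n", key, start, end, raw[start:end])
	}
}
```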

func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) error {
	if data.IsDir {
		return fmt.Errorf("Not implemented")
	}

-	writer.WriteString("{\n")
+	if len(data.Raw) > 0 {
+		if err := data.modifyInplace(writer); err == nil {
+			return nil
+		}
+	}
+
+	// Fallback to full write
+	writer.WriteString("{\n")
	if d, ok := data.Data[""]; ok {
		eol := ","
		if len(data.Data) == 1 {
@@ -188,17 +317,12 @@ func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) e
		writer.WriteString(fmt.Sprintf(" \"\": %s%s\n", string(str), eol))
	}

-	keys := make([]string, len(data.Data))
-	i := 0
+	keys := make([]string, 0, len(data.Data))
	for pkg := range data.Data {
		if pkg == "" {
			continue
		}
-		keys[i] = pkg
-		i++
-	}
-	if len(keys) >= i {
-		keys = slices.Delete(keys, i, len(keys))
+		keys = append(keys, pkg)
	}
	slices.Sort(keys)
	for i, pkg := range keys {
@@ -208,6 +208,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
		name            string
		is_dir          bool
		maintainers     map[string][]string
		raw             []byte
		expected_output string
		expected_error  error
	}{
@@ -231,6 +232,43 @@ func TestMaintainershipFileWrite(t *testing.T) {
			},
			expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
		},
		{
			name: "surgical modification",
			maintainers: map[string][]string{
				"":     {"one", "two"},
				"foo":  {"byte", "four", "newone"},
				"pkg1": {},
			},
			raw:             []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
			expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\",\"newone\"],\n \"pkg1\": []\n}\n",
		},
		{
			name: "no change",
			maintainers: map[string][]string{
				"":     {"one", "two"},
				"foo":  {"byte", "four"},
				"pkg1": {},
			},
			raw:             []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
			expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
		},
		{
			name: "surgical addition",
			maintainers: map[string][]string{
				"":    {"one"},
				"new": {"user"},
			},
			raw:             []byte("{\n \"\": [ \"one\" ]\n}\n"),
			expected_output: "{\n \"\": [ \"one\" ],\n \"new\": [\"user\"]\n}\n",
		},
		{
			name: "surgical deletion",
			maintainers: map[string][]string{
				"": {"one"},
			},
			raw:             []byte("{\n \"\": [\"one\"],\n \"old\": [\"user\"]\n}\n"),
			expected_output: "{\n \"\": [\"one\"]\n}\n",
		},
	}

	for _, test := range tests {
@@ -239,6 +277,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
		data := common.MaintainershipMap{
			Data:  test.maintainers,
			IsDir: test.is_dir,
			Raw:   test.raw,
		}

		if err := data.WriteMaintainershipFile(&b); err != test.expected_error {
@@ -248,7 +287,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
		output := b.String()

		if test.expected_output != output {
-			t.Fatal("unexpected output:", output, "Expecting:", test.expected_output)
+			t.Fatalf("unexpected output:\n%q\nExpecting:\n%q", output, test.expected_output)
		}
	})
}
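
Since `WriteMaintainershipFile` only needs an `io.StringWriter`, a `strings.Builder` is enough to drive it outside the test suite. A small usage sketch built from the API shown in this diff (the `common` import path is inferred from the generated-client imports above):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"src.opensuse.org/autogits/common"
)

func main() {
	raw := []byte("{\n \"\": [\"one\"],\n \"pkg1\": [\"alice\"]\n}\n")

	m, err := common.ParseMaintainershipData(raw)
	if err != nil {
		log.Fatal(err)
	}
	// add a maintainer; only the "pkg1" value should be re-marshalled on write
	m.Data["pkg1"] = append(m.Data["pkg1"], "bob")

	var b strings.Builder
	if err := m.WriteMaintainershipFile(&b); err != nil {
		log.Fatal(err)
	}
	fmt.Print(b.String())
}
```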

@@ -293,3 +332,89 @@ func TestReviewRequired(t *testing.T) {
		})
	}
}

func TestMaintainershipDataCorruption_PackageAppend(t *testing.T) {
	// Test corruption when append happens (merging project maintainers).
	// If the backing array has capacity, append writes to it.

	// We construct a slice with capacity > len to simulate this common scenario
	backingArray := make([]string, 1, 10)
	backingArray[0] = "@g1"

	initialData := map[string][]string{
		"pkg": backingArray, // len 1, cap 10
		"":    {"prjUser"},
	}

	m := &common.MaintainershipMap{
		Data: initialData,
	}

	groups := []*common.ReviewGroup{
		{
			Name:      "@g1",
			Reviewers: []string{"u1"},
		},
	}

	// ListPackageMaintainers("pkg", groups)
	// 1. gets ["@g1"] (cap 10)
	// 2. Appends "prjUser" -> ["@g1", "prjUser"] (in the backing array)
	// 3. Expands "@g1" -> "u1".
	//    Replace: ["u1", "prjUser"]
	//    Sort: ["prjUser", "u1"]
	//
	// The backing array is now ["prjUser", "u1", ...]
	// The map entry "pkg" is still len 1.
	// So it sees ["prjUser"].

	list1 := m.ListPackageMaintainers("pkg", groups)
	t.Logf("List1: %v", list1)

	// ListPackageMaintainers("pkg", nil)
	// Should be ["@g1", "prjUser"] (because prjUser is appended from project maintainers).
	// But with a corrupted backing array:
	// it sees ["prjUser"] (from the map) + appends "prjUser" -> ["prjUser", "prjUser"].

	list2 := m.ListPackageMaintainers("pkg", nil)
	t.Logf("List2: %v", list2)

	if !slices.Contains(list2, "@g1") {
		t.Errorf("Corruption: '@g1' is missing from second call. Got %v", list2)
	}
}

func TestMaintainershipDataCorruption_ProjectInPlace(t *testing.T) {
	// Test corruption in ListProjectMaintainers when the replacement fits in place,
	// e.g. replacing 1 group with 1 user.

	initialData := map[string][]string{
		"": {"@g1"},
	}

	m := &common.MaintainershipMap{
		Data: initialData,
	}

	groups := []*common.ReviewGroup{
		{
			Name:      "@g1",
			Reviewers: []string{"u1"},
		},
	}

	// First call with expansion
	// replaces "@g1" with "u1". Length stays 1. Modifies the backing array in place.
	list1 := m.ListProjectMaintainers(groups)
	t.Logf("List1: %v", list1)

	// Second call without expansion
	// should return ["@g1"]
	list2 := m.ListProjectMaintainers(nil)
	t.Logf("List2: %v", list2)

	if !slices.Contains(list2, "@g1") {
		t.Errorf("Corruption: '@g1' is missing from second call (Project). Got %v", list2)
	}
}
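
The aliasing these two tests probe is plain Go slice semantics: `append` on a slice whose backing array has spare capacity writes into that array, mutating every other slice that shares it, which is why the fix above clones before expanding groups. A self-contained illustration of the pitfall and the `slices.Clone` remedy:

```go
package main

import (
	"fmt"
	"slices"
)

func main() {
	backing := make([]string, 1, 10) // len 1, cap 10
	backing[0] = "@g1"
	data := map[string][]string{"pkg": backing}

	// Pitfall: append reuses the spare capacity, so writes through the
	// returned slice clobber the array data["pkg"] still points at.
	bad := append(data["pkg"], "prjUser")
	bad[0] = "u1"
	fmt.Println(data["pkg"]) // [u1] -- the "@g1" entry is gone

	// Remedy: clone before mutating, as ListProjectMaintainers and
	// ListPackageMaintainers now do.
	data["pkg"] = []string{"@g1"}
	good := slices.Clone(data["pkg"])
	good = append(good, "prjUser")
	good[0] = "u1"
	fmt.Println(data["pkg"]) // [@g1] -- the map entry is untouched
}
```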

@@ -116,13 +116,18 @@ type Flags struct {
	Contents string `xml:",innerxml"`
}

type ProjectLinkMeta struct {
	Project string `xml:"project,attr"`
}

type ProjectMeta struct {
	XMLName      xml.Name          `xml:"project"`
	Name         string            `xml:"name,attr"`
	Title        string            `xml:"title"`
	Description  string            `xml:"description"`
	Url          string            `xml:"url,omitempty"`
-	ScmSync      string            `xml:"scmsync"`
+	ScmSync      string            `xml:"scmsync,omitempty"`
+	Link         []ProjectLinkMeta `xml:"link"`
	Persons      []PersonRepoMeta  `xml:"person"`
	Groups       []GroupRepoMeta   `xml:"group"`
	Repositories []RepositoryMeta  `xml:"repository"`
@@ -138,8 +143,8 @@ type ProjectMeta struct {
type PackageMeta struct {
	XMLName xml.Name `xml:"package"`
	Name    string   `xml:"name,attr"`
-	Project string   `xml:"project,attr"`
-	ScmSync string   `xml:"scmsync"`
+	Project string   `xml:"project,attr,omitempty"`
+	ScmSync string   `xml:"scmsync,omitempty"`
	Persons []PersonRepoMeta `xml:"person"`
	Groups  []GroupRepoMeta  `xml:"group"`
common/pr.go: 154 lines changed
@@ -552,6 +552,145 @@ func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData
	return is_manually_reviewed_ok
}

func (rs *PRSet) AddMergeCommit(git Git, remote string, pr int) bool {
	prinfo := rs.PRs[pr]

	LogDebug("Adding merge commit for %s", PRtoString(prinfo.PR))
	if !prinfo.PR.AllowMaintainerEdit {
		LogError(" PR is not editable by maintainer")
		return false
	}

	repo := prinfo.PR.Base.Repo
	head := prinfo.PR.Head
	br := rs.Config.Branch
	if len(br) == 0 {
		br = prinfo.PR.Base.Name
	}

	msg := fmt.Sprintf("Merge branch '%s' into %s", br, head.Name)
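	// -X theirs resolves any conflicts in favor of the PR head; combined with
	// the read-tree below, the merge commit's tree ends up matching head.Sha exactly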
	if err := git.GitExec(repo.Name, "merge", "--no-ff", "--no-commit", "-X", "theirs", head.Sha); err != nil {
		if err := git.GitExec(repo.Name, "merge", "--no-ff", "--no-commit", "--allow-unrelated-histories", "-X", "theirs", head.Sha); err != nil {
			return false
		}
		LogError("WARNING: Merging unrelated histories")
	}

	// ensure only files that are in head.Sha are kept
	git.GitExecOrPanic(repo.Name, "read-tree", "-m", head.Sha)
	git.GitExecOrPanic(repo.Name, "commit", "-m", msg)
	git.GitExecOrPanic(repo.Name, "clean", "-fxd")

	if !IsDryRun {
		git.GitExecOrPanic(repo.Name, "push", remote, "HEAD:"+head.Name)
		prinfo.PR.Head.Sha = strings.TrimSpace(git.GitExecWithOutputOrPanic(repo.Name, "rev-list", "-1", "HEAD")) // need to update as it's pushed but pr not refetched
	}

	return true
}

func (rs *PRSet) HasMerge(git Git, pr int) bool {
	prinfo := rs.PRs[pr]

	repo := prinfo.PR.Base.Repo
	head := prinfo.PR.Head
	br := rs.Config.Branch
	if len(br) == 0 {
		br = prinfo.PR.Base.Name
	}
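
	// %P prints the parents of head.Sha; two fields mean it is a merge commit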
	parents, err := git.GitExecWithOutput(repo.Name, "show", "-s", "--format=%P", head.Sha)
	if err == nil {
		p := strings.Fields(strings.TrimSpace(parents))
		if len(p) == 2 {
			targetHead, _ := git.GitExecWithOutput(repo.Name, "rev-parse", "HEAD")
			targetHead = strings.TrimSpace(targetHead)
			if p[0] == targetHead || p[1] == targetHead {
				return true
			}
		}
	}
	return false
}

func (rs *PRSet) PrepareForMerge(git Git) bool {
	// verify that the packages can merge here: check out the current target branch of each PRSet,
	// make a temporary PR_#_mergetest branch, and perform the merge on that

	if rs.Config.MergeMode == MergeModeDevel {
		return true // can always merge, as we only set the branch here and do not merge anything
	} else {
		// make sure that all the package PRs are in a mergeable state
		for idx, prinfo := range rs.PRs {
			if rs.IsPrjGitPR(prinfo.PR) {
				continue
			}

			repo := prinfo.PR.Base.Repo
			head := prinfo.PR.Head
			br := rs.Config.Branch
			if len(br) == 0 {
				br = prinfo.PR.Base.Name
			}

			remote, err := git.GitClone(repo.Name, br, repo.SSHURL)
			if err != nil {
				return false
			}
			git.GitExecOrPanic(repo.Name, "fetch", remote, head.Sha)
			switch rs.Config.MergeMode {
			case MergeModeFF:
				if err := git.GitExec(repo.Name, "merge-base", "--is-ancestor", "HEAD", head.Sha); err != nil {
					return false
				}
			case MergeModeReplace:
			Verify:
				if err := git.GitExec(repo.Name, "merge-base", "--is-ancestor", "HEAD", head.Sha); err != nil {
					if !rs.HasMerge(git, idx) {
						forkRemote, err := git.GitClone(repo.Name, head.Name, head.Repo.SSHURL)
						if err != nil {
							LogError("Failed to clone head repo:", head.Name, head.Repo.SSHURL)
							return false
						}
						LogDebug("Merge commit is missing and no FF merge is possible")
						git.GitExecOrPanic(repo.Name, "checkout", remote+"/"+br)
						if !rs.AddMergeCommit(git, forkRemote, idx) {
							return false
						}
						if !IsDryRun {
							goto Verify
						}
					}
				}
			}
		}
	}

	// now check whether the project git is mergeable
	prjgit_info, err := rs.GetPrjGitPR()
	if err != nil {
		return false
	}
	prjgit := prjgit_info.PR

	_, _, prjgitBranch := rs.Config.GetPrjGit()
	remote, err := git.GitClone(DefaultGitPrj, prjgitBranch, prjgit.Base.Repo.SSHURL)
	if err != nil {
		return false
	}

	testBranch := fmt.Sprintf("PR_%d_mergetest", prjgit.Index)
	git.GitExecOrPanic(DefaultGitPrj, "fetch", remote, prjgit.Head.Sha)
	if err := git.GitExec(DefaultGitPrj, "checkout", "-B", testBranch, prjgit.Base.Sha); err != nil {
		return false
	}
	if err := git.GitExec(DefaultGitPrj, "merge", "--no-ff", "--no-commit", prjgit.Head.Sha); err != nil {
		return false
	}

	return true
}

func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
	prjgit_info, err := rs.GetPrjGitPR()
	if err != nil {
@@ -690,8 +829,12 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
		}
		prinfo.RemoteName, err = git.GitClone(repo.Name, br, repo.SSHURL)
		PanicOnError(err)
-		git.GitExecOrPanic(repo.Name, "fetch", prinfo.RemoteName, head.Sha)
-		git.GitExecOrPanic(repo.Name, "merge", "--ff", head.Sha)
+		if rs.Config.MergeMode == MergeModeDevel {
+			git.GitExecOrPanic(repo.Name, "checkout", "-B", br, head.Sha)
+		} else {
+			git.GitExecOrPanic(repo.Name, "fetch", prinfo.RemoteName, head.Sha)
+			git.GitExecOrPanic(repo.Name, "merge", "--ff", head.Sha)
+		}
	}

@@ -708,7 +851,12 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
	repo := prinfo.PR.Base.Repo

	if !IsDryRun {
-		git.GitExecOrPanic(repo.Name, "push", prinfo.RemoteName)
+		params := []string{"push"}
+		if rs.Config.MergeMode == MergeModeDevel {
+			params = append(params, "-f")
+		}
+		params = append(params, prinfo.RemoteName)
+		git.GitExecOrPanic(repo.Name, params...)
	} else {
		LogInfo("*** WOULD push", repo.Name, "to", prinfo.RemoteName)
	}
@@ -2,6 +2,7 @@ package common_test

import (
	"errors"
	"fmt"
	"os"
	"os/exec"
	"path"
@@ -1228,7 +1229,7 @@ func TestPRMerge(t *testing.T) {
				Owner: &models.User{
					UserName: "org",
				},
-				SSHURL: "file://" + path.Join(repoDir, "prjgit"),
+				SSHURL: "ssh://git@src.opensuse.org/org/prj.git",
			},
		},
		Head: &models.PRBranchInfo{
@@ -1248,7 +1249,7 @@ func TestPRMerge(t *testing.T) {
				Owner: &models.User{
					UserName: "org",
				},
-				SSHURL: "file://" + path.Join(cmd.Dir, "prjgit"),
+				SSHURL: "ssh://git@src.opensuse.org/org/prj.git",
			},
		},
		Head: &models.PRBranchInfo{
@@ -1338,3 +1339,346 @@ func TestPRChanges(t *testing.T) {
		})
	}
}

func TestPRPrepareForMerge(t *testing.T) {
	tests := []struct {
		name     string
		setup    func(*mock_common.MockGit, *models.PullRequest, *models.PullRequest)
		config   *common.AutogitConfig
		expected bool
		editable bool
	}{
		{
			name: "Success Devel",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeDevel,
			},
			setup:    func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {},
			expected: true,
		},
		{
			name: "Success FF",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeFF,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin", pkgPR.Head.Sha)
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", pkgPR.Head.Sha).Return(nil)

				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "checkout", "-B", "PR_1_mergetest", prjPR.Base.Sha).Return(nil)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "--no-commit", prjPR.Head.Sha).Return(nil)
			},
			expected: true,
		},
		{
			name: "Success Replace MergeCommit",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeReplace,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin", pkgPR.Head.Sha)
				// merge-base fails initially
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", pkgPR.Head.Sha).Return(fmt.Errorf("not ancestor"))
				// HasMerge returns true
				m.EXPECT().GitExecWithOutput("pkg", "show", "-s", "--format=%P", pkgPR.Head.Sha).Return("parent1 target_head", nil)
				m.EXPECT().GitExecWithOutput("pkg", "rev-parse", "HEAD").Return("target_head", nil)

				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "checkout", "-B", "PR_1_mergetest", prjPR.Base.Sha).Return(nil)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "--no-commit", prjPR.Head.Sha).Return(nil)
			},
			expected: true,
		},
		{
			name: "Merge Conflict in PrjGit",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeFF,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin", pkgPR.Head.Sha)
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", pkgPR.Head.Sha).Return(nil)

				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "checkout", "-B", "PR_1_mergetest", prjPR.Base.Sha).Return(nil)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "--no-commit", prjPR.Head.Sha).Return(fmt.Errorf("conflict"))
			},
			expected: false,
		},
		{
			name: "Not FF in PkgGit",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeFF,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin", pkgPR.Head.Sha)
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", pkgPR.Head.Sha).Return(fmt.Errorf("not ancestor"))
			},
			expected: false,
		},
		{
			name: "Success Replace with AddMergeCommit",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeReplace,
			},
			editable: true,
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin", pkgPR.Head.Sha)
				// First merge-base fails
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", pkgPR.Head.Sha).Return(fmt.Errorf("not ancestor"))
				// HasMerge returns false
				m.EXPECT().GitExecWithOutput("pkg", "show", "-s", "--format=%P", pkgPR.Head.Sha).Return("parent1", nil)
				m.EXPECT().GitClone("pkg", pkgPR.Head.Name, pkgPR.Base.Repo.SSHURL).Return("origin_fork", nil)
				// AddMergeCommit is called
				m.EXPECT().GitExecOrPanic("pkg", "checkout", "origin/master")
				m.EXPECT().GitExec("pkg", "merge", "--no-ff", "--no-commit", "-X", "theirs", pkgPR.Head.Sha).Return(nil)
				m.EXPECT().GitExecOrPanic("pkg", "read-tree", "-m", pkgPR.Head.Sha)
				m.EXPECT().GitExecOrPanic("pkg", "commit", "-m", gomock.Any())
				m.EXPECT().GitExecOrPanic("pkg", "clean", "-fxd")
				m.EXPECT().GitExecOrPanic("pkg", "push", "origin_fork", "HEAD:"+pkgPR.Head.Name)
				m.EXPECT().GitExecWithOutputOrPanic("pkg", "rev-list", "-1", "HEAD").Return("new_pkg_head_sha")
				// Second merge-base succeeds (after goto Verify)
				m.EXPECT().GitExec("pkg", "merge-base", "--is-ancestor", "HEAD", "new_pkg_head_sha").Return(nil)

				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "checkout", "-B", "PR_1_mergetest", prjPR.Base.Sha).Return(nil)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "--no-commit", prjPR.Head.Sha).Return(nil)
			},
			expected: true,
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			prjPR := &models.PullRequest{
				Index: 1,
				Base: &models.PRBranchInfo{
					Name: "master",
					Sha:  "base_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "_ObsPrj",
						SSHURL: "ssh://git@src.opensuse.org/org/_ObsPrj.git",
					},
				},
				Head: &models.PRBranchInfo{
					Sha: "head_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "_ObsPrj",
						SSHURL: "ssh://git@src.opensuse.org/org/_ObsPrj.git",
					},
				},
			}

			pkgPR := &models.PullRequest{
				Index: 2,
				Base: &models.PRBranchInfo{
					Name: "master",
					Sha:  "pkg_base_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "pkg",
						SSHURL: "ssh://git@src.opensuse.org/org/pkg.git",
					},
				},
				Head: &models.PRBranchInfo{
					Name: "branch_name",
					Sha:  "pkg_head_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "pkg",
						SSHURL: "ssh://git@src.opensuse.org/org/pkg.git",
					},
				},
				AllowMaintainerEdit: test.editable,
			}

			ctl := gomock.NewController(t)
			git := mock_common.NewMockGit(ctl)
			test.setup(git, prjPR, pkgPR)

			prset := &common.PRSet{
				Config: test.config,
				PRs: []*common.PRInfo{
					{PR: prjPR},
					{PR: pkgPR},
				},
			}

			if res := prset.PrepareForMerge(git); res != test.expected {
				t.Errorf("Expected %v, got %v", test.expected, res)
			}
		})
	}
}

func TestPRMergeMock(t *testing.T) {
	tests := []struct {
		name   string
		setup  func(*mock_common.MockGit, *models.PullRequest, *models.PullRequest)
		config *common.AutogitConfig
	}{
		{
			name: "Success FF",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeFF,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "-m", gomock.Any(), prjPR.Head.Sha).Return(nil)

				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin_pkg", nil)
				m.EXPECT().GitExecOrPanic("pkg", "fetch", "origin_pkg", pkgPR.Head.Sha)
				m.EXPECT().GitExecOrPanic("pkg", "merge", "--ff", pkgPR.Head.Sha)
				m.EXPECT().GitExecOrPanic("pkg", "push", "origin_pkg")
				m.EXPECT().GitExecOrPanic("_ObsPrj", "push", "origin")
			},
		},
		{
			name: "Success Devel",
			config: &common.AutogitConfig{
				Organization:   "org",
				GitProjectName: "org/_ObsPrj#master",
				MergeMode:      common.MergeModeDevel,
			},
			setup: func(m *mock_common.MockGit, prjPR, pkgPR *models.PullRequest) {
				m.EXPECT().GitClone("_ObsPrj", "master", prjPR.Base.Repo.SSHURL).Return("origin", nil)
				m.EXPECT().GitExecOrPanic("_ObsPrj", "fetch", "origin", prjPR.Head.Sha)
				m.EXPECT().GitExec("_ObsPrj", "merge", "--no-ff", "-m", gomock.Any(), prjPR.Head.Sha).Return(nil)

				m.EXPECT().GitClone("pkg", "master", pkgPR.Base.Repo.SSHURL).Return("origin_pkg", nil)
				m.EXPECT().GitExecOrPanic("pkg", "checkout", "-B", "master", pkgPR.Head.Sha)
				m.EXPECT().GitExecOrPanic("pkg", "push", "-f", "origin_pkg")
				m.EXPECT().GitExecOrPanic("_ObsPrj", "push", "origin")
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			prjPR := &models.PullRequest{
				Index: 1,
				Base: &models.PRBranchInfo{
					Name: "master",
					Sha:  "prj_base_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "_ObsPrj",
						SSHURL: "ssh://git@src.opensuse.org/org/_ObsPrj.git",
					},
				},
				Head: &models.PRBranchInfo{
					Sha: "prj_head_sha",
				},
			}
			pkgPR := &models.PullRequest{
				Index: 2,
				Base: &models.PRBranchInfo{
					Name: "master",
					Sha:  "pkg_base_sha",
					Repo: &models.Repository{
						Owner:  &models.User{UserName: "org"},
						Name:   "pkg",
						SSHURL: "ssh://git@src.opensuse.org/org/pkg.git",
					},
				},
				Head: &models.PRBranchInfo{
					Sha: "pkg_head_sha",
				},
			}

			ctl := gomock.NewController(t)
			git := mock_common.NewMockGit(ctl)
			reviewUnrequestMock := mock_common.NewMockGiteaReviewUnrequester(ctl)
			reviewUnrequestMock.EXPECT().UnrequestReview(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().Return(nil)

			test.setup(git, prjPR, pkgPR)

			prset := &common.PRSet{
				Config: test.config,
				PRs: []*common.PRInfo{
					{PR: prjPR},
					{PR: pkgPR},
				},
			}

			if err := prset.Merge(reviewUnrequestMock, git); err != nil {
				t.Errorf("Unexpected error: %v", err)
			}
		})
	}
}

func TestPRAddMergeCommit(t *testing.T) {
	pkgPR := &models.PullRequest{
		Index: 2,
		Base: &models.PRBranchInfo{
			Name: "master",
			Sha:  "pkg_base_sha",
			Repo: &models.Repository{
				Owner:  &models.User{UserName: "org"},
				Name:   "pkg",
				SSHURL: "ssh://git@src.opensuse.org/org/pkg.git",
			},
		},
		Head: &models.PRBranchInfo{
			Name: "branch_name",
			Sha:  "pkg_head_sha",
		},
		AllowMaintainerEdit: true,
	}

	config := &common.AutogitConfig{
		Organization:   "org",
		GitProjectName: "org/_ObsPrj#master",
		MergeMode:      common.MergeModeReplace,
	}

	ctl := gomock.NewController(t)
	git := mock_common.NewMockGit(ctl)

	git.EXPECT().GitExec("pkg", "merge", "--no-ff", "--no-commit", "-X", "theirs", pkgPR.Head.Sha).Return(nil)
	git.EXPECT().GitExecOrPanic("pkg", "read-tree", "-m", pkgPR.Head.Sha)
	git.EXPECT().GitExecOrPanic("pkg", "commit", "-m", gomock.Any())
	git.EXPECT().GitExecOrPanic("pkg", "clean", "-fxd")
	git.EXPECT().GitExecOrPanic("pkg", "push", "origin", "HEAD:branch_name")
	git.EXPECT().GitExecWithOutputOrPanic("pkg", "rev-list", "-1", "HEAD").Return("new_head_sha")

	prset := &common.PRSet{
		Config: config,
		PRs: []*common.PRInfo{
			{PR: &models.PullRequest{}}, // prjgit at index 0
			{PR: pkgPR},                 // pkg at index 1
		},
	}

	if res := prset.AddMergeCommit(git, "origin", 1); !res {
		t.Errorf("Expected true, got %v", res)
	}
}

@@ -11,6 +11,7 @@ devel:languages:lua
devel:languages:nodejs
devel:languages:perl
devel:languages:python:Factory
devel:languages:python:mailman
devel:languages:python:pytest
devel:openSUSE:Factory
network:chromium
integration/Dockerfile (new file): 11 lines changed
@@ -0,0 +1,11 @@

FROM opensuse/tumbleweed
ENV container=podman

ENV LANG=en_US.UTF-8

RUN zypper -vvvn install podman podman-compose vim make python3-pytest python3-requests python3-pytest-dependency

COPY . /opt/project/

WORKDIR /opt/project/integration
integration/Makefile (new file): 76 lines changed
@@ -0,0 +1,76 @@
# We want to be able to test in two **modes**:
# A. bots are used from official packages as defined in */Dockerfile.package
# B. bots are just picked up from binaries that are placed in the corresponding parent directory.

# The topology is defined in the podman-compose file and can be spawned in two ways:
# 1. Privileged container (needs no additional dependencies)
# 2. podman-compose on a local machine (needs dependencies as defined in the Dockerfile)


# Typical workflow:
# A1: - run 'make test_package'
# B1: - run 'make test_local' (make sure that the go binaries in the parent folder are built)
# A2:
#   1. 'make build_package' - prepares images (recommended, otherwise there might be surprises if an image fails to build during 'make up')
#   2. 'make up' - spawns podman-compose
#   3. 'pytest -v tests/*' - run tests
#   4. 'make down' - once the containers are not needed
# B2: (make sure the go binaries in the parent folder are built)
#   1. 'make build_local' - prepares images (recommended, otherwise there might be surprises if an image fails to build during 'make up')
#   2. 'make up' - spawns podman-compose
#   3. 'pytest -v tests/*' - run tests
#   4. 'make down' - once the containers are not needed


AUTO_DETECT_MODE := $(shell if test -e ../workflow-pr/workflow-pr; then echo .local; else echo .package; fi)

# try to detect mode B1, otherwise mode A1
test: GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE)
test: build_container test_container

# mode A1
test_package: GIWTF_IMAGE_SUFFIX=.package
test_package: build_container test_container

# mode B1
test_local: GIWTF_IMAGE_SUFFIX=.local
test_local: build_container test_container

MODULES := gitea-events-rabbitmq-publisher obs-staging-bot workflow-pr

# Prepare topology 1
build_container:
	podman build ../ -f integration/Dockerfile -t autogits_integration

# Run tests in topology 1
test_container:
	podman run --rm --privileged -t --network integration_gitea-network -e GIWTF_IMAGE_SUFFIX=$(GIWTF_IMAGE_SUFFIX) autogits_integration /usr/bin/bash -c "make build && make up && sleep 25 && pytest -v tests/*"


build_local: AUTO_DETECT_MODE=.local
build_local: build

build_package: AUTO_DETECT_MODE=.package
build_package: build

# parse all service images from podman-compose and build them (topology 2)
build:
	podman pull docker.io/library/rabbitmq:3.13.7-management
	for i in $$(grep -A 1000 services: podman-compose.yml | grep -oE '^ [^: ]+'); do GIWTF_IMAGE_SUFFIX=$(AUTO_DETECT_MODE) podman-compose build $$i || exit 1; done

# this will spawn prebuilt containers (topology 2)
up:
	podman-compose up -d

# tear down (topology 2)
down:
	podman-compose down

# mode A
up-bots-package:
	GIWTF_IMAGE_SUFFIX=.package podman-compose up -d

# mode B
up-bots-local:
	GIWTF_IMAGE_SUFFIX=.local podman-compose up -d
integration/clean.sh (new executable file): 1 line changed
@@ -0,0 +1 @@
sudo rm -rf gitea-data/ gitea-logs/ rabbitmq-data/ workflow-pr-repos/
integration/gitea-events-rabbitmq-publisher/Dockerfile (symbolic link): 1 line changed
@@ -0,0 +1 @@
Dockerfile.package
integration/gitea-events-rabbitmq-publisher/Dockerfile.local (new file): 15 lines changed
@@ -0,0 +1,15 @@
FROM registry.suse.com/bci/bci-base:15.7

# Add the custom CA to the trust store
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
RUN update-ca-certificates

RUN zypper -n in which binutils

# Copy the pre-built binary into the container
# The user will build this and place it in the same directory as this Dockerfile
COPY gitea-events-rabbitmq-publisher/gitea-events-rabbitmq-publisher /usr/local/bin/
COPY integration/gitea-events-rabbitmq-publisher/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
integration/gitea-events-rabbitmq-publisher/Dockerfile.package (new file): 15 lines changed
@@ -0,0 +1,15 @@
FROM registry.suse.com/bci/bci-base:15.7

# Add the custom CA to the trust store
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
RUN update-ca-certificates

RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
RUN zypper --gpg-auto-import-keys ref

RUN zypper -n in git-core curl autogits-gitea-events-rabbitmq-publisher binutils

COPY integration/gitea-events-rabbitmq-publisher/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
integration/gitea-events-rabbitmq-publisher/entrypoint.sh (new file): 13 lines changed
@@ -0,0 +1,13 @@
#!/bin/sh
set -e

exe=$(which gitea-events-rabbitmq-publisher 2>/dev/null) || :
exe=${exe:-/usr/local/bin/gitea-events-rabbitmq-publisher}

package=$(rpm -qa | grep autogits-gitea-events-rabbitmq-publisher) || :

echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :
echo "RABBITMQ_HOST: $RABBITMQ_HOST"

exec $exe "$@"
integration/gitea/Dockerfile (new file): 25 lines changed
@@ -0,0 +1,25 @@
FROM registry.suse.com/bci/bci-base:15.7

RUN zypper ar --repo https://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo \
	&& zypper -n --gpg-auto-import-keys refresh

RUN zypper -n install \
	git \
	sqlite3 \
	curl \
	gawk \
	openssh \
	jq \
	devel_Factory_git-workflow:gitea \
	&& rm -rf /var/cache/zypp/*

# Copy the minimal set of required files from the local 'container-files' directory
COPY container-files/ /

RUN chmod -R 777 /etc/gitea/conf

# Make the setup and entrypoint scripts executable
RUN chmod +x /opt/setup/setup-gitea.sh && chmod +x /opt/setup/entrypoint.sh && chmod +x /opt/setup/setup-webhook.sh && chmod +x /opt/setup/setup-dummy-data.sh

# Use the new entrypoint script to start the container
ENTRYPOINT ["/opt/setup/entrypoint.sh"]
integration/gitea/container-files/etc/gitea/conf/app.ini (new file): 42 lines changed
@@ -0,0 +1,42 @@
WORK_PATH = /var/lib/gitea

[server]
CERT_FILE = /etc/gitea/https/cert.pem
KEY_FILE = /etc/gitea/https/key.pem
STATIC_ROOT_PATH = /usr/share/gitea
APP_DATA_PATH = /var/lib/gitea/data
PPROF_DATA_PATH = /var/lib/gitea/data/tmp/pprof
PROTOCOL = http
DOMAIN = gitea-test
SSH_DOMAIN = gitea-test
ROOT_URL = http://gitea-test:3000/
HTTP_PORT = 3000
DISABLE_SSH = false
START_SSH_SERVER = true
SSH_PORT = 3022
LFS_START_SERVER = true

[lfs]
PATH = /var/lib/gitea/data/lfs

[database]
DB_TYPE = sqlite3
PATH = /var/lib/gitea/data/gitea.db

[security]
INSTALL_LOCK = true

[oauth2]
ENABLED = false

[log]
ROOT_PATH = /var/log/gitea
MODE = console, file
; Either "Trace", "Debug", "Info", "Warn", "Error" or "None", default is "Info"
LEVEL = Debug

[service]
ENABLE_BASIC_AUTHENTICATION = true

[webhook]
ALLOWED_HOST_LIST = gitea-publisher
integration/gitea/container-files/opt/setup/entrypoint.sh (new file): 19 lines changed
@@ -0,0 +1,19 @@
#!/bin/bash
set -e

# Run setup to ensure permissions, migrations, and the admin user are ready.
# The setup script is now idempotent.
/opt/setup/setup-gitea.sh

# Start the webhook setup script in the background.
# It will wait for the main Gitea process to be ready before creating the webhook.
/opt/setup/setup-webhook.sh &

echo "Starting Gitea..."

# The original systemd service ran as user 'gitea' and group 'gitea'
# with a working directory of '/var/lib/gitea'.
# We will switch to that user and run the web command.
# Using exec means Gitea will become PID 1, allowing it to receive signals correctly.
cd /var/lib/gitea
exec su -s /bin/bash gitea -c "/usr/bin/gitea web --config /etc/gitea/conf/app.ini"
integration/gitea/container-files/opt/setup/setup-dummy-data.sh (new file): 2 lines changed
@@ -0,0 +1,2 @@
#!/bin/bash
# This script is now empty as dummy data setup is handled by pytest fixtures.
integration/gitea/container-files/opt/setup/setup-gitea.sh (new file): 100 lines changed
@@ -0,0 +1,100 @@
|
||||
#!/bin/bash
|
||||
set -x
|
||||
set -e
|
||||
|
||||
# Set ownership on the volume mounts. This allows the 'gitea' user to write to them.
|
||||
# We use -R to ensure all subdirectories (like /var/lib/gitea/data) are covered.
|
||||
chown -R gitea:gitea /var/lib/gitea /var/log/gitea
|
||||
|
||||
# Set ownership on the config directory.
|
||||
chown -R gitea:gitea /etc/gitea
|
||||
|
# Run database migrations to initialize the sqlite3 db based on app.ini.
su -s /bin/bash gitea -c 'gitea migrate'

# Create a default admin user if it doesn't exist
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "admin" {found=1} END {exit !found}'; then
    echo "Creating admin user..."
    su -s /bin/bash gitea -c 'gitea admin user create --username admin --password opensuse --email admin@example.com --must-change-password=false --admin'
else
    echo "Admin user already exists."
fi

# Generate an access token for the admin user
ADMIN_TOKEN_FILE="/var/lib/gitea/admin.token"
if [ -f "$ADMIN_TOKEN_FILE" ]; then
    echo "Admin token already exists at $ADMIN_TOKEN_FILE."
else
    echo "Generating admin token..."
    ADMIN_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u admin -t admin-token")
    if [ -n "$ADMIN_TOKEN" ]; then
        printf "%s" "$ADMIN_TOKEN" > "$ADMIN_TOKEN_FILE"
        # Deliberately permissive so other test containers sharing the volume can read it.
        chmod 777 "$ADMIN_TOKEN_FILE"
        chown gitea:gitea "$ADMIN_TOKEN_FILE"
        echo "Admin token generated and saved to $ADMIN_TOKEN_FILE."
    else
        echo "Failed to generate admin token."
    fi
fi

# Generate an SSH key for the admin user if it doesn't exist
SSH_KEY_DIR="/var/lib/gitea/ssh-keys"
mkdir -p "$SSH_KEY_DIR"
if [ ! -f "$SSH_KEY_DIR/id_ed25519" ]; then
    echo "Generating SSH key for admin user..."
    ssh-keygen -t ed25519 -N "" -f "$SSH_KEY_DIR/id_ed25519"
    chown -R gitea:gitea "$SSH_KEY_DIR"
    chmod 700 "$SSH_KEY_DIR"
    chmod 600 "$SSH_KEY_DIR/id_ed25519"
    chmod 644 "$SSH_KEY_DIR/id_ed25519.pub"
fi

# Create an autogits_obs_staging_bot user if it doesn't exist
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "autogits_obs_staging_bot" {found=1} END {exit !found}'; then
    echo "Creating autogits_obs_staging_bot user..."
    su -s /bin/bash gitea -c 'gitea admin user create --username autogits_obs_staging_bot --password opensuse --email autogits_obs_staging_bot@example.com --must-change-password=false'
else
    echo "autogits_obs_staging_bot user already exists."
fi

# Generate an access token for the autogits_obs_staging_bot user
BOT_TOKEN_FILE="/var/lib/gitea/autogits_obs_staging_bot.token"
if [ -f "$BOT_TOKEN_FILE" ]; then
    echo "autogits_obs_staging_bot token already exists at $BOT_TOKEN_FILE."
else
    echo "Generating autogits_obs_staging_bot token..."
    BOT_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u autogits_obs_staging_bot -t autogits_obs_staging_bot-token")
    if [ -n "$BOT_TOKEN" ]; then
        printf "%s" "$BOT_TOKEN" > "$BOT_TOKEN_FILE"
        chmod 666 "$BOT_TOKEN_FILE"
        chown gitea:gitea "$BOT_TOKEN_FILE"
        echo "autogits_obs_staging_bot token generated and saved to $BOT_TOKEN_FILE."
    else
        echo "Failed to generate autogits_obs_staging_bot token."
    fi
fi

# Create a workflow-pr user if it doesn't exist
if ! su -s /bin/bash gitea -c 'gitea admin user list' | awk 'NR>1 && $2 == "workflow-pr" {found=1} END {exit !found}'; then
    echo "Creating workflow-pr user..."
    su -s /bin/bash gitea -c 'gitea admin user create --username workflow-pr --password opensuse --email workflow-pr@example.com --must-change-password=false'
else
    echo "workflow-pr user already exists."
fi

# Generate an access token for the workflow-pr user
BOT_TOKEN_FILE="/var/lib/gitea/workflow-pr.token"
if [ -f "$BOT_TOKEN_FILE" ]; then
    echo "workflow-pr token already exists at $BOT_TOKEN_FILE."
else
    echo "Generating workflow-pr token..."
    BOT_TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u workflow-pr -t workflow-pr-token")
    if [ -n "$BOT_TOKEN" ]; then
        printf "%s" "$BOT_TOKEN" > "$BOT_TOKEN_FILE"
        chmod 666 "$BOT_TOKEN_FILE"
        chown gitea:gitea "$BOT_TOKEN_FILE"
        echo "workflow-pr token generated and saved to $BOT_TOKEN_FILE."
    else
        echo "Failed to generate workflow-pr token."
    fi
fi
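A note for anyone poking at the environment by hand: the token files written above land in the `gitea-data` volume that the compose file mounts from `./integration/gitea-data`, so they can be sanity-checked from the host. A minimal sketch, assuming the default port mapping of 3000 and the `requests` library:

```python
import requests

# Read the admin token the setup script wrote into the shared gitea-data volume.
with open("integration/gitea-data/admin.token") as f:
    token = f.read().strip()

# Any authenticated endpoint works as a smoke test; /api/v1/version is cheap.
r = requests.get("http://127.0.0.1:3000/api/v1/version",
                 headers={"Authorization": f"token {token}"})
print(r.status_code, r.json())
```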
92
integration/gitea/container-files/opt/setup/setup-webhook.sh
Normal file
@@ -0,0 +1,92 @@
#!/bin/bash
set -e

GITEA_URL="http://localhost:3000"
WEBHOOK_URL="http://gitea-publisher:8002/rabbitmq-forwarder"
TOKEN_NAME="webhook-creator"

echo "Webhook setup script started in background."

# Wait 10s for the main Gitea process to start
sleep 10

# Wait for the Gitea API to be ready
echo "Waiting for Gitea API at $GITEA_URL..."
while ! curl -s -f "$GITEA_URL/api/v1/version" > /dev/null; do
    echo "Gitea API not up yet, waiting 5s..."
    sleep 5
done
echo "Gitea API is up."

# The `gitea admin` command needs to be run as the gitea user.
# The -raw flag gives us the token directly.
echo "Generating or retrieving admin token..."
TOKEN_FILE="/var/lib/gitea/admin.token"

if [ -f "$TOKEN_FILE" ]; then
    TOKEN=$(tr -d '\n\r ' < "$TOKEN_FILE")
    echo "Admin token loaded from $TOKEN_FILE."
else
    TOKEN=$(su -s /bin/bash gitea -c "gitea admin user generate-access-token -raw -u admin -t $TOKEN_NAME")
    if [ -n "$TOKEN" ]; then
        printf "%s" "$TOKEN" > "$TOKEN_FILE"
        chmod 666 "$TOKEN_FILE"
        chown gitea:gitea "$TOKEN_FILE"
        echo "Admin token generated and saved to $TOKEN_FILE."
    fi
fi

if [ -z "$TOKEN" ]; then
    echo "Failed to generate or retrieve admin token. This might be because the token already exists in Gitea but not in $TOKEN_FILE. Exiting."
    exit 1
fi

# Run the dummy data setup script
/opt/setup/setup-dummy-data.sh "$GITEA_URL" "$TOKEN"

# Add the SSH key via the API
PUB_KEY_FILE="/var/lib/gitea/ssh-keys/id_ed25519.pub"
if [ -f "$PUB_KEY_FILE" ]; then
    echo "Checking for existing SSH key 'bot-key'..."
    KEYS_URL="$GITEA_URL/api/v1/admin/users/workflow-pr/keys"
    EXISTING_KEYS=$(curl -s -X GET -H "Authorization: token $TOKEN" "$KEYS_URL")

    if ! echo "$EXISTING_KEYS" | grep -q "\"title\":\"bot-key\""; then
        echo "Registering SSH key 'bot-key' via API..."
        KEY_CONTENT=$(cat "$PUB_KEY_FILE")
        curl -s -X POST "$KEYS_URL" \
            -H "Authorization: token $TOKEN" \
            -H "Content-Type: application/json" \
            -d "{
                \"key\": \"$KEY_CONTENT\",
                \"read_only\": false,
                \"title\": \"bot-key\"
            }"
        echo -e "\nSSH key registered."
    else
        echo "SSH key 'bot-key' already registered."
    fi
fi

# Check whether the webhook already exists
echo "Checking for existing system webhook..."
DB_PATH="/var/lib/gitea/data/gitea.db"
EXISTS=$(su -s /bin/bash gitea -c "sqlite3 '$DB_PATH' \"SELECT 1 FROM webhook WHERE url = '$WEBHOOK_URL' AND is_system_webhook = 1 LIMIT 1;\"")

if [ "$EXISTS" = "1" ]; then
    echo "System webhook for $WEBHOOK_URL already exists. Exiting."
    exit 0
fi

echo "Creating Gitea system webhook for $WEBHOOK_URL via direct database INSERT..."
# The events JSON requires escaped double quotes for the sqlite3 command.
EVENTS_JSON='{\"push_only\":false,\"send_everything\":true,\"choose_events\":false,\"branch_filter\":\"*\",\"events\":{\"create\":false,\"delete\":false,\"fork\":false,\"issue_assign\":false,\"issue_comment\":false,\"issue_label\":false,\"issue_milestone\":false,\"issues\":false,\"package\":false,\"pull_request\":false,\"pull_request_assign\":false,\"pull_request_comment\":false,\"pull_request_label\":false,\"pull_request_milestone\":false,\"pull_request_review\":false,\"pull_request_review_request\":false,\"pull_request_sync\":false,\"push\":false,\"release\":false,\"repository\":false,\"status\":false,\"wiki\":false,\"workflow_job\":false,\"workflow_run\":false}}'
NOW_UNIX=$(date +%s)

INSERT_CMD="INSERT INTO webhook (repo_id, owner_id, is_system_webhook, url, http_method, content_type, events, is_active, type, meta, created_unix, updated_unix) VALUES (0, 0, 1, '$WEBHOOK_URL', 'POST', 1, '$EVENTS_JSON', 1, 'gitea', '', $NOW_UNIX, $NOW_UNIX);"

su -s /bin/bash gitea -c "sqlite3 '$DB_PATH' \"$INSERT_CMD\""

echo "System webhook created successfully."

exit 0
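Since the webhook is inserted behind Gitea's back, it can be read back the same way the script checks for it. A hypothetical verification snippet (it assumes the sqlite database is reachable under the mounted `gitea-data` path on the host):

```python
import sqlite3

# Read back the system webhook row that setup-webhook.sh inserted.
db = sqlite3.connect("integration/gitea-data/data/gitea.db")
row = db.execute(
    "SELECT url, is_active FROM webhook WHERE is_system_webhook = 1"
).fetchone()
print(row)  # expected: ('http://gitea-publisher:8002/rabbitmq-forwarder', 1)
```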
14
integration/mock-obs/Dockerfile
Normal file
@@ -0,0 +1,14 @@
# Use a base Python image
FROM registry.suse.com/bci/python:3.11

# Set the working directory
WORKDIR /app

# Copy the server script
COPY server.py .

# Expose the port the server will run on
EXPOSE 8080

# Command to run the server
CMD ["python3", "-u", "server.py"]
@@ -0,0 +1,18 @@
<project name="openSUSE:Leap:16.0:PullRequest">
  <title>Leap 16.0 PullRequest area</title>
  <description>Base project to define the pull request builds</description>
  <person userid="autogits_obs_staging_bot" role="maintainer"/>
  <person userid="maxlin_factory" role="maintainer"/>
  <group groupid="maintenance-opensuse.org" role="maintainer"/>
  <debuginfo>
    <enable/>
  </debuginfo>
  <repository name="standard">
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <arch>x86_64</arch>
    <arch>i586</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
</project>
@@ -0,0 +1,59 @@
<project name="openSUSE:Leap:16.0">
  <title>openSUSE Leap 16.0 based on SLFO</title>
  <description>Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)</description>
  <link project="openSUSE:Backports:SLE-16.0"/>
  <scmsync>http://gitea-test:3000/products/SLFO#main</scmsync>
  <person userid="dimstar_suse" role="maintainer"/>
  <person userid="lkocman-factory" role="maintainer"/>
  <person userid="maxlin_factory" role="maintainer"/>
  <person userid="factory-auto" role="reviewer"/>
  <person userid="licensedigger" role="reviewer"/>
  <group groupid="autobuild-team" role="maintainer"/>
  <group groupid="factory-maintainers" role="maintainer"/>
  <group groupid="maintenance-opensuse.org" role="maintainer"/>
  <group groupid="factory-staging" role="reviewer"/>
  <build>
    <disable repository="ports"/>
  </build>
  <debuginfo>
    <enable/>
  </debuginfo>
  <repository name="standard" rebuild="local">
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>local</arch>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
  <repository name="product">
    <releasetarget project="openSUSE:Leap:16.0:ToTest" repository="product" trigger="manual"/>
    <path project="openSUSE:Leap:16.0:NonFree" repository="standard"/>
    <path project="openSUSE:Leap:16.0" repository="images"/>
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>local</arch>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
  <repository name="ports">
    <arch>armv7l</arch>
  </repository>
  <repository name="images">
    <releasetarget project="openSUSE:Leap:16.0:ToTest" repository="images" trigger="manual"/>
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
</project>
140
integration/mock-obs/server.py
Normal file
@@ -0,0 +1,140 @@
import http.server
import socketserver
import os
import logging
import signal
import sys
import threading
import fnmatch

PORT = 8080
RESPONSE_DIR = "/app/responses"
STATE_DIR = "/tmp/mock_obs_state"


class MockOBSHandler(http.server.SimpleHTTPRequestHandler):
    def do_GET(self):
        logging.info(f"GET request for: {self.path}")
        path_without_query = self.path.split('?')[0]

        # Check for state stored by a PUT request first
        sanitized_put_path = 'PUT' + path_without_query.replace('/', '_')
        state_file_path = os.path.join(STATE_DIR, sanitized_put_path)
        if os.path.exists(state_file_path):
            logging.info(f"Found stored PUT state for {self.path} at {state_file_path}")
            self.send_response(200)
            self.send_header("Content-type", "application/xml")
            file_size = os.path.getsize(state_file_path)
            self.send_header("Content-Length", str(file_size))
            self.end_headers()
            with open(state_file_path, 'rb') as f:
                self.wfile.write(f.read())
            return

        # If there is no PUT state file, fall back to the glob/exact match logic
        self.handle_request('GET')

    def do_PUT(self):
        logging.info(f"PUT request for: {self.path}")
        logging.info(f"Headers: {self.headers}")
        path_without_query = self.path.split('?')[0]

        body = b''
        if self.headers.get('Transfer-Encoding', '').lower() == 'chunked':
            logging.info("Chunked transfer encoding detected")
            while True:
                line = self.rfile.readline().strip()
                if not line:
                    break
                chunk_length = int(line, 16)
                if chunk_length == 0:
                    self.rfile.readline()
                    break
                body += self.rfile.read(chunk_length)
                self.rfile.read(2)  # Read the trailing CRLF
        else:
            content_length = int(self.headers.get('Content-Length', 0))
            body = self.rfile.read(content_length)

        logging.info(f"Body: {body.decode('utf-8')}")
        sanitized_path = 'PUT' + path_without_query.replace('/', '_')
        state_file_path = os.path.join(STATE_DIR, sanitized_path)

        logging.info(f"Saving state for {self.path} to {state_file_path}")
        os.makedirs(os.path.dirname(state_file_path), exist_ok=True)
        with open(state_file_path, 'wb') as f:
            f.write(body)

        self.send_response(200)
        self.send_header("Content-type", "text/plain")
        response_body = b"OK"
        self.send_header("Content-Length", str(len(response_body)))
        self.end_headers()
        self.wfile.write(response_body)

    def do_POST(self):
        logging.info(f"POST request for: {self.path}")
        self.handle_request('POST')

    def do_DELETE(self):
        logging.info(f"DELETE request for: {self.path}")
        self.handle_request('DELETE')

    def handle_request(self, method):
        path_without_query = self.path.split('?')[0]
        sanitized_request_path = method + path_without_query.replace('/', '_')
        logging.info(f"Handling request, looking for match for: {sanitized_request_path}")

        response_file = None
        # Check for a glob match first
        if os.path.exists(RESPONSE_DIR):
            for filename in os.listdir(RESPONSE_DIR):
                if fnmatch.fnmatch(sanitized_request_path, filename):
                    response_file = os.path.join(RESPONSE_DIR, filename)
                    logging.info(f"Found matching response file (glob): {response_file}")
                    break

        # Fall back to an exact match if no glob matched
        if response_file is None:
            exact_file = os.path.join(RESPONSE_DIR, sanitized_request_path)
            if os.path.exists(exact_file):
                response_file = exact_file
                logging.info(f"Found matching response file (exact): {response_file}")

        if response_file:
            logging.info(f"Serving content from {response_file}")
            self.send_response(200)
            self.send_header("Content-type", "application/xml")
            file_size = os.path.getsize(response_file)
            self.send_header("Content-Length", str(file_size))
            self.end_headers()
            with open(response_file, 'rb') as f:
                self.wfile.write(f.read())
        else:
            logging.info(f"Response file not found for {sanitized_request_path}. Sending 404.")
            self.send_response(404)
            self.send_header("Content-type", "text/plain")
            body = f"Mock response not found for {sanitized_request_path}".encode('utf-8')
            self.send_header("Content-Length", str(len(body)))
            self.end_headers()
            self.wfile.write(body)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')

    if not os.path.exists(STATE_DIR):
        logging.info(f"Creating state directory: {STATE_DIR}")
        os.makedirs(STATE_DIR)
    if not os.path.exists(RESPONSE_DIR):
        os.makedirs(RESPONSE_DIR)

    with socketserver.TCPServer(("", PORT), MockOBSHandler) as httpd:
        logging.info(f"Serving mock OBS API on port {PORT}")

        def graceful_shutdown(sig, frame):
            logging.info("Received SIGTERM, shutting down gracefully...")
            threading.Thread(target=httpd.shutdown).start()

        signal.signal(signal.SIGTERM, graceful_shutdown)

        httpd.serve_forever()
    logging.info("Server has shut down.")
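The response files under `mock-obs/responses/` are named by the sanitization scheme in `handle_request`: the HTTP method, then the query-stripped path with every `/` turned into `_`. A small illustration of the mapping (the path matches the test fixtures further below; the query string is illustrative):

```python
method = "GET"
path = "/build/openSUSE:Leap:16.0/_result?view=summary"

# Mirror MockOBSHandler.handle_request: drop the query string, flatten slashes.
sanitized = method + path.split('?')[0].replace('/', '_')
print(sanitized)  # -> GET_build_openSUSE:Leap:16.0__result
```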
1
integration/obs-staging-bot/Dockerfile
Symbolic link
@@ -0,0 +1 @@
./Dockerfile.package
18
integration/obs-staging-bot/Dockerfile.local
Normal file
@@ -0,0 +1,18 @@
# Use a SLE BCI base image
FROM registry.suse.com/bci/bci-base:15.7

# Install any necessary dependencies for the bot
# e.g., git, curl, etc.
RUN zypper -n in git-core curl binutils

# Copy the bot binary and its entrypoint script
COPY obs-staging-bot/obs-staging-bot /usr/local/bin/obs-staging-bot
COPY integration/obs-staging-bot/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Create a non-root user to run the bot
RUN useradd -m -u 1001 bot
USER 1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
19
integration/obs-staging-bot/Dockerfile.package
Normal file
@@ -0,0 +1,19 @@
# Use a SLE BCI base image
FROM registry.suse.com/bci/bci-base:15.7

RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
RUN zypper --gpg-auto-import-keys ref

# Install any necessary dependencies for the bot
# e.g., git, curl, etc.
RUN zypper -n in git-core curl autogits-obs-staging-bot binutils

COPY integration/obs-staging-bot/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh

# Create a non-root user to run the bot
RUN useradd -m -u 1001 bot
USER 1001

# Set the entrypoint
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
28
integration/obs-staging-bot/entrypoint.sh
Normal file
@@ -0,0 +1,28 @@
#!/bin/sh
set -e

# This script waits for the bot's Gitea token to be created,
# exports it as an environment variable, and then executes the main container command.

TOKEN_FILE="/gitea-data/autogits_obs_staging_bot.token"

echo "OBS Staging Bot: Waiting for Gitea autogits_obs_staging_bot token at $TOKEN_FILE..."
while [ ! -s "$TOKEN_FILE" ]; do
    sleep 2
done

GITEA_TOKEN=$(tr -d '\n\r ' < "$TOKEN_FILE")
export GITEA_TOKEN
echo "OBS Staging Bot: GITEA_TOKEN exported."

# Execute the bot as the current container user.
echo "OBS Staging Bot: Executing bot..."

exe=$(which obs-staging-bot)
exe=${exe:-/usr/local/bin/obs-staging-bot}

package=$(rpm -qa | grep autogits-obs-staging-bot) || :

echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :

exec $exe "$@"
136
integration/podman-compose.yml
Normal file
@@ -0,0 +1,136 @@
version: "3.8"

networks:
  gitea-network:
    driver: bridge

services:
  gitea:
    build: ./gitea
    container_name: gitea-test
    environment:
      - GITEA_WORK_DIR=/var/lib/gitea
    networks:
      - gitea-network
    ports:
      # Map the HTTP and SSH ports defined in your app.ini
      - "3000:3000"
      - "3022:3022"
    volumes:
      # Persist Gitea's data (repositories, sqlite db, etc.) to a local directory
      # The :z flag allows sharing between containers
      - ./gitea-data:/var/lib/gitea:z
      # Persist Gitea's logs to a local directory
      - ./gitea-logs:/var/log/gitea:Z
    restart: unless-stopped

  rabbitmq:
    image: rabbitmq:3.13.7-management
    container_name: rabbitmq-test
    healthcheck:
      test: ["CMD", "rabbitmq-diagnostics", "check_running", "-q"]
      interval: 30s
      timeout: 30s
      retries: 3
    networks:
      - gitea-network
    ports:
      # AMQP protocol port with TLS
      - "5671:5671"
      # HTTP management UI
      - "15672:15672"
    volumes:
      # Persist RabbitMQ data
      - ./rabbitmq-data:/var/lib/rabbitmq:Z
      # Mount TLS certs
      - ./rabbitmq-config/certs:/etc/rabbitmq/certs:Z
      # Mount rabbitmq config
      - ./rabbitmq-config/rabbitmq.conf:/etc/rabbitmq/rabbitmq.conf:Z
      # Mount exchange definitions
      - ./rabbitmq-config/definitions.json:/etc/rabbitmq/definitions.json:Z
    restart: unless-stopped

  gitea-publisher:
    build:
      context: ..
      dockerfile: integration/gitea-events-rabbitmq-publisher/Dockerfile${GIWTF_IMAGE_SUFFIX}
    container_name: gitea-publisher
    networks:
      - gitea-network
    depends_on:
      gitea:
        condition: service_started
      rabbitmq:
        condition: service_healthy
    environment:
      - RABBITMQ_HOST=rabbitmq-test
      - RABBITMQ_USERNAME=gitea
      - RABBITMQ_PASSWORD=gitea
      - SSL_CERT_FILE=/usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
    command: [ "-listen", "0.0.0.0:8002", "-topic-domain", "suse", "-debug" ]
    restart: unless-stopped

  workflow-pr:
    build:
      context: ..
      dockerfile: integration/workflow-pr/Dockerfile${GIWTF_IMAGE_SUFFIX}
    container_name: workflow-pr
    networks:
      - gitea-network
    depends_on:
      gitea:
        condition: service_started
      rabbitmq:
        condition: service_healthy
    environment:
      - AMQP_USERNAME=gitea
      - AMQP_PASSWORD=gitea
      - SSL_CERT_FILE=/usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
    volumes:
      - ./gitea-data:/var/lib/gitea:ro,z
      - ./workflow-pr/workflow-pr.json:/etc/workflow-pr.json:ro,z
      - ./workflow-pr-repos:/var/lib/workflow-pr/repos:Z
    command: [
      "-check-on-start",
      "-debug",
      "-gitea-url", "http://gitea-test:3000",
      "-url", "amqps://rabbitmq-test:5671",
      "-config", "/etc/workflow-pr.json",
      "-repo-path", "/var/lib/workflow-pr/repos"
    ]
    restart: unless-stopped

  mock-obs:
    build: ./mock-obs
    container_name: mock-obs
    networks:
      - gitea-network
    ports:
      - "8080:8080"
    volumes:
      - ./mock-obs/responses:/app/responses:z  # Use :z for shared SELinux label
    restart: unless-stopped

  obs-staging-bot:
    build:
      context: ..
      dockerfile: integration/obs-staging-bot/Dockerfile${GIWTF_IMAGE_SUFFIX}
    container_name: obs-staging-bot
    networks:
      - gitea-network
    depends_on:
      gitea:
        condition: service_started
      mock-obs:
        condition: service_started
    environment:
      - OBS_USER=mock
      - OBS_PASSWORD=mock-long-password
    volumes:
      - ./gitea-data:/gitea-data:ro,z
    command:
      - "-debug"
      - "-gitea-url=http://gitea-test:3000"
      - "-obs=http://mock-obs:8080"
      - "-obs-web=http://mock-obs:8080"
    restart: unless-stopped
30
integration/rabbitmq-config/certs/cert.pem
Normal file
@@ -0,0 +1,30 @@
-----BEGIN CERTIFICATE-----
MIIFKzCCAxOgAwIBAgIUJsg/r0ZyIVxtAkrlZKOr4LvYEvMwDQYJKoZIhvcNAQEL
BQAwGDEWMBQGA1UEAwwNcmFiYml0bXEtdGVzdDAeFw0yNjAxMjQxMjQyMjNaFw0z
NjAxMjIxMjQyMjNaMBgxFjAUBgNVBAMMDXJhYmJpdG1xLXRlc3QwggIiMA0GCSqG
SIb3DQEBAQUAA4ICDwAwggIKAoICAQC9OjTq4DgqVo0mRpS8DGRR6SFrSpb2bqnl
YI7xSI3y67i/oP4weiZSawk2+euxhsN4FfOlsAgvpg4WyRQH5PwnXOA1Lxz51qp1
t0VumE3B1RDheiBTE8loG1FvmikOiek2gzz76nK0R1sbKY1+/NVJpMs6dL6NzJXG
N6aCpWTk7oeY+lW5bPBG0VRA7RUG80w9R9RDtqYc0SYUmm43tjjxPZ81rhCXFx/F
v1kxnNTQJdATNrTn9SofymSfm42f4loOGyGBsqJYybKXOPDxrM1erBN5eCwTpJMS
4J30aMSdQTzza2Z4wi2LR0vq/FU/ouqzlRp7+7tNJbVAsqhiUa2eeAVkFwZl9wRw
lddY0W85U507nw5M3iQv2GTOhJRXwhWpzDUFQ0fT56hAY/V+VbF1iHGAVIz4XlUj
gC21wuXz0xRdqP8cCd8UHLSbp8dmie161GeKVwO037aP+1hZJbm7ePsS5Na+qYG1
LCy0GhfQn71BsYUaGJtfRcaMwIbqaNIYn+Y6S1FVjxDPXCxFXDrIcFvldmJYTyeK
7KrkO2P1RbEiwYyPPUhthbb1Agi9ZutZsnadmPRk27t9bBjNnWaY2z17hijnzVVz
jOHuPlpb7cSaagVzLTT0zrZ+ifnZWwdl0S2ZrjBAeVrkNt7DOCUqwBnuBqYiRZFt
A1QicHxaEQIDAQABo20wazAdBgNVHQ4EFgQU3l25Ghab2k7UhwxftZ2vZ1HO9Sow
HwYDVR0jBBgwFoAU3l25Ghab2k7UhwxftZ2vZ1HO9SowDwYDVR0TAQH/BAUwAwEB
/zAYBgNVHREEETAPgg1yYWJiaXRtcS10ZXN0MA0GCSqGSIb3DQEBCwUAA4ICAQB9
ilcsRqIvnyN25Oh668YC/xxyeNTIaIxjMLyJaMylBRjNwo1WfbdpXToaEXgot5gK
5HGlu3OIBBwBryNAlBtf/usxzLzmkEsm1Dsn9sJNY1ZTkD8MO9yyOtLqBlqAsIse
oPVjzSdjk1fP3uyoG/ZUVAFZHZD3/9BEsftfS13oUVxo7vYz1DSyUATT/4QTYMQB
PytL6EKJ0dLyuy7rIkZVkaUi+P7GuDXj25Mi6Zkxaw2QnssSuoqy1bAMkzEyNFK5
0wlNWEY8H3jRZuAz1T4AXb9sjeCgBKZoWXgmGbzleOophdzvlq66UGAWPWYFGp8Q
4GJognovhKzSY9+3n+rMPLAXSao48SYDlyTOZeBo1DTluR5QjVd+NWbEdIsA6buQ
a6uPTSVKsulm7hyUlEZp+SsYAtVoZx3jzKKjZXjnaxOfUFWx6pTxNXvxR7pQ/8Ls
IfduGy4VjKVQdyuwCE7eVEPDK6d53WWs6itziuj7gfq8mHvZivIA65z05lTwqkvb
1WS2aht+zacqVSYyNrK+/kJA2CST3ggc1EO73lRvbfO9LJZWMdO+f/tkXH4zkfmL
A3JtJcLOWuv+ZrZvHMpKlBFNMySxE3IeGX+Ad9bGyhZvZULut95/QD7Xy4cPRZHF
R3SRn0rn/BeTly+5fkEoFk+ttah8IbwzhduPyPIxng==
-----END CERTIFICATE-----
52
integration/rabbitmq-config/certs/key.pem
Normal file
@@ -0,0 +1,52 @@
-----BEGIN PRIVATE KEY-----
MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQC9OjTq4DgqVo0m
RpS8DGRR6SFrSpb2bqnlYI7xSI3y67i/oP4weiZSawk2+euxhsN4FfOlsAgvpg4W
yRQH5PwnXOA1Lxz51qp1t0VumE3B1RDheiBTE8loG1FvmikOiek2gzz76nK0R1sb
KY1+/NVJpMs6dL6NzJXGN6aCpWTk7oeY+lW5bPBG0VRA7RUG80w9R9RDtqYc0SYU
mm43tjjxPZ81rhCXFx/Fv1kxnNTQJdATNrTn9SofymSfm42f4loOGyGBsqJYybKX
OPDxrM1erBN5eCwTpJMS4J30aMSdQTzza2Z4wi2LR0vq/FU/ouqzlRp7+7tNJbVA
sqhiUa2eeAVkFwZl9wRwlddY0W85U507nw5M3iQv2GTOhJRXwhWpzDUFQ0fT56hA
Y/V+VbF1iHGAVIz4XlUjgC21wuXz0xRdqP8cCd8UHLSbp8dmie161GeKVwO037aP
+1hZJbm7ePsS5Na+qYG1LCy0GhfQn71BsYUaGJtfRcaMwIbqaNIYn+Y6S1FVjxDP
XCxFXDrIcFvldmJYTyeK7KrkO2P1RbEiwYyPPUhthbb1Agi9ZutZsnadmPRk27t9
bBjNnWaY2z17hijnzVVzjOHuPlpb7cSaagVzLTT0zrZ+ifnZWwdl0S2ZrjBAeVrk
Nt7DOCUqwBnuBqYiRZFtA1QicHxaEQIDAQABAoICAA+AWvDpzNgVDouV6R3NkxNN
upXgPqUx9BuNETCtbal6i4AxR1l/zC9gwti82QTKQi2OeM74MHd8zjcqIkiyRsDP
wDNDKIfEAONTT+4LLoWEN5WNDGRZ4Nw1LrLqiVX+ULtNPXvynRJtLQa43PVL74oQ
pLBle23A1n0uNmcJ9w21B6ktysN9q+JVSCZodZpD6Jk1jus8JXgDXy/9Za2NMTV8
A5ShbYz/ETSBJCSnERz7GARW7TN6V0jS6vLTSqMQJyn0KYbHNDr7TPTL7psRuaI5
jP/cqxmx1/WKLo5k3cR3IW/cesDGQXZhMRQvNymXJkxvWMPS36lmfyZtbFNflw4Z
9OD+2RKt5jFDJjG8fYiYoYBdLiTj2Wdvo4mbRPNkTL75o65riDkDCQuZhDXFBm3s
B1aDv5y1AXrzNZ5JSikszKgbLNPYB0rI3unp6i0P1985w6dyel0MGG+ouaeiyrxS
9IgJDnE4BJ79mEzHTXtbZ/+3aGAK/Y6mU8Pz2s6/+6ccT0miievsMS+si1KESF31
WLnsMdcrJcxqcm7Ypo24G0yBJluSDKtD1cqQUGN1MKp+EEv1SCH+4csaa3ooRB0o
YveySjqxtmhVpQuY3egCOaXhPmX7lgYwoe+G4UIkUMwPn20WMg+jFxgPASdh4lqE
mzpePP7STvEZAr+rrLu1AoIBAQDmCEiKOsUTtJlX3awOIRtCkIqBxS1E6rpyjfxK
A6+zpXnE++8MhIJ07+9bPdOshGjS3JbJ+hu+IocbNg++rjRArYQnJh8/qBZ2GB2v
Ryfptsoxtk/xUsmOfchvk4tOjvDHZrJehUtGc+LzX/WUqpgtEk1Gnx7RGRuDNnqS
Q1+yU4NubHwOHPswBBXOnVtopcAHFpKhbKRFOHOwMZN99qcWVIkv4J9c6emcPMLI
I/QPIvwB6WmbLa0o3JNXlD4kPdqCgNW36KEFiW8m+4tgzF3HWYSAyIeBRFG7ouE6
yk5hiptPKhZlTmTAkQSssCXksiTw1rsspFULZSRyaaaPunvVAoIBAQDSlrKu+B2h
AJtxWy5MQDOiroqT3KDneIGXPYgH3/tiDmxy0CIEbSb5SqZ6zAmihs3dWWCmc1JH
YObRrqIxu+qVi4K+Uz8l7WBrS7DkjZjajq+y/mrZYUNRoL2q9mnNqRNan7zxWDJc
U4u2NH9P4LOz6ttE4OG9SC3/gZLoepA+ANZatu93749IT7z8ske0MVPP76jVI1Gl
D7cPIlzcBUdJgNV8UOkxeqU3+S6Jn17Tkx5qMWND/2BCN4voQ4pfGWSkbaHlMLh1
2SbVuR+HYPY3aPJeSY7MEPoc7d2SSVOcVDr2AQwSDSCCgIFZOZlawehUz9R51hK8
LlaccFWXhS9NAoIBAEFZNRJf48DXW4DErq5M5WuhmFeJZnTfohwNDhEQvwdwCQnW
8HBD7LO/veXTyKCH9SeCFyxF6z+2m181mn93Cc0d/h8JC3OQEuF1tGko88PHc+Vv
f4J1HGFohlp8NeUZYnmjSSTlBR98qIqvRhr348daHa3kYmLQmSpLfcKzdSo542qp
UwzHWuynHHLX7THrdIQO+5T0Qi6P/P2e9+GfApSra1W4oE1K/lyuPj+RRzJNo/3/
C0tUTI8BKrKEoKq3D65nX0+hvKzQAE24xD25kSKi4aucTDKC8B04BngnJOE8+SYi
NL6O6Lxz9joAyKMRoMDyn7Xs8WQNVa9TKEhImAkCggEBAMljmIm/egZIoF7thf8h
vr+rD5eL/Myf776E95wgVTVW+dtqs71r7UOmYkM48VXeeO1f1hAYZO0h/Fs2GKJb
RWGyQ1xkHBXXRsgVYJuR1kXdAqW4rNIqM8jSYdAnStOFB5849+YOJEsrEocy+TWY
fAJpbTwXm4n6hxK8BZQR8fN5tYSXQbd+/5V1vBQlInFuYuqOFPWPizrBJp1wjUFU
QvJGJON4NSo+UdaPlDPEl1jabtG7XWTfylxI5qE+RgvgKuEcfyDBUQZSntLw8Pf0
gEJJOM92pPr+mVIlICoPucfcvW4ZXkO9DgP/hLOhY8jpe5fwERBa6xvPbMC6pP/8
PFkCggEBAOLtvboBThe57QRphsKHmCtRJHmT4oZzhMYsE+5GMGYzPNWod1hSyfXn
EB8iTmAFP5r7FdC10B8mMpACXuDdi2jbmlYOTU6xNTprSKtv8r8CvorWJdsQwRsy
pZ7diSCeyi0z/sIx//ov0b3WD0E8BG/HWsFbX0p5xXpaljYEv5dK7xUiWgBW+15a
N1AeVcPiXRDwhQMVcvVOvzgwKsw+Rpls/9W4hihcBHaiMcBUDFWxJtnf4ZAGAZS3
/694MOYlmfgT/cDqF9oOsCdxM0w24kL0dcUM7zPk314ixAAfUwXaxisBhS2roJ88
HsuK9JPSK/AS0IqUtKiq4LZ9ErixYF0=
-----END PRIVATE KEY-----
35
integration/rabbitmq-config/definitions.json
Executable file
@@ -0,0 +1,35 @@
{
  "users": [
    {
      "name": "gitea",
      "password_hash": "5IdZmMJhNb4otX/nz9Xtmkpj9khl6+5eAmXNs/oHYwQNO3jg",
      "hashing_algorithm": "rabbit_password_hashing_sha256",
      "tags": "administrator"
    }
  ],
  "vhosts": [
    {
      "name": "/"
    }
  ],
  "permissions": [
    {
      "user": "gitea",
      "vhost": "/",
      "configure": ".*",
      "write": ".*",
      "read": ".*"
    }
  ],
  "exchanges": [
    {
      "name": "pubsub",
      "vhost": "/",
      "type": "topic",
      "durable": true,
      "auto_delete": false,
      "internal": false,
      "arguments": {}
    }
  ]
}
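If the broker credentials ever need to change, the `password_hash` above follows RabbitMQ's `rabbit_password_hashing_sha256` scheme: base64 of a 4-byte random salt concatenated with SHA-256(salt + password). A small generator sketch (the `gitea` password matches the compose environment):

```python
import base64
import hashlib
import os

def rabbitmq_password_hash(password: str) -> str:
    # rabbit_password_hashing_sha256: base64(salt + sha256(salt + password))
    salt = os.urandom(4)
    digest = hashlib.sha256(salt + password.encode("utf-8")).digest()
    return base64.b64encode(salt + digest).decode("ascii")

print(rabbitmq_password_hash("gitea"))
```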
83
integration/test-plan.md
Normal file
@@ -0,0 +1,83 @@
# Test Plan: workflow-pr Bot

## 1. Introduction

This document outlines the test plan for the `workflow-pr` bot. The bot is responsible for synchronizing pull requests between ProjectGit and PackageGit repositories, managing reviews, and handling merges. This test plan aims to ensure the bot's functionality, reliability, and performance.

## 2. Scope

### In Scope

* Pull request synchronization (creation, update, closing).
* Reviewer management (adding, re-adding, mandatory vs. advisory).
* Merge management, including the `ManualMergeOnly` and `ManualMergeProject` flags.
* Configuration parsing (`workflow.config`).
* Label management (`staging/Auto`, `review/Pending`, `review/Done`).
* Maintainership and permissions handling.

### Out of Scope

* Package deletion requests (planned feature).
* Underlying infrastructure (Gitea, RabbitMQ, OBS).
* Performance and load testing.
* Closing a PackageGit PR (currently disabled).

## 3. Test Objectives

* Verify that pull requests are correctly synchronized between ProjectGit and PackageGit.
* Ensure that reviewers are correctly added to pull requests based on the configuration.
* Validate that pull requests are merged only when all conditions are met.
* Confirm that the bot correctly handles various configurations in `workflow.config`.
* Verify that labels are correctly applied to pull requests.
* Ensure that maintainership and permissions are correctly enforced.

## 4. Test Strategy

The testing will be conducted in a dedicated test environment that mimics the production environment. The strategy combines:

* **Component Testing:** Testing individual components of the bot in isolation using unit tests written in Go.
* **Integration Testing:** Testing the bot's interaction with Gitea, RabbitMQ, and a mock OBS server using `pytest`.
* **End-to-End Testing:** Testing the complete workflow, from creating a pull request to merging it, using `pytest`.

### Test Automation

* **Unit Tests:** Go's built-in testing framework will be used to write unit tests for individual functions and methods.
* **Integration and End-to-End Tests:** `pytest` will be used to write integration and end-to-end tests that use the Gitea API to create pull requests and verify the bot's behavior; a minimal sketch follows this list.
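As a shape reference, here is a minimal sketch of such a test, assuming the `gitea_env` fixture from `integration/tests/conftest.py` and the `GiteaAPIClient` helpers defined in `integration/tests/lib/common_test_utils.py` (the title and assertion are illustrative):

```python
def test_package_pr_gets_bot_comment(gitea_env):
    # Create a PR in a PackageGit repository via the Gitea API.
    pr = gitea_env.create_gitea_pr("pool/pkgA", diff_content="", title="Test PR")

    # get_comments polls with retries, so this waits for the bot to react.
    comments = gitea_env.get_comments("pool/pkgA", pr["number"])
    assert comments, "expected the workflow-pr bot to comment on the PR"
```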
### Success Metrics

* **Test Coverage:** The goal is to achieve at least 80% test coverage for the bot's codebase.
* **Bug Detection Rate:** The number of bugs found during the testing phase.
* **Test Pass Rate:** The percentage of test cases that pass without any issues.

## 5. Test Cases

| Test Case ID | Description | Steps to Reproduce | Expected Results | Priority |
| :--- | :--- | :--- | :--- | :--- |
| **TC-SYNC-001** | **Create ProjectGit PR from PackageGit PR** | 1. Create a new PR in a PackageGit repository. | 1. A new PR is created in the corresponding ProjectGit repository with the title "Forwarded PRs: <package_name>".<br>2. The ProjectGit PR description contains a link to the PackageGit PR (e.g., `PR: org/package_repo!pr_number`).<br>3. The package submodule in the ProjectGit PR points to the PackageGit PR's commit. | High |
| **TC-SYNC-002** | **Update ProjectGit PR from PackageGit PR** | 1. Push a new commit to an existing PackageGit PR. | 1. The corresponding ProjectGit PR's head branch is updated with the new commit. | High |
| **TC-SYNC-003** | **WIP Flag Synchronization** | 1. Mark a PackageGit PR as "Work In Progress".<br>2. Remove the WIP flag from the PackageGit PR. | 1. The corresponding ProjectGit PR is also marked as "Work In Progress".<br>2. The WIP flag on the ProjectGit PR is removed. | Medium |
| **TC-SYNC-004** | **WIP Flag (multiple referenced package PRs)** | 1. Create a ProjectGit PR that references multiple PackageGit PRs.<br>2. Mark one of the PackageGit PRs as "Work In Progress".<br>3. Remove the "Work In Progress" flag from all PackageGit PRs. | 1. The ProjectGit PR is marked as "Work In Progress".<br>2. The "Work In Progress" flag is removed from the ProjectGit PR only after it has been removed from all associated PackageGit PRs. | Medium |
| **TC-SYNC-005** | **NoProjectGitPR = true, edits disabled** | 1. Set `NoProjectGitPR = true` in `workflow.config`.<br>2. Create a PackageGit PR without "Allow edits from maintainers" enabled.<br>3. Push a new commit to the PackageGit PR. | 1. No ProjectGit PR is created.<br>2. The bot adds a warning comment to the PackageGit PR explaining that it cannot update the PR. | High |
| **TC-SYNC-006** | **NoProjectGitPR = true, edits enabled** | 1. Set `NoProjectGitPR = true` in `workflow.config`.<br>2. Create a PackageGit PR with "Allow edits from maintainers" enabled.<br>3. Push a new commit to the PackageGit PR. | 1. No ProjectGit PR is created.<br>2. The submodule commit on the project PR is updated with the new commit from the PackageGit PR. | High |
| **TC-COMMENT-001** | **Detect duplicate comments** | 1. Create a PackageGit PR.<br>2. Wait for the `workflow-pr` bot to act on the PR.<br>3. Edit the body of the PR to trigger the bot a second time. | 1. The bot should not post a duplicate comment. | High |
| **TC-REVIEW-001** | **Add mandatory reviewers** | 1. Create a new PackageGit PR. | 1. All mandatory reviewers are added to both the PackageGit and ProjectGit PRs. | High |
| **TC-REVIEW-002** | **Add advisory reviewers** | 1. Create a new PackageGit PR with advisory reviewers defined in the configuration. | 1. Advisory reviewers are added to the PR, but their approval is not required for merging. | Medium |
| **TC-REVIEW-003** | **Re-add reviewers** | 1. Push a new commit to a PackageGit PR after it has been approved. | 1. The original reviewers are re-added to the PR. | Medium |
| **TC-REVIEW-004** | **Package PR created by a maintainer** | 1. Create a PackageGit PR from the account of a package maintainer. | 1. No review is requested from other package maintainers. | High |
| **TC-REVIEW-005** | **Package PR created by an external user (approve)** | 1. Create a PackageGit PR from the account of a user who is not a package maintainer.<br>2. One of the package maintainers approves the PR. | 1. All package maintainers are added as reviewers.<br>2. Once one maintainer approves the PR, the other maintainers are removed as reviewers. | High |
| **TC-REVIEW-006** | **Package PR created by an external user (reject)** | 1. Create a PackageGit PR from the account of a user who is not a package maintainer.<br>2. One of the package maintainers rejects the PR. | 1. All package maintainers are added as reviewers.<br>2. Once one maintainer rejects the PR, the other maintainers are removed as reviewers. | High |
| **TC-REVIEW-007** | **Package PR created by a maintainer with ReviewRequired=true** | 1. Set `ReviewRequired = true` in `workflow.config`.<br>2. Create a PackageGit PR from the account of a package maintainer. | 1. A review is requested from other package maintainers if available. | High |
| **TC-MERGE-001** | **Automatic Merge** | 1. Create a PackageGit PR.<br>2. Ensure all mandatory reviews are completed on both project and package PRs. | 1. The PR is automatically merged. | High |
| **TC-MERGE-002** | **ManualMergeOnly with Package Maintainer** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a package maintainer for that package. | 1. The PR is merged. | High |
| **TC-MERGE-003** | **ManualMergeOnly with unauthorized user** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a user who is not a maintainer for that package. | 1. The PR is not merged. | High |
| **TC-MERGE-004** | **ManualMergeOnly with multiple packages** | 1. Create a ProjectGit PR that references multiple PackageGit PRs with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on each package PR from the account of a package maintainer. | 1. The PR is merged only after "merge ok" is commented on all associated PackageGit PRs. | High |
| **TC-MERGE-005** | **ManualMergeOnly with Project Maintainer** | 1. Create a PackageGit PR with `ManualMergeOnly` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the package PR from the account of a project maintainer. | 1. The PR is merged. | High |
| **TC-MERGE-006** | **ManualMergeProject with Project Maintainer** | 1. Create a PackageGit PR with `ManualMergeProject` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the project PR from the account of a project maintainer. | 1. The PR is merged. | High |
| **TC-MERGE-007** | **ManualMergeProject with unauthorized user** | 1. Create a PackageGit PR with `ManualMergeProject` set to `true`.<br>2. Ensure all mandatory reviews are completed on both project and package PRs.<br>3. Comment "merge ok" on the project PR from the account of a package maintainer. | 1. The PR is not merged. | High |
| **TC-CONFIG-001** | **Invalid Configuration** | 1. Provide an invalid `workflow.config` file. | 1. The bot reports an error and does not process any PRs. | High |
| **TC-LABEL-001** | **Apply `staging/Auto` label** | 1. Create a new PackageGit PR. | 1. The `staging/Auto` label is applied to the ProjectGit PR. | High |
| **TC-LABEL-002** | **Apply `review/Pending` label** | 1. Create a new PackageGit PR. | 1. The `review/Pending` label is applied to the ProjectGit PR when there are pending reviews. | Medium |
| **TC-LABEL-003** | **Apply `review/Done` label** | 1. Ensure all mandatory reviews for a PR are completed. | 1. The `review/Done` label is applied to the ProjectGit PR when all mandatory reviews are completed. | Medium |
0
integration/tests/__init__.py
Normal file
78
integration/tests/conftest.py
Normal file
@@ -0,0 +1,78 @@
"""
This module contains pytest fixtures for setting up the test environment.
"""

import pytest
import requests
import time
import os

# Assuming GiteaAPIClient is in tests/lib/common_test_utils.py
from tests.lib.common_test_utils import GiteaAPIClient


@pytest.fixture(scope="session")
def gitea_env():
    """
    Sets up the Gitea environment with dummy data and provides a GiteaAPIClient instance.
    """
    gitea_url = "http://127.0.0.1:3000"

    # Read the admin token
    admin_token_path = "./gitea-data/admin.token"  # Corrected path
    admin_token = None
    try:
        with open(admin_token_path, "r") as f:
            admin_token = f.read().strip()
    except FileNotFoundError:
        raise Exception(f"Admin token file not found at {admin_token_path}. Ensure it's generated and accessible.")

    # Headers for authenticated requests
    auth_headers = {"Authorization": f"token {admin_token}", "Content-Type": "application/json"}

    # Wait for Gitea to become available
    print(f"Waiting for Gitea at {gitea_url}...")
    max_retries = 30
    for i in range(max_retries):
        try:
            # Check a specific API endpoint that indicates readiness
            response = requests.get(f"{gitea_url}/api/v1/version", headers=auth_headers, timeout=5)
            if response.status_code == 200:
                print("Gitea API is available.")
                break
        except requests.exceptions.ConnectionError:
            pass
        print(f"Gitea not ready ({response.status_code if 'response' in locals() else 'ConnectionError'}), retrying in 5 seconds... ({i+1}/{max_retries})")
        time.sleep(5)
    else:
        raise Exception("Gitea did not become available within the expected time.")

    client = GiteaAPIClient(base_url=gitea_url, token=admin_token)

    # Set up the dummy data
    print("--- Starting Gitea Dummy Data Setup from Pytest Fixture ---")
    client.create_org("products")
    client.create_org("pool")

    client.create_repo("products", "SLFO")
    client.create_repo("pool", "pkgA")
    client.create_repo("pool", "pkgB")

    # The add_submodules method also creates workflow.config and staging.config
    client.add_submodules("products", "SLFO")

    client.add_collaborator("products", "SLFO", "autogits_obs_staging_bot", "write")
    client.add_collaborator("products", "SLFO", "workflow-pr", "write")
    client.add_collaborator("pool", "pkgA", "workflow-pr", "write")
    client.add_collaborator("pool", "pkgB", "workflow-pr", "write")

    client.update_repo_settings("products", "SLFO")
    client.update_repo_settings("pool", "pkgA")
    client.update_repo_settings("pool", "pkgB")
    print("--- Gitea Dummy Data Setup Complete ---")
    time.sleep(5)  # Add a small delay for Gitea to fully process changes

    yield client

    # Teardown (optional, depending on test strategy)
    # For now, we leave resources in place for inspection. If a clean slate is needed for each
    # test, this fixture's scope would be 'function' and teardown logic would be added here.
23
integration/tests/data/build_result.xml.template
Normal file
@@ -0,0 +1,23 @@
<resultlist state="0fef640bfb56c3e76fcfb698b19b59c0">
  <result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="aarch64" code="unpublished" state="unpublished">
    <scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
    <scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
    <status package="openjpeg2" code="succeeded"/>
  </result>
  <result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="ppc64le" code="unpublished" state="unpublished">
    <scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
    <scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
    <status package="openjpeg2" code="succeeded"/>
  </result>
  <result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="x86_64" code="unpublished" state="unpublished">
    <scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
    <scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
    <status package="openjpeg2" code="succeeded"/>
  </result>
  <result project="SUSE:SLFO:Main:PullRequest:1881" repository="standard" arch="s390x" code="unpublished" state="unpublished">
    <scmsync>https://src.suse.de/products/SLFO.git?onlybuild=openjpeg2#d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scmsync>
    <scminfo>d99ac14dedf9f44e1744c71aaf221d15f6bed479ca11f15738e98f3bf9ae05a1</scminfo>
    <status package="openjpeg2" code="succeeded"/>
  </result>
</resultlist>
@@ -0,0 +1,18 @@
<project name="openSUSE:Leap:16.0:PullRequest">
  <title>Leap 16.0 PullRequest area</title>
  <description>Base project to define the pull request builds</description>
  <person userid="autogits_obs_staging_bot" role="maintainer"/>
  <person userid="maxlin_factory" role="maintainer"/>
  <group groupid="maintenance-opensuse.org" role="maintainer"/>
  <debuginfo>
    <enable/>
  </debuginfo>
  <repository name="standard">
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <arch>x86_64</arch>
    <arch>i586</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
</project>
59
integration/tests/data/source_openSUSE_Leap_16.0__meta
Normal file
@@ -0,0 +1,59 @@
<project name="openSUSE:Leap:16.0">
  <title>openSUSE Leap 16.0 based on SLFO</title>
  <description>Leap 16.0 based on SLES 16.0 (specifically SLFO:1.2)</description>
  <link project="openSUSE:Backports:SLE-16.0"/>
  <scmsync>http://gitea-test:3000/products/SLFO#main</scmsync>
  <person userid="dimstar_suse" role="maintainer"/>
  <person userid="lkocman-factory" role="maintainer"/>
  <person userid="maxlin_factory" role="maintainer"/>
  <person userid="factory-auto" role="reviewer"/>
  <person userid="licensedigger" role="reviewer"/>
  <group groupid="autobuild-team" role="maintainer"/>
  <group groupid="factory-maintainers" role="maintainer"/>
  <group groupid="maintenance-opensuse.org" role="maintainer"/>
  <group groupid="factory-staging" role="reviewer"/>
  <build>
    <disable repository="ports"/>
  </build>
  <debuginfo>
    <enable/>
  </debuginfo>
  <repository name="standard" rebuild="local">
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>local</arch>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
  <repository name="product">
    <releasetarget project="openSUSE:Leap:16.0:ToTest" repository="product" trigger="manual"/>
    <path project="openSUSE:Leap:16.0:NonFree" repository="standard"/>
    <path project="openSUSE:Leap:16.0" repository="images"/>
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>local</arch>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
  <repository name="ports">
    <arch>armv7l</arch>
  </repository>
  <repository name="images">
    <releasetarget project="openSUSE:Leap:16.0:ToTest" repository="images" trigger="manual"/>
    <path project="openSUSE:Leap:16.0" repository="standard"/>
    <path project="openSUSE:Backports:SLE-16.0" repository="standard"/>
    <path project="SUSE:SLFO:1.2" repository="standard"/>
    <arch>i586</arch>
    <arch>x86_64</arch>
    <arch>aarch64</arch>
    <arch>ppc64le</arch>
    <arch>s390x</arch>
  </repository>
</project>
301
integration/tests/lib/common_test_utils.py
Normal file
@@ -0,0 +1,301 @@
import os
import time
import pytest
import requests
import json
import xml.etree.ElementTree as ET
from pathlib import Path
import base64

TEST_DATA_DIR = Path(__file__).parent.parent / "data"
BUILD_RESULT_TEMPLATE = TEST_DATA_DIR / "build_result.xml.template"
MOCK_RESPONSES_DIR = Path(__file__).parent.parent.parent / "mock-obs" / "responses"
MOCK_BUILD_RESULT_FILE = (
    MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0:PullRequest:*__result"
)
MOCK_BUILD_RESULT_FILE1 = MOCK_RESPONSES_DIR / "GET_build_openSUSE:Leap:16.0__result"


@pytest.fixture
def mock_build_result():
    """
    Fixture to create a mock build result file from the template.
    Returns a factory function that the test can call with parameters.
    """

    def _create_result_file(package_name: str, code: str):
        tree = ET.parse(BUILD_RESULT_TEMPLATE)
        root = tree.getroot()
        for status_tag in root.findall(".//status"):
            status_tag.set("package", package_name)
            status_tag.set("code", code)

        MOCK_RESPONSES_DIR.mkdir(exist_ok=True)
        tree.write(MOCK_BUILD_RESULT_FILE)
        tree.write(MOCK_BUILD_RESULT_FILE1)
        return str(MOCK_BUILD_RESULT_FILE)

    yield _create_result_file

    # Clean up the generated response files; guard each unlink so teardown
    # cannot fail if a file was removed independently.
    if MOCK_BUILD_RESULT_FILE.exists():
        MOCK_BUILD_RESULT_FILE.unlink()
    if MOCK_BUILD_RESULT_FILE1.exists():
        MOCK_BUILD_RESULT_FILE1.unlink()


class GiteaAPIClient:
    def __init__(self, base_url, token):
        self.base_url = base_url
        self.headers = {"Authorization": f"token {token}", "Content-Type": "application/json"}

    def _request(self, method, path, **kwargs):
        url = f"{self.base_url}/api/v1/{path}"
        response = requests.request(method, url, headers=self.headers, **kwargs)
        try:
            response.raise_for_status()
        except requests.exceptions.HTTPError as e:
            print(f"HTTPError in _request: {e}")
            print(f"Response Content: {e.response.text}")
            raise
        return response

    def create_org(self, org_name):
        print(f"--- Checking organization: {org_name} ---")
        try:
            self._request("GET", f"orgs/{org_name}")
            print(f"Organization '{org_name}' already exists.")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                print(f"Creating organization '{org_name}'...")
                data = {"username": org_name, "full_name": org_name}
                self._request("POST", "orgs", json=data)
                print(f"Organization '{org_name}' created.")
            else:
                raise

    def create_repo(self, org_name, repo_name):
        print(f"--- Checking repository: {org_name}/{repo_name} ---")
        try:
            self._request("GET", f"repos/{org_name}/{repo_name}")
            print(f"Repository '{org_name}/{repo_name}' already exists.")
        except requests.exceptions.HTTPError as e:
            if e.response.status_code == 404:
                print(f"Creating repository '{org_name}/{repo_name}'...")
                data = {
                    "name": repo_name,
                    "auto_init": True,
                    "default_branch": "main",
                    "gitignores": "Go",
                    "license": "MIT",
                    "private": False,
                    "readme": "Default"
                }
                self._request("POST", f"orgs/{org_name}/repos", json=data)
                print(f"Repository '{org_name}/{repo_name}' created with a README.")
                time.sleep(1)  # Allow Git operations to become available
            else:
                raise

    def add_collaborator(self, org_name, repo_name, collaborator_name, permission="write"):
        print(f"--- Adding {collaborator_name} as a collaborator to {org_name}/{repo_name} with '{permission}' permission ---")
        data = {"permission": permission}
        # The Gitea API returns 204 No Content on success and doesn't fail if the
        # collaborator is already present.
        self._request("PUT", f"repos/{org_name}/{repo_name}/collaborators/{collaborator_name}", json=data)
        print(f"Attempted to add {collaborator_name} to {org_name}/{repo_name}.")

    def add_submodules(self, org_name, repo_name):
        print(f"--- Adding submodules to {org_name}/{repo_name} using diffpatch ---")
        parent_repo_path = f"repos/{org_name}/{repo_name}"

        try:
            self._request("GET", f"{parent_repo_path}/contents/.gitmodules")
            print("Submodules appear to be already added. Skipping.")
            return
        except requests.exceptions.HTTPError as e:
            if e.response.status_code != 404:
                raise

        # Get the latest commit SHAs for the submodules
        pkg_a_sha = self._request("GET", "repos/pool/pkgA/branches/main").json()["commit"]["id"]
        pkg_b_sha = self._request("GET", "repos/pool/pkgB/branches/main").json()["commit"]["id"]

        if not pkg_a_sha or not pkg_b_sha:
            raise Exception("Error: Could not get submodule commit SHAs. Cannot apply patch.")

        diff_content = f"""diff --git a/.gitmodules b/.gitmodules
new file mode 100644
index 0000000..f1838bd
--- /dev/null
+++ b/.gitmodules
@@ -0,0 +1,6 @@
+[submodule "pkgA"]
+ path = pkgA
+ url = ../../pool/pkgA.git
+[submodule "pkgB"]
+ path = pkgB
+ url = ../../pool/pkgB.git
diff --git a/pkgA b/pkgA
new file mode 160000
index 0000000..{pkg_a_sha}
--- /dev/null
+++ b/pkgA
@@ -0,0 +1 @@
+Subproject commit {pkg_a_sha}
diff --git a/pkgB b/pkgB
new file mode 160000
index 0000000..{pkg_b_sha}
--- /dev/null
+++ b/pkgB
@@ -0,0 +1 @@
+Subproject commit {pkg_b_sha}
diff --git a/workflow.config b/workflow.config
new file mode 100644
--- /dev/null
+++ b/workflow.config
@@ -0,0 +7 @@
+{{
+ "Workflows": ["pr"],
+ "GitProjectName": "products/SLFO#main",
+ "Organization": "pool",
+ "Branch": "main",
+ "ManualMergeProject": true,
+ "Reviewers": [ "-autogits_obs_staging_bot" ]
+}}
diff --git a/staging.config b/staging.config
new file mode 100644
--- /dev/null
+++ b/staging.config
@@ -0,0 +3 @@
+{{
+ "ObsProject": "openSUSE:Leap:16.0",
+ "StagingProject": "openSUSE:Leap:16.0:PullRequest"
+}}
"""
        message = "Add pkgA and pkgB as submodules and config files"
        data = {
            "branch": "main",
            "content": diff_content,
            "message": message
        }
        print(f"Applying submodule patch to {org_name}/{repo_name}...")
        self._request("POST", f"{parent_repo_path}/diffpatch", json=data)
        print("Submodule patch applied.")

    def update_repo_settings(self, org_name, repo_name):
        print(f"--- Updating repository settings for: {org_name}/{repo_name} ---")
        repo_data = self._request("GET", f"repos/{org_name}/{repo_name}").json()

        # Ensure these are boolean values, not strings
        repo_data["allow_manual_merge"] = True
        repo_data["autodetect_manual_merge"] = True

        self._request("PATCH", f"repos/{org_name}/{repo_name}", json=repo_data)
        print(f"Repository settings for '{org_name}/{repo_name}' updated.")

    def create_gitea_pr(self, repo_full_name: str, diff_content: str, title: str):
        owner, repo = repo_full_name.split("/")
        url = f"repos/{owner}/{repo}/pulls"
        base_branch = "main"

        # Create a new branch for the PR
        new_branch_name = f"pr-branch-{int(time.time())}"

        # Get the latest commit SHA of the base branch
        base_commit_sha = self._request("GET", f"repos/{owner}/{repo}/branches/{base_branch}").json()["commit"]["id"]

        # Create the new branch
        self._request("POST", f"repos/{owner}/{repo}/branches", json={
            "new_branch_name": new_branch_name,
            "old_ref": base_commit_sha  # Use the commit SHA directly
        })

        # Create a new file or modify an existing one in the new branch
        file_path = f"test-file-{int(time.time())}.txt"
        file_content = "This is a test file for the PR."
        self._request("POST", f"repos/{owner}/{repo}/contents/{file_path}", json={
            "content": base64.b64encode(file_content.encode('utf-8')).decode('ascii'),
            "message": "Add test file",
            "branch": new_branch_name
        })

        # Now create the PR
        data = {
            "head": new_branch_name,  # Use the newly created branch as head
            "base": base_branch,
            "title": title,
            "body": "Test Pull Request"
        }
        response = self._request("POST", url, json=data)
        return response.json()

    def modify_gitea_pr(self, repo_full_name: str, pr_number: int, diff_content: str, message: str):
        owner, repo = repo_full_name.split("/")

        # Get the PR details to find the head branch
        pr_details = self._request("GET", f"repos/{owner}/{repo}/pulls/{pr_number}").json()
        head_branch = pr_details["head"]["ref"]

        file_path = f"modified-file-{int(time.time())}.txt"
        file_content = "This is a modified test file for the PR."

        self._request("POST", f"repos/{owner}/{repo}/contents/{file_path}", json={
            "content": base64.b64encode(file_content.encode('utf-8')).decode('ascii'),
            "message": message,
            "branch": head_branch
        })

    def update_gitea_pr_properties(self, repo_full_name: str, pr_number: int, **kwargs):
        owner, repo = repo_full_name.split("/")
        url = f"repos/{owner}/{repo}/pulls/{pr_number}"
        response = self._request("PATCH", url, json=kwargs)
        return response.json()

    def get_timeline_events(self, repo_full_name: str, pr_number: int):
        owner, repo = repo_full_name.split("/")
        url = f"repos/{owner}/{repo}/issues/{pr_number}/timeline"

        # Retry logic for timeline events
        for i in range(10):  # Try up to 10 times
            try:
                response = self._request("GET", url)
                timeline_events = response.json()
                if timeline_events:  # Check that the timeline_events list is not empty
                    return timeline_events
                print(f"Attempt {i+1}: Timeline for PR {pr_number} is empty. Retrying in 3 seconds...")
                time.sleep(3)
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 404:
                    print(f"Attempt {i+1}: Timeline for PR {pr_number} not found yet. Retrying in 3 seconds...")
                    time.sleep(3)
                else:
                    raise  # Re-raise other HTTP errors
        raise Exception(f"Failed to retrieve timeline for PR {pr_number} after multiple retries.")

    def get_comments(self, repo_full_name: str, pr_number: int):
        owner, repo = repo_full_name.split("/")
        url = f"repos/{owner}/{repo}/issues/{pr_number}/comments"

        # Retry logic for comments
        for i in range(10):  # Try up to 10 times
            try:
                response = self._request("GET", url)
                comments = response.json()
                print(f"Attempt {i+1}: Comments for PR {pr_number} received: {comments}")
                if comments:  # Check that the comments list is not empty
                    return comments
                print(f"Attempt {i+1}: Comments for PR {pr_number} are empty. Retrying in 3 seconds...")
                time.sleep(3)
            except requests.exceptions.HTTPError as e:
                if e.response.status_code == 404:
                    print(f"Attempt {i+1}: Comments for PR {pr_number} not found yet. Retrying in 3 seconds...")
                    time.sleep(3)
                else:
                    raise  # Re-raise other HTTP errors
        raise Exception(f"Failed to retrieve comments for PR {pr_number} after multiple retries.")
||||
def get_pr_details(self, repo_full_name: str, pr_number: int):
|
||||
owner, repo = repo_full_name.split("/")
|
||||
url = f"repos/{owner}/{repo}/pulls/{pr_number}"
|
||||
response = self._request("GET", url)
|
||||
return response.json()
|
||||
|
||||
153
integration/tests/test_pr_workflow.py
Executable file
@@ -0,0 +1,153 @@
import pytest
import re
import time
import subprocess
import requests
from pathlib import Path
from tests.lib.common_test_utils import (
    GiteaAPIClient,
    mock_build_result,
)

# =============================================================================
# TEST CASES
# =============================================================================


def test_pr_workflow_succeeded(gitea_env, mock_build_result):
    """End-to-end test for a successful PR workflow."""
    diff = "diff --git a/test.txt b/test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
    pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR - should succeed")
    initial_pr_number = pr["number"]

    compose_dir = Path(__file__).parent.parent

    forwarded_pr_number = None
    print(
        f"Polling pool/pkgA PR #{initial_pr_number} timeline for forwarded PR event..."
    )
    for _ in range(20):
        time.sleep(1)
        timeline_events = gitea_env.get_timeline_events("pool/pkgA", initial_pr_number)
        for event in timeline_events:
            if event.get("type") == "pull_ref":
                if not (ref_issue := event.get("ref_issue")):
                    continue
                url_to_check = ref_issue.get("html_url", "")
                match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
                if match:
                    forwarded_pr_number = match.group(1)
                    break
        if forwarded_pr_number:
            break
    assert (
        forwarded_pr_number is not None
    ), "Workflow bot did not create a pull_ref event on the timeline."
    print(f"Found forwarded PR: products/SLFO #{forwarded_pr_number}")

    print(f"Polling products/SLFO PR #{forwarded_pr_number} for reviewer assignment...")
    reviewer_added = False
    for _ in range(15):
        time.sleep(1)
        pr_details = gitea_env.get_pr_details("products/SLFO", forwarded_pr_number)
        if any(
            r.get("login") == "autogits_obs_staging_bot"
            for r in pr_details.get("requested_reviewers", [])
        ):
            reviewer_added = True
            break
    assert reviewer_added, "Staging bot was not added as a reviewer."
    print("Staging bot has been added as a reviewer.")

    mock_build_result(package_name="pkgA", code="succeeded")

    print("Restarting obs-staging-bot...")
    subprocess.run(
        ["podman-compose", "restart", "obs-staging-bot"],
        cwd=compose_dir,
        check=True,
        capture_output=True,
    )

    print(f"Polling products/SLFO PR #{forwarded_pr_number} for final status...")
    status_comment_found = False
    for _ in range(20):
        time.sleep(1)
        timeline_events = gitea_env.get_timeline_events("products/SLFO", forwarded_pr_number)
        for event in timeline_events:
            print(event.get("body", "not a body"))
            if event.get("body") and "successful" in event["body"]:
                status_comment_found = True
                break
        if status_comment_found:
            break
    assert status_comment_found, "Staging bot did not post a 'successful' comment."


def test_pr_workflow_failed(gitea_env, mock_build_result):
    """End-to-end test for a failed PR workflow."""
    diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
    pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR - should fail")
    initial_pr_number = pr["number"]

    compose_dir = Path(__file__).parent.parent

    forwarded_pr_number = None
    print(
        f"Polling pool/pkgA PR #{initial_pr_number} timeline for forwarded PR event..."
    )
    for _ in range(20):
        time.sleep(1)
        timeline_events = gitea_env.get_timeline_events("pool/pkgA", initial_pr_number)
        for event in timeline_events:
            if event.get("type") == "pull_ref":
                if not (ref_issue := event.get("ref_issue")):
                    continue
                url_to_check = ref_issue.get("html_url", "")
                match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
                if match:
                    forwarded_pr_number = match.group(1)
                    break
        if forwarded_pr_number:
            break
    assert (
        forwarded_pr_number is not None
    ), "Workflow bot did not create a pull_ref event on the timeline."
    print(f"Found forwarded PR: products/SLFO #{forwarded_pr_number}")

    print(f"Polling products/SLFO PR #{forwarded_pr_number} for reviewer assignment...")
    reviewer_added = False
    for _ in range(15):
        time.sleep(1)
        pr_details = gitea_env.get_pr_details("products/SLFO", forwarded_pr_number)
        if any(
            r.get("login") == "autogits_obs_staging_bot"
            for r in pr_details.get("requested_reviewers", [])
        ):
            reviewer_added = True
            break
    assert reviewer_added, "Staging bot was not added as a reviewer."
    print("Staging bot has been added as a reviewer.")

    mock_build_result(package_name="pkgA", code="failed")

    print("Restarting obs-staging-bot...")
    subprocess.run(
        ["podman-compose", "restart", "obs-staging-bot"],
        cwd=compose_dir,
        check=True,
        capture_output=True,
    )

    print(f"Polling products/SLFO PR #{forwarded_pr_number} for final status...")
    status_comment_found = False
    for _ in range(20):
        time.sleep(1)
        timeline_events = gitea_env.get_timeline_events("products/SLFO", forwarded_pr_number)
        for event in timeline_events:
            if event.get("body") and "failed" in event["body"]:
                status_comment_found = True
                break
        if status_comment_found:
            break
    assert status_comment_found, "Staging bot did not post a 'failed' comment."
117
integration/tests/workflow_pr_sync_test.py
Executable file
@@ -0,0 +1,117 @@
import pytest
import re
import time
import subprocess
import requests
from pathlib import Path
from tests.lib.common_test_utils import (
    GiteaAPIClient,
)

# =============================================================================
# TEST CASES
# =============================================================================

pytest.pr = None
pytest.pr_details = None
pytest.initial_pr_number = None
pytest.forwarded_pr_number = None


@pytest.mark.dependency()
def test_001_project_pr(gitea_env):
    """Forwarded PR correct title"""
    diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100644\nindex 0000000..e69de29\n"
    pytest.pr = gitea_env.create_gitea_pr("pool/pkgA", diff, "Test PR")
    pytest.initial_pr_number = pytest.pr["number"]
    time.sleep(5)  # Give Gitea some time to process the PR and make the timeline available

    compose_dir = Path(__file__).parent.parent

    pytest.forwarded_pr_number = None
    print(
        f"Polling pool/pkgA PR #{pytest.initial_pr_number} timeline for forwarded PR event..."
    )
    # Poll the timeline until the forwarded PR shows up
    for _ in range(20):
        time.sleep(1)
        timeline_events = gitea_env.get_timeline_events("pool/pkgA", pytest.initial_pr_number)
        for event in timeline_events:
            if event.get("type") == "pull_ref":
                if not (ref_issue := event.get("ref_issue")):
                    continue
                url_to_check = ref_issue.get("html_url", "")
                match = re.search(r"products/SLFO/pulls/(\d+)", url_to_check)
                if match:
                    pytest.forwarded_pr_number = match.group(1)
                    break
        if pytest.forwarded_pr_number:
            break
    assert (
        pytest.forwarded_pr_number is not None
    ), "Workflow bot did not create a forwarded PR."
    pytest.pr_details = gitea_env.get_pr_details("products/SLFO", pytest.forwarded_pr_number)
    assert (
        pytest.pr_details["title"] == "Forwarded PRs: pkgA"
    ), "Forwarded PR does not have the expected title."


@pytest.mark.dependency(depends=["test_001_project_pr"])
def test_002_updated_project_pr(gitea_env):
    """Forwarded PR head is updated"""
    diff = "diff --git a/another_test.txt b/another_test.txt\nnew file mode 100444\nindex 0000000..e69de21\n"
    gitea_env.modify_gitea_pr("pool/pkgA", pytest.initial_pr_number, diff, "Tweaks")
    sha_old = pytest.pr_details["head"]["sha"]

    sha_changed = False
    for _ in range(20):
        time.sleep(1)
        new_pr_details = gitea_env.get_pr_details("products/SLFO", pytest.forwarded_pr_number)
        sha_new = new_pr_details["head"]["sha"]
        if sha_new != sha_old:
            print(f"Sha changed from {sha_old} to {sha_new}")
            sha_changed = True
            break

    assert sha_changed, "Forwarded PR head SHA was not updated."


@pytest.mark.dependency(depends=["test_001_project_pr"])
def test_003_wip(gitea_env):
    """WIP flag set for PR"""
    # 1. Set the WIP flag on PR f"pool/pkgA#{pytest.initial_pr_number}"
    initial_pr_details = gitea_env.get_pr_details("pool/pkgA", pytest.initial_pr_number)
    wip_title = "WIP: " + initial_pr_details["title"]

    gitea_env.update_gitea_pr_properties("pool/pkgA", pytest.initial_pr_number, title=wip_title)
    # 2. In a loop, check whether the WIP flag is set on PR f"products/SLFO #{pytest.forwarded_pr_number}"
    wip_flag_set = False
    for _ in range(20):
        time.sleep(1)
        forwarded_pr_details = gitea_env.get_pr_details(
            "products/SLFO", pytest.forwarded_pr_number
        )
        if "WIP: " in forwarded_pr_details["title"]:
            wip_flag_set = True
            break

    assert wip_flag_set, "WIP flag was not set in the forwarded PR."

    # Remove the WIP flag from PR f"pool/pkgA#{pytest.initial_pr_number}"
    initial_pr_details = gitea_env.get_pr_details("pool/pkgA", pytest.initial_pr_number)
    non_wip_title = initial_pr_details["title"].replace("WIP: ", "")
    gitea_env.update_gitea_pr_properties(
        "pool/pkgA", pytest.initial_pr_number, title=non_wip_title
    )

    # In a loop, check whether the WIP flag is removed from PR f"products/SLFO #{pytest.forwarded_pr_number}"
    wip_flag_removed = False
    for _ in range(20):
        time.sleep(1)
        forwarded_pr_details = gitea_env.get_pr_details(
            "products/SLFO", pytest.forwarded_pr_number
        )
        if "WIP: " not in forwarded_pr_details["title"]:
            wip_flag_removed = True
            break
    assert wip_flag_removed, "WIP flag was not removed from the forwarded PR."
1
integration/workflow-pr/Dockerfile
Symbolic link
@@ -0,0 +1 @@
Dockerfile.package
17
integration/workflow-pr/Dockerfile.local
Normal file
@@ -0,0 +1,17 @@
# Use the same base image as the Gitea container
FROM registry.suse.com/bci/bci-base:15.7

# Add the custom CA to the trust store
COPY integration/rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
RUN update-ca-certificates

# Install git and ssh
RUN zypper -n in git-core openssh-clients binutils

# Copy the pre-built binary into the container
COPY workflow-pr/workflow-pr /usr/local/bin/workflow-pr
COPY integration/workflow-pr/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +4755 /usr/local/bin/entrypoint.sh

# Set the entrypoint for the container
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
18
integration/workflow-pr/Dockerfile.package
Normal file
@@ -0,0 +1,18 @@
# Use the same base image as the Gitea container
FROM registry.suse.com/bci/bci-base:15.7

# Add the custom CA to the trust store
COPY rabbitmq-config/certs/cert.pem /usr/share/pki/trust/anchors/gitea-rabbitmq-ca.crt
RUN update-ca-certificates

RUN zypper ar -f http://download.opensuse.org/repositories/devel:/Factory:/git-workflow/15.7/devel:Factory:git-workflow.repo
RUN zypper --gpg-auto-import-keys ref

# Install git and ssh
RUN zypper -n in git-core openssh-clients autogits-workflow-pr binutils

COPY integration/workflow-pr/entrypoint.sh /usr/local/bin/entrypoint.sh
RUN chmod +4755 /usr/local/bin/entrypoint.sh

# Set the entrypoint for the container
ENTRYPOINT ["/usr/local/bin/entrypoint.sh"]
66
integration/workflow-pr/entrypoint.sh
Normal file
@@ -0,0 +1,66 @@
#!/bin/bash
TOKEN_FILE="/var/lib/gitea/workflow-pr.token"

# Wait for the token file to be created by the gitea setup script
echo "Waiting for $TOKEN_FILE..."
while [ ! -s "$TOKEN_FILE" ]; do
    sleep 2
done

# Read token and trim whitespace/newlines
GITEA_TOKEN=$(cat "$TOKEN_FILE" | tr -d '\n\r ')

if [ -z "$GITEA_TOKEN" ]; then
    echo "Error: Token file $TOKEN_FILE is empty after trimming."
    exit 1
fi

export GITEA_TOKEN
echo "GITEA_TOKEN exported (length: ${#GITEA_TOKEN})"

# Wait for the dummy data to be created by the gitea setup script
echo "Waiting for workflow.config in products/SLFO..."
API_URL="http://gitea-test:3000/api/v1/repos/products/SLFO/contents/workflow.config"
HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")

while [ "$HTTP_STATUS" != "200" ]; do
    echo "workflow.config not found yet (HTTP Status: $HTTP_STATUS). Retrying in 5s..."
    sleep 5
    HTTP_STATUS=$(curl -s -o /dev/null -w "%{http_code}" -H "Authorization: token $GITEA_TOKEN" "$API_URL")
done

# Wait for the shared SSH key to be generated by the gitea setup script
echo "Waiting for /var/lib/gitea/ssh-keys/id_ed25519..."
while [ ! -f /var/lib/gitea/ssh-keys/id_ed25519 ]; do
    sleep 2
done

export AUTOGITS_IDENTITY_FILE="/root/.ssh/id_ed25519"

# Pre-populate known_hosts with Gitea's SSH host key
echo "Preparing SSH environment in /root/.ssh..."
mkdir -p /root/.ssh
chmod 700 /root/.ssh

# Copy the private key to the standard location and set permissions
cp /var/lib/gitea/ssh-keys/id_ed25519 /root/.ssh/id_ed25519
chmod 600 /root/.ssh/id_ed25519

echo "Scanning Gitea SSH host key..."
# We try multiple times because Gitea might still be starting its SSH server
for i in {1..10}; do
    ssh-keyscan -p 3022 gitea-test >> /root/.ssh/known_hosts 2>/dev/null && break
    echo "Retrying ssh-keyscan in 2s..."
    sleep 2
done
chmod 644 /root/.ssh/known_hosts

exe=$(which workflow-pr)
exe=${exe:-/usr/local/bin/workflow-pr}

package=$(rpm -qa | grep autogits-workflow-pr) || :

echo "!!!!!!!!!!!!!!!! using binary $exe; installed package: $package"
which strings > /dev/null 2>&1 && strings "$exe" | grep -A 2 vcs.revision= | head -4 || :

exec "$exe" "$@"
3
integration/workflow-pr/workflow-pr.json
Normal file
@@ -0,0 +1,3 @@
[
  "products/SLFO#main"
]
@@ -35,6 +35,7 @@ It's a JSON file with following syntax:
    {
      "Name": "SLES",
      "Origin": "SUSE:SLFO:Products:SLES:16.0",
      "Label": "BootstrapRing",
      "BuildDisableRepos": ["product"]
    }
  ]
@@ -48,6 +49,7 @@ It's a JSON file with following syntax:
| *QA* | Crucial for generating a product build (such as an ISO or FTP tree) that incorporates the packages. | no | array of objects | | |
| *QA > Name* | Suffix for the QA OBS staging project. The project is named *StagingProject:<PR_Number>:Name*. | no | string | | |
| *QA > Origin* | OBS reference project | no | string | | |
| *QA > Label* | Set up the project only when the given Gitea label is set on the pull request | no | string | | |
| *QA > BuildDisableRepos* | The names of OBS repositories to build-disable, if any. | no | array of strings | | [] |


@@ -69,4 +71,10 @@ Details
* In this case, the **scmsync** tag is inherited from the `QA > Origin` project.
* It is desirable in some cases to avoid building specific build service repositories when they are not needed. In this case, `QA > BuildDisableRepos` can be specified.
  These repositories are disabled in the project meta when the QA project is generated.

* QA projects can build on top of each other. In this case, the setup order in the staging.config file must be correct.
* Depending on the Label settings, QA projects are created or removed. The staging bot also checks that these
  projects build successfully.
* It is possible to include the sources from the staging project in the QA project as well. Define a template using
  a project link pointing to the project defined as "StagingProject". You must *not* use scmsync directly in the
  same project then, but you can use it indirectly via a second project link (see the sketch below).

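For orientation, here is a minimal sketch of what such a template project's meta could look like, assuming the example staging.config shown above; all project and repository names are illustrative, not taken from a real setup:

    <project name="openSUSE:Leap:16.0:PullRequest:Template:QA">
      <title>QA template (illustrative sketch)</title>
      <!-- Sources come in via the project link to the configured StagingProject;
           the bot rewrites this link to the per-PR staging project (see the
           CreateQASubProject changes below). No scmsync tag here: it is used
           only indirectly, e.g. via a second project link, as described above. -->
      <link project="openSUSE:Leap:16.0:PullRequest"/>
      <repository name="standard">
        <path project="openSUSE:Leap:16.0" repository="standard"/>
        <arch>x86_64</arch>
      </repository>
    </project>
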
@@ -19,6 +19,7 @@ package main
 */

import (
	"bufio"
	"encoding/xml"
	"errors"
	"flag"
@@ -114,161 +115,105 @@ type DisableFlag struct {
	Name string `xml:"repository,attr"`
}

func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatusSummary {
	if _, finished := refProject.BuildResultSummary(); !finished {
		common.LogDebug("refProject not finished building??")
		return BuildStatusSummaryUnknown
	}

func ProcessBuildStatus(project *common.BuildResultList) BuildStatusSummary {
	if _, finished := project.BuildResultSummary(); !finished {
		common.LogDebug("Still building...")
		return BuildStatusSummaryBuilding
	}

	// the repositories should be set up equally between the projects. We
	// need to verify that packages that are building in `refProject` are not
	// failing in the `project`
	BuildResultSorter := func(a, b *common.BuildResult) int {
		if c := strings.Compare(a.Repository, b.Repository); c != 0 {
			return c
		}
		if c := strings.Compare(a.Arch, b.Arch); c != 0 {
			return c
		}

		panic("Should not happen -- BuiltResultSorter equal repos?")
	}
	slices.SortFunc(project.Result, BuildResultSorter)
	if refProject == nil {
		// just return if build finished and we have some successes, since this is a new package
		common.LogInfo("New package. Only need some success...")
		SomeSuccess := false
		for i := 0; i < len(project.Result); i++ {
			repoRes := project.Result[i]
			repoResStatus, ok := common.ObsRepoStatusDetails[repoRes.Code]
			if !ok {
				common.LogDebug("cannot find code:", repoRes.Code)
				return BuildStatusSummaryUnknown
			}
			if !repoResStatus.Finished {
				return BuildStatusSummaryBuilding
			}

			for _, pkg := range repoRes.Status {
				pkgStatus, ok := common.ObsBuildStatusDetails[pkg.Code]
				if !ok {
					common.LogInfo("Unknown package build status:", pkg.Code, "for", pkg.Package)
					common.LogDebug("Details:", pkg.Details)
				}

				if pkgStatus.Success {
					SomeSuccess = true
				}
			}
		}

		if SomeSuccess {
			return BuildStatusSummarySuccess
		}
		return BuildStatusSummaryFailed
	}

	slices.SortFunc(refProject.Result, BuildResultSorter)

	common.LogDebug("comparing results", len(project.Result), "vs. ref", len(refProject.Result))
	SomeSuccess := false
	common.LogDebug("build results", len(project.Result))
	for i := 0; i < len(project.Result); i++ {
		common.LogDebug("searching for", project.Result[i].Repository, "/", project.Result[i].Arch)
		j := 0
	found:
		for ; j < len(refProject.Result); j++ {
			if project.Result[i].Repository != refProject.Result[j].Repository ||
				project.Result[i].Arch != refProject.Result[j].Arch {
				continue
			}

		for j := 0; j < len(project.Result); j++ {
			common.LogDebug(" found match for @ idx:", j)
			res, success := ProcessRepoBuildStatus(project.Result[i].Status, refProject.Result[j].Status)
			res := ProcessRepoBuildStatus(project.Result[i].Status)
			switch res {
			case BuildStatusSummarySuccess:
				SomeSuccess = SomeSuccess || success
				break found
			case BuildStatusSummaryFailed:
				return BuildStatusSummaryFailed
			default:
				return res
			}
		}

		if j >= len(refProject.Result) {
			common.LogDebug("Cannot find results...")
			common.LogDebug(project.Result[i])
			common.LogDebug(refProject.Result)
			return BuildStatusSummaryUnknown
		}
	}

	if SomeSuccess {
		return BuildStatusSummarySuccess
	}

	return BuildStatusSummaryFailed
	return BuildStatusSummarySuccess
}

func ProcessRepoBuildStatus(results, ref []*common.PackageBuildStatus) (status BuildStatusSummary, SomeSuccess bool) {
	PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
		return strings.Compare(a.Package, b.Package)
	}
func ProcessRepoBuildStatus(results []*common.PackageBuildStatus) (status BuildStatusSummary) {

	PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
		return strings.Compare(a.Package, b.Package)
	}

	common.LogDebug("******** REF: ")
	data, _ := xml.MarshalIndent(ref, "", " ")
	common.LogDebug(string(data))
	common.LogDebug("******* RESULTS: ")
	data, _ = xml.MarshalIndent(results, "", " ")
	data, _ := xml.MarshalIndent(results, "", " ")
	common.LogDebug(string(data))
	common.LogDebug("*******")

	// compare build result
	slices.SortFunc(results, PackageBuildStatusSorter)
	slices.SortFunc(ref, PackageBuildStatusSorter)

	j := 0
	SomeSuccess = false
	for i := 0; i < len(results); i++ {
		res, ok := common.ObsBuildStatusDetails[results[i].Code]
		if !ok {
			common.LogInfo("unknown package result code:", results[i].Code, "for package:", results[i].Package)
			return BuildStatusSummaryUnknown, SomeSuccess
			return BuildStatusSummaryUnknown
		}

		if !res.Finished {
			return BuildStatusSummaryBuilding, SomeSuccess
			return BuildStatusSummaryBuilding
		}

		if !res.Success {
			// not failed if the reference project also failed for the same package here
			for ; j < len(results) && strings.Compare(results[i].Package, ref[j].Package) < 0; j++ {
			}

			if j < len(results) && results[i].Package == ref[j].Package {
				refRes, ok := common.ObsBuildStatusDetails[ref[j].Code]
				if !ok {
					common.LogInfo("unknown ref package result code:", ref[j].Code, "package:", ref[j].Package)
					return BuildStatusSummaryUnknown, SomeSuccess
				}

				if !refRes.Finished {
					common.LogDebug("Not finished building in reference project?")
				}

				if refRes.Success {
					return BuildStatusSummaryFailed, SomeSuccess
				}
			}
		} else {
			SomeSuccess = true
			return BuildStatusSummaryFailed
		}
	}

	return BuildStatusSummarySuccess, SomeSuccess
	return BuildStatusSummarySuccess
}

func GetPackageBuildStatus(project *common.BuildResultList, packageName string) (bool, BuildStatusSummary) {
	var packageStatuses []*common.PackageBuildStatus

	// Collect all statuses for the package
	for _, result := range project.Result {
		for _, pkgStatus := range result.Status {
			if pkgStatus.Package == packageName {
				packageStatuses = append(packageStatuses, pkgStatus)
			}
		}
	}

	if len(packageStatuses) == 0 {
		return true, BuildStatusSummaryUnknown // true for 'missing'
	}

	// Check for any failures
	for _, pkgStatus := range packageStatuses {
		res, ok := common.ObsBuildStatusDetails[pkgStatus.Code]
		if !ok {
			common.LogInfo("unknown package result code:", pkgStatus.Code, "for package:", pkgStatus.Package)
			return false, BuildStatusSummaryUnknown
		}
		if !res.Success {
			return false, BuildStatusSummaryFailed
		}
	}

	// Check for any unfinished builds
	for _, pkgStatus := range packageStatuses {
		res, _ := common.ObsBuildStatusDetails[pkgStatus.Code]
		// 'ok' is already checked in the loop above
		if !res.Finished {
			return false, BuildStatusSummaryBuilding
		}
	}

	// If we got here, all builds are finished and successful
	return false, BuildStatusSummarySuccess
}

|
||||
func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingPrj, buildPrj string, stagingMasterPrj string) (*common.ProjectMeta, error) {
|
||||
@@ -327,9 +272,9 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
common.LogError("error fetching project meta for", buildPrj, ". Err:", err)
|
||||
return nil, err
|
||||
}
|
||||
common.LogInfo("Meta: ", meta)
|
||||
|
||||
// generate new project with paths pointinig back to original repos
|
||||
// disable publishing
|
||||
|
||||
meta.Name = stagingPrj
|
||||
meta.Description = fmt.Sprintf(`Pull request build job PR#%d to branch %s of %s/%s`,
|
||||
@@ -344,7 +289,10 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
|
||||
urlPkg := make([]string, 0, len(modifiedOrNew))
|
||||
for _, pkg := range modifiedOrNew {
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(pkg))
|
||||
// FIXME: skip manifest subdirectories itself
|
||||
// strip any leading directory name and just hand over last directory as package name
|
||||
onlybuilds := strings.Split(pkg, "/")
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(onlybuilds[len(onlybuilds)-1]))
|
||||
}
|
||||
meta.ScmSync = pr.Head.Repo.CloneURL + "?" + strings.Join(urlPkg, "&") + "#" + pr.Head.Sha
|
||||
if len(meta.ScmSync) >= 65535 {
|
||||
@@ -384,12 +332,14 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
|
||||
func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject, templateProject, subProjectName string, buildDisableRepos []string) error {
|
||||
common.LogDebug("Setup QA sub projects")
|
||||
common.LogDebug("reading templateProject ", templateProject)
|
||||
templateMeta, err := ObsClient.GetProjectMeta(templateProject)
|
||||
if err != nil {
|
||||
common.LogError("error fetching template project meta for", templateProject, ":", err)
|
||||
return err
|
||||
}
|
||||
// patch baseMeta to become the new project
|
||||
common.LogDebug("upcoming project name ", stagingProject, ":", subProjectName)
|
||||
templateMeta.Name = stagingProject + ":" + subProjectName
|
||||
// freeze tag for now
|
||||
if len(templateMeta.ScmSync) > 0 {
|
||||
@@ -427,19 +377,50 @@ func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, git
|
||||
templateMeta.BuildFlags.Contents += string(output)
|
||||
}
|
||||
}
|
||||
|
||||
// include sources from submission project when link points to staging project
|
||||
for idx, l := range templateMeta.Link {
|
||||
if l.Project == stagingConfig.StagingProject {
|
||||
templateMeta.Link[idx].Project = stagingProject
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup ReleaseTarget and modify affected path entries
|
||||
for idx, r := range templateMeta.Repositories {
|
||||
templateMeta.Repositories[idx].ReleaseTargets = nil
|
||||
|
||||
for pidx, path := range r.Paths {
|
||||
// Check for path building against code stream
|
||||
common.LogDebug(" checking in ", templateMeta.Name)
|
||||
common.LogDebug(" stagingProject ", stagingProject)
|
||||
common.LogDebug(" checking for ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
common.LogDebug(" path.Project ", path.Project)
|
||||
common.LogDebug(" stagingConfig.ObsProject ", stagingConfig.ObsProject)
|
||||
common.LogDebug(" stagingConfig.StagingProject ", stagingConfig.StagingProject)
|
||||
common.LogDebug(" templateProject ", templateProject)
|
||||
if path.Project == stagingConfig.ObsProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = stagingProject
|
||||
}
|
||||
} else
|
||||
// Check for path building against a repo in template project itself
|
||||
if path.Project == templateProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = templateMeta.Name
|
||||
} else
|
||||
// Check for path prefixes against a template project inside of template project area
|
||||
if strings.HasPrefix(path.Project, stagingConfig.StagingProject + ":") {
|
||||
newProjectName := stagingProject
|
||||
// find project name
|
||||
for _, setup := range stagingConfig.QA {
|
||||
if setup.Origin == path.Project {
|
||||
common.LogDebug(" Match:", setup.Origin)
|
||||
newProjectName = newProjectName + ":" + setup.Name
|
||||
common.LogDebug(" New:", newProjectName)
|
||||
break
|
||||
}
|
||||
}
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = newProjectName
|
||||
common.LogDebug(" Matched prefix")
|
||||
}
|
||||
common.LogDebug(" Path using project ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -447,6 +428,8 @@ func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, git
|
||||
err = ObsClient.SetProjectMeta(templateMeta)
|
||||
if err != nil {
|
||||
common.LogError("cannot create project:", templateMeta.Name, err)
|
||||
x, _ := xml.MarshalIndent(templateMeta, "", " ")
|
||||
common.LogError(string(x))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@@ -553,7 +536,7 @@ func FetchOurLatestActionableReview(gitea common.Gitea, org, repo string, id int
|
||||
}
|
||||
|
||||
func ParseNotificationToPR(thread *models.NotificationThread) (org string, repo string, num int64, err error) {
|
||||
rx := regexp.MustCompile(`^https://src\.(?:open)?suse\.(?:org|de)/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
rx := regexp.MustCompile(`^.*/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
notification := thread.Subject
|
||||
match := rx.FindStringSubmatch(notification.URL)
|
||||
if match == nil {
|
||||
@@ -702,6 +685,64 @@ func SetStatus(gitea common.Gitea, org, repo, hash string, status *models.Commit
|
||||
return err
|
||||
}
|
||||
|
||||
func commentOnPackagePR(gitea common.Gitea, org string, repo string, prNum int64, msg string) {
	if IsDryRun {
		common.LogInfo("Would comment on package PR %s/%s#%d: %s", org, repo, prNum, msg)
		return
	}

	pr, err := gitea.GetPullRequest(org, repo, prNum)
	if err != nil {
		common.LogError("Failed to get package PR %s/%s#%d: %v", org, repo, prNum, err)
		return
	}

	err = gitea.AddComment(pr, msg)
	if err != nil {
		common.LogError("Failed to comment on package PR %s/%s#%d: %v", org, repo, prNum, err)
	}
}

// Create and remove QA projects
func ProcessQaProjects(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject string) []string {
	usedQAprojects := make([]string, 0)
	prLabelNames := make(map[string]int)
	for _, label := range pr.Labels {
		prLabelNames[label.Name] = 1
	}
	msg := ""
	for _, setup := range stagingConfig.QA {
		QAproject := stagingProject + ":" + setup.Name
		if len(setup.Label) > 0 {
			if _, ok := prLabelNames[setup.Label]; !ok {
				if !IsDryRun {
					// blindly remove; this will fail when the project does not exist
					ObsClient.DeleteProject(QAproject)
				}
				common.LogInfo("QA project ", setup.Name, "has no matching Label")
				continue
			}
		}

		usedQAprojects = append(usedQAprojects, QAproject)
		// check for existence first; no error but no meta is a 404
		if meta, err := ObsClient.GetProjectMeta(QAproject); meta == nil && err == nil {
			common.LogInfo("Create QA project ", QAproject)
			CreateQASubProject(stagingConfig, git, gitea, pr,
				stagingProject,
				setup.Origin,
				setup.Name,
				setup.BuildDisableRepos)
			msg = msg + "QA Project added: " + ObsWebHost + "/project/show/" +
				QAproject + "\n"
		}
	}
	if len(msg) > 1 {
		gitea.AddComment(pr, msg)
	}
	return usedQAprojects
}

func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, error) {
	dir, err := os.MkdirTemp(os.TempDir(), BotName)
	common.PanicOnError(err)
@@ -779,11 +820,12 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
	meta, err := ObsClient.GetProjectMeta(stagingConfig.ObsProject)
	if err != nil || meta == nil {
		common.LogError("Cannot find reference project meta:", stagingConfig.ObsProject, err)
		if !IsDryRun {
		if !IsDryRun && err == nil {
			common.LogError("Reference project is absent:", stagingConfig.ObsProject, err)
			_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Cannot fetch reference project meta")
			return true, err
		}
		return true, nil
		return true, err
	}

	if metaUrl, err := url.Parse(meta.ScmSync); err != nil {
@@ -815,6 +857,7 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
		l := len(stagingConfig.ObsProject)
		if l >= len(stagingConfig.StagingProject) || stagingConfig.ObsProject != stagingConfig.StagingProject[0:l] {
			common.LogError("StagingProject (", stagingConfig.StagingProject, ") is not a child of the target project", stagingConfig.ObsProject)
			return true, nil
		}
	}

@@ -933,36 +976,49 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
		msg = "Build is started in " + ObsWebHost + "/project/show/" +
			stagingProject + " .\n"

		if len(stagingConfig.QA) > 0 {
			msg = msg + "\nAdditional QA builds: \n"
		}
		SetStatus(gitea, org, repo, pr.Head.Sha, status)
		for _, setup := range stagingConfig.QA {
			CreateQASubProject(stagingConfig, git, gitea, pr,
				stagingProject,
				setup.Origin,
				setup.Name,
				setup.BuildDisableRepos)
			msg = msg + ObsWebHost + "/project/show/" +
				stagingProject + ":" + setup.Name + "\n"
		}
	}
	if change != RequestModificationNoChange && !IsDryRun {
		gitea.AddComment(pr, msg)
	}

	baseResult, err := ObsClient.LastBuildResults(stagingConfig.ObsProject, modifiedPackages...)
	if err != nil {
		common.LogError("failed fetching ref project status for", stagingConfig.ObsProject, ":", err)
	}
	stagingResult, err := ObsClient.BuildStatus(stagingProject)
	if err != nil {
		common.LogError("failed fetching stage project status for", stagingProject, ":", err)
	}
	buildStatus := ProcessBuildStatus(stagingResult, baseResult)

	_, packagePRs := common.ExtractDescriptionAndPRs(bufio.NewScanner(strings.NewReader(pr.Body)))

	// always update QA projects because Labels can change
	qaProjects := ProcessQaProjects(stagingConfig, git, gitea, pr, stagingProject)

	done := false
	switch buildStatus {
	overallBuildStatus := ProcessBuildStatus(stagingResult)
	commentSuffix := ""
	if len(qaProjects) > 0 && overallBuildStatus == BuildStatusSummarySuccess {
		separator := " in "
		for _, qaProject := range qaProjects {
			qaResult, err := ObsClient.BuildStatus(qaProject)
			if err != nil {
				common.LogError("failed fetching stage project status for", qaProject, ":", err)
			}
			qaBuildStatus := ProcessBuildStatus(qaResult)
			if qaBuildStatus != BuildStatusSummarySuccess {
				// either still building or in a failed state
				overallBuildStatus = qaBuildStatus
				commentSuffix = commentSuffix + separator + qaProject
				separator = ", "
			}
			if qaBuildStatus == BuildStatusSummaryFailed {
				// the main project was successful, but a QA project failed; adapt
				// the link to the QA project and change the commit state to fail
				status.Status = common.CommitStatus_Fail
				status.TargetURL = ObsWebHost + "/project/show/" + qaProject
				SetStatus(gitea, org, repo, pr.Head.Sha, status)
			}
		}
	}
	switch overallBuildStatus {
	case BuildStatusSummarySuccess:
		status.Status = common.CommitStatus_Success
		done = true
@@ -982,7 +1038,44 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
		}
	}
	}
	common.LogInfo("Build status:", buildStatus)

	if overallBuildStatus == BuildStatusSummarySuccess || overallBuildStatus == BuildStatusSummaryFailed {
		// avoid commenting while the build is in progress
		missingPkgs := []string{}

		for _, packagePR := range packagePRs {
			missing, packageBuildStatus := GetPackageBuildStatus(stagingResult, packagePR.Repo)
			if missing {
				missingPkgs = append(missingPkgs, packagePR.Repo)
				continue
			}
			var msg string
			switch packageBuildStatus {
			case BuildStatusSummarySuccess:
				msg = fmt.Sprintf("Build successful, for more information go to %s/project/show/%s.\n", ObsWebHost, stagingProject)
			case BuildStatusSummaryFailed:
				msg = fmt.Sprintf("Build failed, for more information go to %s/project/show/%s.\n", ObsWebHost, stagingProject)
			default:
				continue
			}
			commentOnPackagePR(gitea, packagePR.Org, packagePR.Repo, packagePR.Num, msg)
		}

		if len(missingPkgs) > 0 {
			overallBuildStatus = BuildStatusSummaryFailed
			msg := "The following packages were not found in the staging project:\n"
			for _, pkg := range missingPkgs {
				msg = msg + " - " + pkg + "\n"
			}
			common.LogInfo(msg)
			err := gitea.AddComment(pr, msg)
			if err != nil {
				common.LogError(err)
			}
		}
	}

	common.LogInfo("Build status:", overallBuildStatus)
	if !IsDryRun {
		if err = SetStatus(gitea, org, repo, pr.Head.Sha, status); err != nil {
			return false, err
@@ -1047,6 +1140,7 @@ func PollWorkNotifications(giteaUrl string) {

var ListPullNotificationsOnly bool
var GiteaUrl string
var ObsApiHost string
var ObsWebHost string
var IsDryRun bool
var ProcessPROnly string
@@ -1069,8 +1163,8 @@ func main() {
	flag.BoolVar(&ListPullNotificationsOnly, "list-notifications-only", false, "Only lists notifications without acting on them")
	ProcessPROnly := flag.String("pr", "", "Process only a specific PR and ignore the rest. Use for debugging")
	buildRoot := flag.String("build-root", "", "Default build location for staging projects. Default is the bot's home project")
	flag.StringVar(&GiteaUrl, "gitea-url", "https://src.opensuse.org", "Gitea instance")
	obsApiHost := flag.String("obs", "https://api.opensuse.org", "API for OBS instance")
	flag.StringVar(&GiteaUrl, "gitea-url", "", "Gitea instance")
	flag.StringVar(&ObsApiHost, "obs", "", "API for OBS instance")
	flag.StringVar(&ObsWebHost, "obs-web", "", "Web OBS instance, if not derived from the obs config")
	flag.BoolVar(&IsDryRun, "dry", false, "Dry-run, don't actually create any build projects or review changes")
	debug := flag.Bool("debug", false, "Turns on debug logging")
@@ -1082,19 +1176,34 @@ func main() {
		common.SetLoggingLevel(common.LogLevelInfo)
	}

	if len(GiteaUrl) == 0 {
		GiteaUrl = os.Getenv(common.GiteaHostEnv)
	}
	if len(GiteaUrl) == 0 {
		GiteaUrl = "https://src.opensuse.org"
	}
	if len(ObsApiHost) == 0 {
		ObsApiHost = os.Getenv(common.ObsApiEnv)
	}
	if len(ObsApiHost) == 0 {
		ObsApiHost = "https://api.opensuse.org"
	}
	if len(ObsWebHost) == 0 {
		ObsWebHost = ObsWebHostFromApiHost(*obsApiHost)
		ObsWebHost = os.Getenv(common.ObsWebEnv)
	}
	if len(ObsWebHost) == 0 {
		ObsWebHost = "https://build.opensuse.org"
	}

	common.LogDebug("OBS Gitea Host:", GiteaUrl)
	common.LogDebug("OBS Web Host:", ObsWebHost)
	common.LogDebug("OBS API Host:", *obsApiHost)
	common.LogDebug("OBS API Host:", ObsApiHost)

	common.PanicOnErrorWithMsg(common.RequireGiteaSecretToken(), "Cannot find GITEA_TOKEN")
	common.PanicOnErrorWithMsg(common.RequireObsSecretToken(), "Cannot find OBS_USER and OBS_PASSWORD")

	var err error
	if ObsClient, err = common.NewObsClient(*obsApiHost); err != nil {
	if ObsClient, err = common.NewObsClient(ObsApiHost); err != nil {
		log.Error(err)
		return
	}

@@ -105,10 +105,10 @@ func ProjectStatusSummarySvg(res []*common.BuildResult) []byte {

func LinkToBuildlog(R *common.BuildResult, S *common.PackageBuildStatus) string {
	if R != nil && S != nil {
		switch S.Code {
		case "succeeded", "failed", "building":
		//switch S.Code {
		//case "succeeded", "failed", "building":
		return "/buildlog/" + url.PathEscape(R.Project) + "/" + url.PathEscape(S.Package) + "/" + url.PathEscape(R.Repository) + "/" + url.PathEscape(R.Arch)
		}
		//}
	}
	return ""
}
@@ -170,6 +170,7 @@ func BuildStatusSvg(repo *common.BuildResult, status *common.PackageBuildStatus)
	buildStatus, ok := common.ObsBuildStatusDetails[status.Code]
	if !ok {
		buildStatus = common.ObsBuildStatusDetails["error"]
		common.LogError("Cannot find detail for status.Code", status.Code)
	}
	fillColor := "#480" // orange
	textColor := "#888"

@@ -6,13 +6,17 @@ After=network-online.target
Type=exec
ExecStart=/usr/bin/workflow-direct
EnvironmentFile=-/etc/default/%i/workflow-direct.env
DynamicUser=yes
#DynamicUser=yes
NoNewPrivileges=yes
ProtectSystem=strict
RuntimeDirectory=%i
# DynamicUser does not work, as we cannot put SSH keyfiles into the temp home in a way that is readable by SSH.
# Also, a systemd override is needed to assign the User that runs this; this should be configurable per instance.
ProtectHome=no
PrivateTmp=yes
# RuntimeDirectory=%i
# SLES 15 doesn't have HOME set for dynamic users, so we improvise
BindReadOnlyPaths=/etc/default/%i/known_hosts:/etc/ssh/ssh_known_hosts /etc/default/%i/config.json:%t/%i/config.json /etc/default/%i/id_ed25519 /etc/default/%i/id_ed25519.pub
WorkingDirectory=%t/%i
# BindReadOnlyPaths=/etc/default/%i/known_hosts:/etc/ssh/ssh_known_hosts /etc/default/%i/config.json:%t/%i/config.json /etc/default/%i/id_ed25519 /etc/default/%i/id_ed25519.pub
# WorkingDirectory=%t/%i

[Install]
WantedBy=multi-user.target

23
systemd/workflow-pr@.service
Normal file
@@ -0,0 +1,23 @@
[Unit]
Description=WorkflowPR git bot for %i
After=network-online.target

[Service]
Type=exec
ExecStart=/usr/bin/workflow-pr
EnvironmentFile=-/etc/default/%i/workflow-pr.env
#DynamicUser=yes
NoNewPrivileges=yes
ProtectSystem=strict
# DynamicUser does not work, as we cannot put SSH keyfiles into the temp home in a way that is readable by SSH.
# Also, a systemd override is needed to assign the User that runs this; this should be configurable per instance.
ProtectHome=no
PrivateTmp=yes
# RuntimeDirectory=%i
# SLES 15 doesn't have HOME set for dynamic users, so we improvise
# BindReadOnlyPaths=/etc/default/%i/known_hosts:/etc/ssh/ssh_known_hosts /etc/default/%i/config.json:%t/%i/config.json /etc/default/%i/id_ed25519 /etc/default/%i/id_ed25519.pub
# WorkingDirectory=%t/%i

[Install]
WantedBy=multi-user.target

98
utils/maintainer-update/main.go
Normal file
@@ -0,0 +1,98 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"slices"

	"src.opensuse.org/autogits/common"
)

func WriteNewMaintainershipFile(m *common.MaintainershipMap, filename string) {
	f, err := os.Create(filename + ".new")
	common.PanicOnError(err)
	common.PanicOnError(m.WriteMaintainershipFile(f))
	common.PanicOnError(f.Sync())
	common.PanicOnError(f.Close())
	common.PanicOnError(os.Rename(filename+".new", filename))
}

func run() error {
	pkg := flag.String("package", "", "Package to modify")
	rm := flag.Bool("rm", false, "Remove maintainer from package")
	add := flag.Bool("add", false, "Add maintainer to package")
	lint := flag.Bool("lint-only", false, "Reformat entire _maintainership.json only")
	flag.Parse()

	if (*add == *rm) && !*lint {
		return fmt.Errorf("Need to either add or remove a maintainer, or lint")
	}

	filename := common.MaintainershipFile
	if *lint {
		if len(flag.Args()) > 0 {
			filename = flag.Arg(0)
		}
	}

	data, err := os.ReadFile(filename)
	if os.IsNotExist(err) {
		return err
	}
	if err != nil {
		return err
	}

	m, err := common.ParseMaintainershipData(data)
	if err != nil {
		return fmt.Errorf("Failed to parse JSON: %w", err)
	}

	if *lint {
		m.Raw = nil // forces a rewrite
	} else {
		users := flag.Args()
		if len(users) > 0 {
			maintainers, ok := m.Data[*pkg]
			if !ok && !*add {
				return fmt.Errorf("No package %s and not adding one.", *pkg)
			}

			if *add {
				for _, u := range users {
					if !slices.Contains(maintainers, u) {
						maintainers = append(maintainers, u)
					}
				}
			}

			if *rm {
				newMaintainers := make([]string, 0, len(maintainers))
				for _, m := range maintainers {
					if !slices.Contains(users, m) {
						newMaintainers = append(newMaintainers, m)
					}
				}
				maintainers = newMaintainers
			}

			if len(maintainers) > 0 {
				slices.Sort(maintainers)
				m.Data[*pkg] = maintainers
			} else {
				delete(m.Data, *pkg)
			}
		}
	}

	WriteNewMaintainershipFile(m, filename)
	return nil
}

func main() {
	if err := run(); err != nil {
		common.LogError(err)
		os.Exit(1)
	}
}
242
utils/maintainer-update/main_test.go
Normal file
242
utils/maintainer-update/main_test.go
Normal file
@@ -0,0 +1,242 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if os.Getenv("BE_MAIN") == "1" {
|
||||
main()
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
func TestRun(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
inData string
|
||||
expectedOut string
|
||||
params []string
|
||||
expectedError string
|
||||
isDir bool
|
||||
}{
|
||||
{
|
||||
name: "add user to existing package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "add user to new package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg2", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"], "pkg2": ["user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "no-op with no users",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-add"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "add existing user",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "remove user from package",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "remove last user from package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user1"},
|
||||
expectedOut: `{}`,
|
||||
},
|
||||
{
|
||||
name: "remove non-existent user",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg1", "-rm", "user2"},
|
||||
expectedOut: `{"pkg1": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint only unsorted",
|
||||
inData: `{"pkg1": ["user2", "user1"]}`,
|
||||
params: []string{"-lint-only"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint only no changes",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-lint-only"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "no file",
|
||||
params: []string{"-add"},
|
||||
expectedError: "no such file or directory",
|
||||
},
|
||||
{
|
||||
name: "invalid json",
|
||||
inData: `{"pkg1": ["user1"`,
|
||||
params: []string{"-add"},
|
||||
expectedError: "Failed to parse JSON",
|
||||
},
|
||||
{
|
||||
name: "add",
|
||||
inData: `{"pkg1": ["user1", "user2"]}`,
|
||||
params: []string{"-package", "pkg1", "-add", "user3"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2", "user3"]}`,
|
||||
},
|
||||
{
|
||||
name: "lint specific file",
|
||||
inData: `{"pkg1": ["user2", "user1"]}`,
|
||||
params: []string{"-lint-only", "other.json"},
|
||||
expectedOut: `{"pkg1": ["user1", "user2"]}`,
|
||||
},
|
||||
{
|
||||
name: "add user to package when it was not there before",
|
||||
inData: `{}`,
|
||||
params: []string{"-package", "newpkg", "-add", "user1"},
|
||||
expectedOut: `{"newpkg": ["user1"]}`,
|
||||
},
|
||||
{
|
||||
name: "unreadable file (is a directory)",
|
||||
isDir: true,
|
||||
params: []string{"-rm"},
|
||||
expectedError: "is a directory",
|
||||
},
|
||||
{
|
||||
name: "remove user from non-existent package",
|
||||
inData: `{"pkg1": ["user1"]}`,
|
||||
params: []string{"-package", "pkg2", "-rm", "user2"},
|
||||
expectedError: "No package pkg2 and not adding one.",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
oldWd, _ := os.Getwd()
|
||||
_ = os.Chdir(dir)
|
||||
defer os.Chdir(oldWd)
|
||||
|
||||
targetFile := common.MaintainershipFile
|
||||
if tt.name == "lint specific file" {
|
||||
targetFile = "other.json"
|
||||
}
|
||||
|
||||
if tt.isDir {
|
||||
_ = os.Mkdir(targetFile, 0755)
|
||||
} else if tt.inData != "" {
|
||||
_ = os.WriteFile(targetFile, []byte(tt.inData), 0644)
|
||||
}
|
||||
|
||||
flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)
|
||||
|
||||
os.Args = append([]string{"cmd"}, tt.params...)
|
||||
err := run()
|
||||
|
||||
if tt.expectedError != "" {
|
||||
if err == nil {
|
||||
t.Fatalf("expected error containing %q, but got none", tt.expectedError)
|
||||
}
|
||||
if !strings.Contains(err.Error(), tt.expectedError) {
|
||||
t.Fatalf("expected error containing %q, got %q", tt.expectedError, err.Error())
|
||||
}
|
||||
} else if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
|
||||
if tt.expectedOut != "" {
|
||||
data, _ := os.ReadFile(targetFile)
|
||||
var got, expected map[string][]string
|
||||
_ = json.Unmarshal(data, &got)
|
||||
_ = json.Unmarshal([]byte(tt.expectedOut), &expected)
|
||||
|
||||
if len(got) == 0 && len(expected) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(got, expected) {
|
||||
t.Fatalf("expected %v, got %v", expected, got)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
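The cases above pin down the behaviour of run() fairly completely. For readers without the implementation at hand, here is a minimal reconstruction that satisfies these tests; the flag names and error strings come from the test table, while the structure and helper choices (slices.Contains, slices.DeleteFunc) are assumptions, not the tool's actual code:

// Sketch of run(), inferred from the tests above. Assumed imports:
// "encoding/json", "flag", "fmt", "os", "slices", plus the common package.
func run() error {
	pkg := flag.String("package", "", "package whose maintainers to edit")
	add := flag.Bool("add", false, "add the positional users to -package")
	rm := flag.Bool("rm", false, "remove the positional users from -package")
	lintOnly := flag.Bool("lint-only", false, "only sort and rewrite the file")
	flag.Parse()

	// positional args: a file name in -lint-only mode, user names otherwise
	file := common.MaintainershipFile
	users := flag.Args()
	if *lintOnly && len(users) > 0 {
		file = users[0]
		users = nil
	}

	data, err := os.ReadFile(file)
	if err != nil {
		return err // e.g. "no such file or directory", "is a directory"
	}
	maintainers := map[string][]string{}
	if err := json.Unmarshal(data, &maintainers); err != nil {
		return fmt.Errorf("Failed to parse JSON: %w", err)
	}

	if !*lintOnly {
		m, ok := maintainers[*pkg]
		if !ok && !*add {
			return fmt.Errorf("No package %s and not adding one.", *pkg)
		}
		for _, user := range users {
			if *add && !slices.Contains(m, user) {
				m = append(m, user)
			} else if *rm {
				m = slices.DeleteFunc(m, func(u string) bool { return u == user })
			}
		}
		if len(m) == 0 && *rm {
			delete(maintainers, *pkg) // removing the last user drops the package
		} else {
			maintainers[*pkg] = m
		}
	}

	for _, m := range maintainers {
		slices.Sort(m) // linting: keep user lists sorted
	}
	out, err := json.Marshal(maintainers)
	if err != nil {
		return err
	}
	return os.WriteFile(file, out, 0644)
}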

func TestMainRecursive(t *testing.T) {
	tests := []struct {
		name        string
		inData      string
		expectedOut string
		params      []string
		expectExit  bool
	}{
		{
			name:        "test main() via recursive call",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-add", "user2"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:       "test main() failure",
			params:     []string{"-package", "pkg1"},
			expectExit: true,
		},
	}

	exe, _ := os.Executable()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			dir := t.TempDir()
			oldWd, _ := os.Getwd()
			_ = os.Chdir(dir)
			defer os.Chdir(oldWd)

			if tt.inData != "" {
				_ = os.WriteFile(common.MaintainershipFile, []byte(tt.inData), 0644)
			}

			cmd := exec.Command(exe, append([]string{"-test.run=None"}, tt.params...)...)
			cmd.Env = append(os.Environ(), "BE_MAIN=1")
			out, runErr := cmd.CombinedOutput()

			if tt.expectExit {
				if runErr == nil {
					t.Fatalf("expected exit with error, but it succeeded")
				}
				return
			}

			if runErr != nil {
				t.Fatalf("unexpected error: %v: %s", runErr, string(out))
			}

			if tt.expectedOut != "" {
				data, _ := os.ReadFile(common.MaintainershipFile)
				var got, expected map[string][]string
				_ = json.Unmarshal(data, &got)
				_ = json.Unmarshal([]byte(tt.expectedOut), &expected)

				if !reflect.DeepEqual(got, expected) {
					t.Fatalf("expected %v, got %v", expected, got)
				}
			}
		})
	}
}
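This re-exec pattern only works if the test binary can impersonate the real tool: a TestMain hook checks BE_MAIN and short-circuits into main() before any tests run, while -test.run=None ensures no tests would execute in the child even if the hook were bypassed. A minimal sketch of such a hook, assuming this is how the package wires it up:

func TestMain(m *testing.M) {
	if os.Getenv("BE_MAIN") == "1" {
		main() // act as the real maintainer-update binary
		os.Exit(0)
	}
	os.Exit(m.Run())
}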

@@ -94,6 +94,19 @@ Package Deletion Requests
If you wish to re-add a package, create a new PrjGit PR that re-adds the submodule on the branch with the "-removed" suffix. The bot will automatically remove this suffix from the project branch in the pool.


Merge Modes
-----------

| Merge Mode | Description
|------------|--------------------------------------------------------------------------------
| ff-only    | Only allow --ff-only merges in the package branch. This is best suited for
|            | devel projects and openSUSE Tumbleweed development, where history should be linear.
| replace    | Merge is done via the `-X theirs` strategy and old files are removed in the merge.
|            | This works well for downstream codestreams, like Leap, that update their branch
|            | to the latest version.
| devel      | No merge; the project branch is simply set to the PR HEAD. This is suitable for
|            | downstream projects like Leap during the development cycle, where keeping
|            | maintenance history is not important.
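To make the table concrete, the three modes roughly map onto the following git operations. This is an illustrative sketch only; mergeModeArgs and the returned command lines are hypothetical, not taken from the bot's code:

// hypothetical mapping of merge modes to the git operations they imply
func mergeModeArgs(mode, prHead string) []string {
	switch mode {
	case "ff-only":
		// fast-forward or fail; keeps package-branch history linear
		return []string{"git", "merge", "--ff-only", prHead}
	case "replace":
		// prefer the incoming side on conflicts; stale files are dropped in the merge
		return []string{"git", "merge", "-X", "theirs", prHead}
	case "devel":
		// no merge commit at all: point the project branch at the PR HEAD
		return []string{"git", "reset", "--hard", prHead}
	}
	return nil
}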

Labels
------

@@ -44,19 +44,35 @@ var CurrentUser *models.User
var GitHandler common.GitHandlerGenerator
var Gitea common.Gitea

func getEnvOverrideString(env, def string) string {
	if envValue := os.Getenv(env); len(envValue) != 0 {
		return envValue
	}
	return def
}

func getEnvOverrideBool(env string, def bool) bool {
	if envValue := os.Getenv(env); len(envValue) != 0 {
		if value, err := strconv.Atoi(envValue); err == nil && value > 0 {
			return true
		}
	}
	return def
}

func main() {
	flag.StringVar(&GitAuthor, "git-author", "AutoGits PR Review Bot", "Git commit author")
	flag.StringVar(&GitEmail, "git-email", "amajer+devel-git@suse.de", "Git commit email")

-	workflowConfig := flag.String("config", "", "Repository and workflow definition file")
+	workflowConfig := flag.String("config", getEnvOverrideString("AUTOGITS_CONFIG", ""), "Repository and workflow definition file")
	giteaUrl := flag.String("gitea-url", "https://src.opensuse.org", "Gitea instance")
	rabbitUrl := flag.String("url", "amqps://rabbit.opensuse.org", "URL for RabbitMQ instance")
-	debugMode := flag.Bool("debug", false, "Extra debugging information")
-	checkOnStart := flag.Bool("check-on-start", false, "Check all repositories for consistency on start, without delays")
+	debugMode := flag.Bool("debug", getEnvOverrideBool("AUTOGITS_DEBUG", false), "Extra debugging information")
+	checkOnStart := flag.Bool("check-on-start", getEnvOverrideBool("AUTOGITS_CHECK_ON_START", false), "Check all repositories for consistency on start, without delays")
	checkIntervalHours := flag.Float64("check-interval", 5, "Check interval (+-random delay) for repositories for consistency, in hours")
	flag.BoolVar(&ListPROnly, "list-prs-only", false, "Only lists PRs without acting on them")
	flag.Int64Var(&PRID, "id", -1, "Process only the specific ID and ignore the rest. Use for debugging")
-	basePath := flag.String("repo-path", "", "Repository path. Default is temporary directory")
+	basePath := flag.String("repo-path", getEnvOverrideString("AUTOGITS_REPO_PATH", ""), "Repository path. Default is temporary directory")
	pr := flag.String("only-pr", "", "Only specific PR to process. For debugging")
	flag.BoolVar(&common.IsDryRun, "dry", false, "Dry mode. Do not push changes to remote repo.")
	flag.Parse()
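The net effect of the two helpers: an environment variable, when set, replaces the compiled-in flag default, while an explicit command-line flag still wins. A short illustration (values are examples only):

// assuming getEnvOverrideBool from the hunk above is in scope
os.Setenv("AUTOGITS_DEBUG", "1")
_ = getEnvOverrideBool("AUTOGITS_DEBUG", false) // true: any integer > 0 counts as set

os.Setenv("AUTOGITS_DEBUG", "0")
_ = getEnvOverrideBool("AUTOGITS_DEBUG", true) // true: "0" only falls back to def

Note the asymmetry: the boolean helper can only raise a default to true; "0" or a non-numeric value falls back to def, so the environment cannot switch a true default off.
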
@@ -24,6 +24,7 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
	title_refs := make([]string, 0, len(prset.PRs)-1)
	refs := make([]string, 0, len(prset.PRs)-1)

+	prefix := ""
	for _, pr := range prset.PRs {
		if prset.IsPrjGitPR(pr.PR) {
			continue
@@ -32,6 +33,9 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
			// remove PRs that are not open from description
			continue
		}
+		if strings.HasPrefix(pr.PR.Title, "WIP:") {
+			prefix = "WIP: "
+		}
		org, repo, idx := pr.PRComponents()

		title_refs = append(title_refs, repo)
@@ -42,7 +46,7 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
	slices.Sort(title_refs)
	slices.Sort(refs)

-	title = "Forwarded PRs: " + strings.Join(title_refs, ", ")
+	title = prefix + "Forwarded PRs: " + strings.Join(title_refs, ", ")
	desc = fmt.Sprintf("This is a forwarded pull request by %s\nreferencing the following pull request(s):\n\n", GitAuthor) + strings.Join(refs, "\n") + "\n"

	if prset.Config.ManualMergeOnly {
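Concretely (repository names hypothetical): if the set contains open PRs pkgA#12 titled "fix build" and pkgB#7 titled "WIP: new macros", the WIP marker propagates to the project-git PR:

// title == "WIP: Forwarded PRs: pkgA, pkgB"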

@@ -402,6 +406,12 @@ func (pr *PRProcessor) Process(req *models.PullRequest) error {
	}
	common.LogInfo("fetched PRSet of size:", len(prset.PRs))

+	if !prset.PrepareForMerge(git) {
+		common.LogError("PRs are NOT mergeable.")
+	} else {
+		common.LogInfo("PRs are in mergeable state.")
+	}
+
	prjGitPRbranch := prGitBranchNameForPR(prRepo, prNo)
	prjGitPR, err := prset.GetPrjGitPR()
	if err == common.PRSet_PrjGitMissing {