Compare commits
33 Commits
new-packag
...
main
| Author | SHA256 | Date | |
|---|---|---|---|
| 708add1017 | |||
| 712349d638 | |||
| ba5a42dd29 | |||
| 53cf2c8bad | |||
| 868c28cd5a | |||
| 962c4b2562 | |||
| 57cb251dbc | |||
| 75c4fada50 | |||
| 7d13e586ac | |||
| 7729b845b0 | |||
| c662b2fdbf | |||
|
|
4cedb37da4 | ||
|
|
fe519628c8 | ||
|
|
ff18828692 | ||
| 6337ef7e50 | |||
| e9992d2e99 | |||
| aac218fc6d | |||
| 139f40fce3 | |||
| c44d34fdbe | |||
| 23be3df1fb | |||
| 68b67c6975 | |||
| 478a3a140a | |||
| df4da87bfd | |||
| b19d301d95 | |||
| 9532aa897c | |||
| f942909ac7 | |||
| 7f98298b89 | |||
| f0b053ca07 | |||
| 844ec8a87b | |||
| 6ee8fcc597 | |||
| 1220799e57 | |||
| 86a176a785 | |||
| bb9e9a08e5 |
@@ -129,6 +129,9 @@ go build \
|
||||
go build \
|
||||
-C utils/hujson \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C utils/maintainer-update \
|
||||
-buildmode=pie
|
||||
go build \
|
||||
-C gitea-events-rabbitmq-publisher \
|
||||
-buildmode=pie
|
||||
@@ -160,6 +163,7 @@ go test -C group-review -v
|
||||
go test -C obs-staging-bot -v
|
||||
go test -C obs-status-service -v
|
||||
go test -C workflow-direct -v
|
||||
go test -C utils/maintainer-update
|
||||
# TODO build fails
|
||||
#go test -C workflow-pr -v
|
||||
|
||||
@@ -179,6 +183,7 @@ install -D -m0755 workflow-direct/workflow-direct
|
||||
install -D -m0644 systemd/workflow-direct@.service %{buildroot}%{_unitdir}/workflow-direct@.service
|
||||
install -D -m0755 workflow-pr/workflow-pr %{buildroot}%{_bindir}/workflow-pr
|
||||
install -D -m0755 utils/hujson/hujson %{buildroot}%{_bindir}/hujson
|
||||
install -D -m0755 utils/maintainer-update/maintainer-update %{buildroot}%{_bindir}/maintainer-update
|
||||
|
||||
%pre gitea-events-rabbitmq-publisher
|
||||
%service_add_pre gitea-events-rabbitmq-publisher.service
|
||||
@@ -285,6 +290,7 @@ install -D -m0755 utils/hujson/hujson
|
||||
%files utils
|
||||
%license COPYING
|
||||
%{_bindir}/hujson
|
||||
%{_bindir}/maintainer-update
|
||||
|
||||
%files workflow-direct
|
||||
%license COPYING
|
||||
|
||||
@@ -54,6 +54,7 @@ type ReviewGroup struct {
|
||||
type QAConfig struct {
|
||||
Name string
|
||||
Origin string
|
||||
Label string // requires this gitea lable to be set or skipped
|
||||
BuildDisableRepos []string // which repos to build disable in the new project
|
||||
}
|
||||
|
||||
|
||||
@@ -20,10 +20,13 @@ package common
|
||||
|
||||
const (
|
||||
GiteaTokenEnv = "GITEA_TOKEN"
|
||||
GiteaHostEnv = "GITEA_HOST"
|
||||
ObsUserEnv = "OBS_USER"
|
||||
ObsPasswordEnv = "OBS_PASSWORD"
|
||||
ObsSshkeyEnv = "OBS_SSHKEY"
|
||||
ObsSshkeyFileEnv = "OBS_SSHKEYFILE"
|
||||
ObsApiEnv = "OBS_API"
|
||||
ObsWebEnv = "OBS_WEB"
|
||||
|
||||
DefaultGitPrj = "_ObsPrj"
|
||||
PrjLinksFile = "links.json"
|
||||
|
||||
@@ -822,6 +822,7 @@ func (gitea *GiteaTransport) ResetTimelineCache(org, repo string, idx int64) {
|
||||
Cache, IsCached := giteaTimelineCache[prID]
|
||||
if IsCached {
|
||||
Cache.lastCheck = Cache.lastCheck.Add(-time.Hour)
|
||||
giteaTimelineCache[prID] = Cache
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
package common
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
"src.opensuse.org/autogits/common/gitea-generated/client/repository"
|
||||
"src.opensuse.org/autogits/common/gitea-generated/models"
|
||||
@@ -27,11 +29,13 @@ type MaintainershipMap struct {
|
||||
IsDir bool
|
||||
Config *AutogitConfig
|
||||
FetchPackage func(string) ([]byte, error)
|
||||
Raw []byte
|
||||
}
|
||||
|
||||
func parseMaintainershipData(data []byte) (*MaintainershipMap, error) {
|
||||
func ParseMaintainershipData(data []byte) (*MaintainershipMap, error) {
|
||||
maintainers := &MaintainershipMap{
|
||||
Data: make(map[string][]string),
|
||||
Raw: data,
|
||||
}
|
||||
if err := json.Unmarshal(data, &maintainers.Data); err != nil {
|
||||
return nil, err
|
||||
@@ -62,7 +66,7 @@ func FetchProjectMaintainershipData(gitea GiteaMaintainershipReader, config *Aut
|
||||
}
|
||||
}
|
||||
|
||||
m, err := parseMaintainershipData(data)
|
||||
m, err := ParseMaintainershipData(data)
|
||||
if m != nil {
|
||||
m.Config = config
|
||||
m.IsDir = dir
|
||||
@@ -84,6 +88,8 @@ func (data *MaintainershipMap) ListProjectMaintainers(groups []*ReviewGroup) []s
|
||||
return nil
|
||||
}
|
||||
|
||||
m = slices.Clone(m)
|
||||
|
||||
// expands groups
|
||||
for _, g := range groups {
|
||||
m = g.ExpandMaintainers(m)
|
||||
@@ -120,6 +126,7 @@ func (data *MaintainershipMap) ListPackageMaintainers(pkg string, groups []*Revi
|
||||
}
|
||||
}
|
||||
}
|
||||
pkgMaintainers = slices.Clone(pkgMaintainers)
|
||||
prjMaintainers := data.ListProjectMaintainers(nil)
|
||||
|
||||
prjMaintainer:
|
||||
@@ -171,13 +178,135 @@ func (data *MaintainershipMap) IsApproved(pkg string, reviews []*models.PullRevi
|
||||
return false
|
||||
}
|
||||
|
||||
func (data *MaintainershipMap) modifyInplace(writer io.StringWriter) error {
|
||||
var original map[string][]string
|
||||
if err := json.Unmarshal(data.Raw, &original); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dec := json.NewDecoder(bytes.NewReader(data.Raw))
|
||||
_, err := dec.Token()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
output := ""
|
||||
lastPos := 0
|
||||
modified := false
|
||||
|
||||
type entry struct {
|
||||
key string
|
||||
valStart int
|
||||
valEnd int
|
||||
}
|
||||
var entries []entry
|
||||
|
||||
for dec.More() {
|
||||
kToken, _ := dec.Token()
|
||||
key := kToken.(string)
|
||||
var raw json.RawMessage
|
||||
dec.Decode(&raw)
|
||||
valEnd := int(dec.InputOffset())
|
||||
valStart := valEnd - len(raw)
|
||||
entries = append(entries, entry{key, valStart, valEnd})
|
||||
}
|
||||
|
||||
changed := make(map[string]bool)
|
||||
for k, v := range data.Data {
|
||||
if ov, ok := original[k]; !ok || !slices.Equal(v, ov) {
|
||||
changed[k] = true
|
||||
}
|
||||
}
|
||||
for k := range original {
|
||||
if _, ok := data.Data[k]; !ok {
|
||||
changed[k] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(changed) == 0 {
|
||||
_, err = writer.WriteString(string(data.Raw))
|
||||
return err
|
||||
}
|
||||
|
||||
for _, e := range entries {
|
||||
if v, ok := data.Data[e.key]; ok {
|
||||
prefix := string(data.Raw[lastPos:e.valStart])
|
||||
if modified && strings.TrimSpace(output) == "{" {
|
||||
if commaIdx := strings.Index(prefix, ","); commaIdx != -1 {
|
||||
if quoteIdx := strings.Index(prefix, "\""); quoteIdx == -1 || commaIdx < quoteIdx {
|
||||
prefix = prefix[:commaIdx] + prefix[commaIdx+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
output += prefix
|
||||
if changed[e.key] {
|
||||
slices.Sort(v)
|
||||
newVal, _ := json.Marshal(v)
|
||||
output += string(newVal)
|
||||
modified = true
|
||||
} else {
|
||||
output += string(data.Raw[e.valStart:e.valEnd])
|
||||
}
|
||||
} else {
|
||||
// Deleted
|
||||
modified = true
|
||||
}
|
||||
lastPos = e.valEnd
|
||||
}
|
||||
output += string(data.Raw[lastPos:])
|
||||
|
||||
// Handle additions (simplistic: at the end)
|
||||
for k, v := range data.Data {
|
||||
if _, ok := original[k]; !ok {
|
||||
slices.Sort(v)
|
||||
newVal, _ := json.Marshal(v)
|
||||
keyStr, _ := json.Marshal(k)
|
||||
|
||||
// Insert before closing brace
|
||||
if idx := strings.LastIndex(output, "}"); idx != -1 {
|
||||
prefix := output[:idx]
|
||||
suffix := output[idx:]
|
||||
|
||||
trimmedPrefix := strings.TrimRight(prefix, " \n\r\t")
|
||||
if !strings.HasSuffix(trimmedPrefix, "{") && !strings.HasSuffix(trimmedPrefix, ",") {
|
||||
// find the actual position of the last non-whitespace character in prefix
|
||||
lastCharIdx := strings.LastIndexAny(prefix, "]}0123456789\"")
|
||||
if lastCharIdx != -1 {
|
||||
prefix = prefix[:lastCharIdx+1] + "," + prefix[lastCharIdx+1:]
|
||||
}
|
||||
}
|
||||
|
||||
insertion := fmt.Sprintf(" %s: %s", string(keyStr), string(newVal))
|
||||
if !strings.HasSuffix(prefix, "\n") {
|
||||
insertion = "\n" + insertion
|
||||
}
|
||||
output = prefix + insertion + "\n" + suffix
|
||||
modified = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if modified {
|
||||
_, err := writer.WriteString(output)
|
||||
return err
|
||||
}
|
||||
_, err = writer.WriteString(string(data.Raw))
|
||||
return err
|
||||
}
|
||||
|
||||
func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) error {
|
||||
if data.IsDir {
|
||||
return fmt.Errorf("Not implemented")
|
||||
}
|
||||
|
||||
writer.WriteString("{\n")
|
||||
if len(data.Raw) > 0 {
|
||||
if err := data.modifyInplace(writer); err == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback to full write
|
||||
writer.WriteString("{\n")
|
||||
if d, ok := data.Data[""]; ok {
|
||||
eol := ","
|
||||
if len(data.Data) == 1 {
|
||||
@@ -188,17 +317,12 @@ func (data *MaintainershipMap) WriteMaintainershipFile(writer io.StringWriter) e
|
||||
writer.WriteString(fmt.Sprintf(" \"\": %s%s\n", string(str), eol))
|
||||
}
|
||||
|
||||
keys := make([]string, len(data.Data))
|
||||
i := 0
|
||||
keys := make([]string, 0, len(data.Data))
|
||||
for pkg := range data.Data {
|
||||
if pkg == "" {
|
||||
continue
|
||||
}
|
||||
keys[i] = pkg
|
||||
i++
|
||||
}
|
||||
if len(keys) >= i {
|
||||
keys = slices.Delete(keys, i, len(keys))
|
||||
keys = append(keys, pkg)
|
||||
}
|
||||
slices.Sort(keys)
|
||||
for i, pkg := range keys {
|
||||
|
||||
@@ -208,6 +208,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
name string
|
||||
is_dir bool
|
||||
maintainers map[string][]string
|
||||
raw []byte
|
||||
expected_output string
|
||||
expected_error error
|
||||
}{
|
||||
@@ -231,6 +232,43 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
},
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical modification",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one", "two"},
|
||||
"foo": {"byte", "four", "newone"},
|
||||
"pkg1": {},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\",\"newone\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "no change",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one", "two"},
|
||||
"foo": {"byte", "four"},
|
||||
"pkg1": {},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\",\"two\"],\n \"foo\": [\"byte\",\"four\"],\n \"pkg1\": []\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical addition",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one"},
|
||||
"new": {"user"},
|
||||
},
|
||||
raw: []byte("{\n \"\": [ \"one\" ]\n}\n"),
|
||||
expected_output: "{\n \"\": [ \"one\" ],\n \"new\": [\"user\"]\n}\n",
|
||||
},
|
||||
{
|
||||
name: "surgical deletion",
|
||||
maintainers: map[string][]string{
|
||||
"": {"one"},
|
||||
},
|
||||
raw: []byte("{\n \"\": [\"one\"],\n \"old\": [\"user\"]\n}\n"),
|
||||
expected_output: "{\n \"\": [\"one\"]\n}\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
@@ -239,6 +277,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
data := common.MaintainershipMap{
|
||||
Data: test.maintainers,
|
||||
IsDir: test.is_dir,
|
||||
Raw: test.raw,
|
||||
}
|
||||
|
||||
if err := data.WriteMaintainershipFile(&b); err != test.expected_error {
|
||||
@@ -248,7 +287,7 @@ func TestMaintainershipFileWrite(t *testing.T) {
|
||||
output := b.String()
|
||||
|
||||
if test.expected_output != output {
|
||||
t.Fatal("unexpected output:", output, "Expecting:", test.expected_output)
|
||||
t.Fatalf("unexpected output:\n%q\nExpecting:\n%q", output, test.expected_output)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -293,3 +332,89 @@ func TestReviewRequired(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaintainershipDataCorruption_PackageAppend(t *testing.T) {
|
||||
// Test corruption when append happens (merging project maintainers)
|
||||
// If backing array has capacity, append writes to it.
|
||||
|
||||
// We construct a slice with capacity > len to simulate this common scenario
|
||||
backingArray := make([]string, 1, 10)
|
||||
backingArray[0] = "@g1"
|
||||
|
||||
initialData := map[string][]string{
|
||||
"pkg": backingArray, // len 1, cap 10
|
||||
"": {"prjUser"},
|
||||
}
|
||||
|
||||
m := &common.MaintainershipMap{
|
||||
Data: initialData,
|
||||
}
|
||||
|
||||
groups := []*common.ReviewGroup{
|
||||
{
|
||||
Name: "@g1",
|
||||
Reviewers: []string{"u1"},
|
||||
},
|
||||
}
|
||||
|
||||
// ListPackageMaintainers("pkg", groups)
|
||||
// 1. gets ["@g1"] (cap 10)
|
||||
// 2. Appends "prjUser" -> ["@g1", "prjUser"] (in backing array)
|
||||
// 3. Expands "@g1" -> "u1".
|
||||
// Replace: ["u1", "prjUser"]
|
||||
// Sort: ["prjUser", "u1"]
|
||||
//
|
||||
// The backing array is now ["prjUser", "u1", ...]
|
||||
// The map entry "pkg" is still len 1.
|
||||
// So it sees ["prjUser"].
|
||||
|
||||
list1 := m.ListPackageMaintainers("pkg", groups)
|
||||
t.Logf("List1: %v", list1)
|
||||
|
||||
// ListPackageMaintainers("pkg", nil)
|
||||
// Should be ["@g1", "prjUser"] (because prjUser is appended from project maintainers)
|
||||
// But since backing array is corrupted:
|
||||
// It sees ["prjUser"] (from map) + appends "prjUser" -> ["prjUser", "prjUser"].
|
||||
|
||||
list2 := m.ListPackageMaintainers("pkg", nil)
|
||||
t.Logf("List2: %v", list2)
|
||||
|
||||
if !slices.Contains(list2, "@g1") {
|
||||
t.Errorf("Corruption: '@g1' is missing from second call. Got %v", list2)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMaintainershipDataCorruption_ProjectInPlace(t *testing.T) {
|
||||
// Test corruption in ListProjectMaintainers when replacement fits in place
|
||||
// e.g. replacing 1 group with 1 user.
|
||||
|
||||
initialData := map[string][]string{
|
||||
"": {"@g1"},
|
||||
}
|
||||
|
||||
m := &common.MaintainershipMap{
|
||||
Data: initialData,
|
||||
}
|
||||
|
||||
groups := []*common.ReviewGroup{
|
||||
{
|
||||
Name: "@g1",
|
||||
Reviewers: []string{"u1"},
|
||||
},
|
||||
}
|
||||
|
||||
// First call with expansion
|
||||
// Replaces "@g1" with "u1". Length stays 1. Modifies backing array in place.
|
||||
list1 := m.ListProjectMaintainers(groups)
|
||||
t.Logf("List1: %v", list1)
|
||||
|
||||
// Second call without expansion
|
||||
// Should return ["@g1"]
|
||||
list2 := m.ListProjectMaintainers(nil)
|
||||
t.Logf("List2: %v", list2)
|
||||
|
||||
if !slices.Contains(list2, "@g1") {
|
||||
t.Errorf("Corruption: '@g1' is missing from second call (Project). Got %v", list2)
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -116,13 +116,18 @@ type Flags struct {
|
||||
Contents string `xml:",innerxml"`
|
||||
}
|
||||
|
||||
type ProjectLinkMeta struct {
|
||||
Project string `xml:"project,attr"`
|
||||
}
|
||||
|
||||
type ProjectMeta struct {
|
||||
XMLName xml.Name `xml:"project"`
|
||||
Name string `xml:"name,attr"`
|
||||
Title string `xml:"title"`
|
||||
Description string `xml:"description"`
|
||||
Url string `xml:"url,omitempty"`
|
||||
ScmSync string `xml:"scmsync"`
|
||||
ScmSync string `xml:"scmsync,omitempty"`
|
||||
Link []ProjectLinkMeta `xml:"link"`
|
||||
Persons []PersonRepoMeta `xml:"person"`
|
||||
Groups []GroupRepoMeta `xml:"group"`
|
||||
Repositories []RepositoryMeta `xml:"repository"`
|
||||
@@ -138,8 +143,8 @@ type ProjectMeta struct {
|
||||
type PackageMeta struct {
|
||||
XMLName xml.Name `xml:"package"`
|
||||
Name string `xml:"name,attr"`
|
||||
Project string `xml:"project,attr"`
|
||||
ScmSync string `xml:"scmsync"`
|
||||
Project string `xml:"project,attr,omitempty"`
|
||||
ScmSync string `xml:"scmsync,omitempty"`
|
||||
Persons []PersonRepoMeta `xml:"person"`
|
||||
Groups []GroupRepoMeta `xml:"group"`
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ devel:languages:lua
|
||||
devel:languages:nodejs
|
||||
devel:languages:perl
|
||||
devel:languages:python:Factory
|
||||
devel:languages:python:mailman
|
||||
devel:languages:python:pytest
|
||||
devel:openSUSE:Factory
|
||||
network:chromium
|
||||
|
||||
@@ -35,6 +35,7 @@ It's a JSON file with following syntax:
|
||||
{
|
||||
"Name": "SLES",
|
||||
"Origin": "SUSE:SLFO:Products:SLES:16.0",
|
||||
"Label": "BootstrapRing",
|
||||
"BuildDisableRepos": ["product"]
|
||||
}
|
||||
]
|
||||
@@ -48,6 +49,7 @@ It's a JSON file with following syntax:
|
||||
| *QA* | Crucial for generating a product build (such as an ISO or FTP tree) that incorporates the packages. | no | array of objects | | |
|
||||
| *QA > Name* | Suffix for the QA OBS staging project. The project is named *StagingProject:<PR_Number>:Name*. | no | string | | |
|
||||
| *QA > Origin* | OBS reference project | no | string | | |
|
||||
| *QA > Label* | Setup the project only when the given gitea label is set on pull request | no | string | | |
|
||||
| *QA > BuildDisableRepos* | The names of OBS repositories to build-disable, if any. | no | array of strings | | [] |
|
||||
|
||||
|
||||
@@ -69,4 +71,10 @@ Details
|
||||
* In this case, the **scmsync** tag is inherited from the `QA > Origin` project.
|
||||
* It is desirable in some cases to avoid building some specific build service repositories when not needed. In this case, `QA > BuildDisableRepos` can be specified.
|
||||
These repositories would be disabled in the project meta when generating the QA project.
|
||||
|
||||
* QA projects can build on each other. In this case it is important that the order to setup is correct
|
||||
in the staging.config file.
|
||||
* Based on Label settings QA projects can get created or removed. The staging bot is also checking that these
|
||||
projects build successfully.
|
||||
* It is possible to include the sources from the staging project also in the QA project. Define a template using
|
||||
a project link pointing to the project defined as "StagingProject". You must *not* use scmsync directly in the
|
||||
same project then, but you can use it indirectly via a second project link
|
||||
|
||||
@@ -19,6 +19,7 @@ package main
|
||||
*/
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"flag"
|
||||
@@ -114,161 +115,105 @@ type DisableFlag struct {
|
||||
Name string `xml:"repository,attr"`
|
||||
}
|
||||
|
||||
func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatusSummary {
|
||||
if _, finished := refProject.BuildResultSummary(); !finished {
|
||||
common.LogDebug("refProject not finished building??")
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
|
||||
func ProcessBuildStatus(project *common.BuildResultList) BuildStatusSummary {
|
||||
if _, finished := project.BuildResultSummary(); !finished {
|
||||
common.LogDebug("Still building...")
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
// the repositories should be setup equally between the projects. We
|
||||
// need to verify that packages that are building in `refProject` are not
|
||||
// failing in the `project`
|
||||
BuildResultSorter := func(a, b *common.BuildResult) int {
|
||||
if c := strings.Compare(a.Repository, b.Repository); c != 0 {
|
||||
return c
|
||||
}
|
||||
if c := strings.Compare(a.Arch, b.Arch); c != 0 {
|
||||
return c
|
||||
}
|
||||
|
||||
panic("Should not happen -- BuiltResultSorter equal repos?")
|
||||
}
|
||||
slices.SortFunc(project.Result, BuildResultSorter)
|
||||
if refProject == nil {
|
||||
// just return if buid finished and have some successes, since new package
|
||||
common.LogInfo("New package. Only need some success...")
|
||||
SomeSuccess := false
|
||||
for i := 0; i < len(project.Result); i++ {
|
||||
repoRes := project.Result[i]
|
||||
repoResStatus, ok := common.ObsRepoStatusDetails[repoRes.Code]
|
||||
if !ok {
|
||||
common.LogDebug("cannot find code:", repoRes.Code)
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
if !repoResStatus.Finished {
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
for _, pkg := range repoRes.Status {
|
||||
pkgStatus, ok := common.ObsBuildStatusDetails[pkg.Code]
|
||||
if !ok {
|
||||
common.LogInfo("Unknown package build status:", pkg.Code, "for", pkg.Package)
|
||||
common.LogDebug("Details:", pkg.Details)
|
||||
}
|
||||
|
||||
if pkgStatus.Success {
|
||||
SomeSuccess = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if SomeSuccess {
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
return BuildStatusSummaryFailed
|
||||
}
|
||||
|
||||
slices.SortFunc(refProject.Result, BuildResultSorter)
|
||||
|
||||
common.LogDebug("comparing results", len(project.Result), "vs. ref", len(refProject.Result))
|
||||
SomeSuccess := false
|
||||
common.LogDebug("build results", len(project.Result))
|
||||
for i := 0; i < len(project.Result); i++ {
|
||||
common.LogDebug("searching for", project.Result[i].Repository, "/", project.Result[i].Arch)
|
||||
j := 0
|
||||
found:
|
||||
for ; j < len(refProject.Result); j++ {
|
||||
if project.Result[i].Repository != refProject.Result[j].Repository ||
|
||||
project.Result[i].Arch != refProject.Result[j].Arch {
|
||||
continue
|
||||
}
|
||||
|
||||
for j := 0; j < len(project.Result); j++ {
|
||||
common.LogDebug(" found match for @ idx:", j)
|
||||
res, success := ProcessRepoBuildStatus(project.Result[i].Status, refProject.Result[j].Status)
|
||||
res := ProcessRepoBuildStatus(project.Result[i].Status)
|
||||
switch res {
|
||||
case BuildStatusSummarySuccess:
|
||||
SomeSuccess = SomeSuccess || success
|
||||
break found
|
||||
case BuildStatusSummaryFailed:
|
||||
return BuildStatusSummaryFailed
|
||||
default:
|
||||
return res
|
||||
}
|
||||
}
|
||||
|
||||
if j >= len(refProject.Result) {
|
||||
common.LogDebug("Cannot find results...")
|
||||
common.LogDebug(project.Result[i])
|
||||
common.LogDebug(refProject.Result)
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
}
|
||||
|
||||
if SomeSuccess {
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
return BuildStatusSummaryFailed
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func ProcessRepoBuildStatus(results, ref []*common.PackageBuildStatus) (status BuildStatusSummary, SomeSuccess bool) {
|
||||
PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
|
||||
return strings.Compare(a.Package, b.Package)
|
||||
}
|
||||
func ProcessRepoBuildStatus(results []*common.PackageBuildStatus) (status BuildStatusSummary) {
|
||||
|
||||
PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
|
||||
return strings.Compare(a.Package, b.Package)
|
||||
}
|
||||
|
||||
common.LogDebug("******** REF: ")
|
||||
data, _ := xml.MarshalIndent(ref, "", " ")
|
||||
common.LogDebug(string(data))
|
||||
common.LogDebug("******* RESULTS: ")
|
||||
data, _ = xml.MarshalIndent(results, "", " ")
|
||||
data, _ := xml.MarshalIndent(results, "", " ")
|
||||
common.LogDebug(string(data))
|
||||
common.LogDebug("*******")
|
||||
|
||||
// compare build result
|
||||
slices.SortFunc(results, PackageBuildStatusSorter)
|
||||
slices.SortFunc(ref, PackageBuildStatusSorter)
|
||||
|
||||
j := 0
|
||||
SomeSuccess = false
|
||||
for i := 0; i < len(results); i++ {
|
||||
res, ok := common.ObsBuildStatusDetails[results[i].Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown package result code:", results[i].Code, "for package:", results[i].Package)
|
||||
return BuildStatusSummaryUnknown, SomeSuccess
|
||||
return BuildStatusSummaryUnknown
|
||||
}
|
||||
|
||||
if !res.Finished {
|
||||
return BuildStatusSummaryBuilding, SomeSuccess
|
||||
return BuildStatusSummaryBuilding
|
||||
}
|
||||
|
||||
if !res.Success {
|
||||
// not failed if reference project also failed for same package here
|
||||
for ; j < len(results) && strings.Compare(results[i].Package, ref[j].Package) < 0; j++ {
|
||||
}
|
||||
|
||||
if j < len(results) && results[i].Package == ref[j].Package {
|
||||
refRes, ok := common.ObsBuildStatusDetails[ref[j].Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown ref package result code:", ref[j].Code, "package:", ref[j].Package)
|
||||
return BuildStatusSummaryUnknown, SomeSuccess
|
||||
}
|
||||
|
||||
if !refRes.Finished {
|
||||
common.LogDebug("Not finished building in reference project?")
|
||||
}
|
||||
|
||||
if refRes.Success {
|
||||
return BuildStatusSummaryFailed, SomeSuccess
|
||||
}
|
||||
}
|
||||
} else {
|
||||
SomeSuccess = true
|
||||
return BuildStatusSummaryFailed
|
||||
}
|
||||
}
|
||||
|
||||
return BuildStatusSummarySuccess, SomeSuccess
|
||||
return BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func GetPackageBuildStatus(project *common.BuildResultList, packageName string) (bool, BuildStatusSummary) {
|
||||
var packageStatuses []*common.PackageBuildStatus
|
||||
|
||||
// Collect all statuses for the package
|
||||
for _, result := range project.Result {
|
||||
for _, pkgStatus := range result.Status {
|
||||
if pkgStatus.Package == packageName {
|
||||
packageStatuses = append(packageStatuses, pkgStatus)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(packageStatuses) == 0 {
|
||||
return true, BuildStatusSummaryUnknown // true for 'missing'
|
||||
}
|
||||
|
||||
// Check for any failures
|
||||
for _, pkgStatus := range packageStatuses {
|
||||
res, ok := common.ObsBuildStatusDetails[pkgStatus.Code]
|
||||
if !ok {
|
||||
common.LogInfo("unknown package result code:", pkgStatus.Code, "for package:", pkgStatus.Package)
|
||||
return false, BuildStatusSummaryUnknown
|
||||
}
|
||||
if !res.Success {
|
||||
return false, BuildStatusSummaryFailed
|
||||
}
|
||||
}
|
||||
|
||||
// Check for any unfinished builds
|
||||
for _, pkgStatus := range packageStatuses {
|
||||
res, _ := common.ObsBuildStatusDetails[pkgStatus.Code]
|
||||
// 'ok' is already checked in the loop above
|
||||
if !res.Finished {
|
||||
return false, BuildStatusSummaryBuilding
|
||||
}
|
||||
}
|
||||
|
||||
// If we got here, all are finished and successful
|
||||
return false, BuildStatusSummarySuccess
|
||||
}
|
||||
|
||||
func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingPrj, buildPrj string, stagingMasterPrj string) (*common.ProjectMeta, error) {
|
||||
@@ -327,9 +272,9 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
common.LogError("error fetching project meta for", buildPrj, ". Err:", err)
|
||||
return nil, err
|
||||
}
|
||||
common.LogInfo("Meta: ", meta)
|
||||
|
||||
// generate new project with paths pointinig back to original repos
|
||||
// disable publishing
|
||||
|
||||
meta.Name = stagingPrj
|
||||
meta.Description = fmt.Sprintf(`Pull request build job PR#%d to branch %s of %s/%s`,
|
||||
@@ -344,7 +289,10 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
|
||||
urlPkg := make([]string, 0, len(modifiedOrNew))
|
||||
for _, pkg := range modifiedOrNew {
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(pkg))
|
||||
// FIXME: skip manifest subdirectories itself
|
||||
// strip any leading directory name and just hand over last directory as package name
|
||||
onlybuilds := strings.Split(pkg, "/")
|
||||
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(onlybuilds[len(onlybuilds)-1]))
|
||||
}
|
||||
meta.ScmSync = pr.Head.Repo.CloneURL + "?" + strings.Join(urlPkg, "&") + "#" + pr.Head.Sha
|
||||
if len(meta.ScmSync) >= 65535 {
|
||||
@@ -384,12 +332,14 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
|
||||
func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject, templateProject, subProjectName string, buildDisableRepos []string) error {
|
||||
common.LogDebug("Setup QA sub projects")
|
||||
common.LogDebug("reading templateProject ", templateProject)
|
||||
templateMeta, err := ObsClient.GetProjectMeta(templateProject)
|
||||
if err != nil {
|
||||
common.LogError("error fetching template project meta for", templateProject, ":", err)
|
||||
return err
|
||||
}
|
||||
// patch baseMeta to become the new project
|
||||
common.LogDebug("upcoming project name ", stagingProject, ":", subProjectName)
|
||||
templateMeta.Name = stagingProject + ":" + subProjectName
|
||||
// freeze tag for now
|
||||
if len(templateMeta.ScmSync) > 0 {
|
||||
@@ -427,19 +377,50 @@ func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, git
|
||||
templateMeta.BuildFlags.Contents += string(output)
|
||||
}
|
||||
}
|
||||
|
||||
// include sources from submission project when link points to staging project
|
||||
for idx, l := range templateMeta.Link {
|
||||
if l.Project == stagingConfig.StagingProject {
|
||||
templateMeta.Link[idx].Project = stagingProject
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup ReleaseTarget and modify affected path entries
|
||||
for idx, r := range templateMeta.Repositories {
|
||||
templateMeta.Repositories[idx].ReleaseTargets = nil
|
||||
|
||||
for pidx, path := range r.Paths {
|
||||
// Check for path building against code stream
|
||||
common.LogDebug(" checking in ", templateMeta.Name)
|
||||
common.LogDebug(" stagingProject ", stagingProject)
|
||||
common.LogDebug(" checking for ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
common.LogDebug(" path.Project ", path.Project)
|
||||
common.LogDebug(" stagingConfig.ObsProject ", stagingConfig.ObsProject)
|
||||
common.LogDebug(" stagingConfig.StagingProject ", stagingConfig.StagingProject)
|
||||
common.LogDebug(" templateProject ", templateProject)
|
||||
if path.Project == stagingConfig.ObsProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = stagingProject
|
||||
}
|
||||
} else
|
||||
// Check for path building against a repo in template project itself
|
||||
if path.Project == templateProject {
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = templateMeta.Name
|
||||
} else
|
||||
// Check for path prefixes against a template project inside of template project area
|
||||
if strings.HasPrefix(path.Project, stagingConfig.StagingProject + ":") {
|
||||
newProjectName := stagingProject
|
||||
// find project name
|
||||
for _, setup := range stagingConfig.QA {
|
||||
if setup.Origin == path.Project {
|
||||
common.LogDebug(" Match:", setup.Origin)
|
||||
newProjectName = newProjectName + ":" + setup.Name
|
||||
common.LogDebug(" New:", newProjectName)
|
||||
break
|
||||
}
|
||||
}
|
||||
templateMeta.Repositories[idx].Paths[pidx].Project = newProjectName
|
||||
common.LogDebug(" Matched prefix")
|
||||
}
|
||||
common.LogDebug(" Path using project ", templateMeta.Repositories[idx].Paths[pidx].Project)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -447,6 +428,8 @@ func CreateQASubProject(stagingConfig *common.StagingConfig, git common.Git, git
|
||||
err = ObsClient.SetProjectMeta(templateMeta)
|
||||
if err != nil {
|
||||
common.LogError("cannot create project:", templateMeta.Name, err)
|
||||
x, _ := xml.MarshalIndent(templateMeta, "", " ")
|
||||
common.LogError(string(x))
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
@@ -553,7 +536,7 @@ func FetchOurLatestActionableReview(gitea common.Gitea, org, repo string, id int
|
||||
}
|
||||
|
||||
func ParseNotificationToPR(thread *models.NotificationThread) (org string, repo string, num int64, err error) {
|
||||
rx := regexp.MustCompile(`^https://src\.(?:open)?suse\.(?:org|de)/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
rx := regexp.MustCompile(`^.*/api/v\d+/repos/(?<org>[-_a-zA-Z0-9]+)/(?<project>[-_a-zA-Z0-9]+)/issues/(?<num>[0-9]+)$`)
|
||||
notification := thread.Subject
|
||||
match := rx.FindStringSubmatch(notification.URL)
|
||||
if match == nil {
|
||||
@@ -702,6 +685,64 @@ func SetStatus(gitea common.Gitea, org, repo, hash string, status *models.Commit
|
||||
return err
|
||||
}
|
||||
|
||||
// commentOnPackagePR posts msg as a comment on the given package pull
// request. In dry-run mode it only logs what it would do. Failures to
// fetch the PR or to post the comment are logged and otherwise ignored
// (the notification is best-effort).
func commentOnPackagePR(gitea common.Gitea, org string, repo string, prNum int64, msg string) {
	if IsDryRun {
		// NOTE(review): other call sites pass common.Log* space-separated
		// arguments rather than printf formats -- confirm the %s/%d verbs
		// here are actually interpolated and not printed literally.
		common.LogInfo("Would comment on package PR %s/%s#%d: %s", org, repo, prNum, msg)
		return
	}

	pr, err := gitea.GetPullRequest(org, repo, prNum)
	if err != nil {
		common.LogError("Failed to get package PR %s/%s#%d: %v", org, repo, prNum, err)
		return
	}

	err = gitea.AddComment(pr, msg)
	if err != nil {
		common.LogError("Failed to comment on package PR %s/%s#%d: %v", org, repo, prNum, err)
	}
}
|
||||
|
||||
// ProcessQaProjects creates the QA sub-projects configured for the given
// staging project and removes those whose required PR label is absent.
// It returns the names of the QA projects that remain in use.
//
// For every QA setup in stagingConfig.QA:
//   - if the setup declares a Label the PR does not carry, the QA project
//     is deleted (best-effort) and skipped;
//   - otherwise the project is kept and, if it does not exist yet,
//     created via CreateQASubProject.
//
// When at least one project was created, a single summary comment with
// links to the new projects is posted on the PR.
func ProcessQaProjects(stagingConfig *common.StagingConfig, git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingProject string) []string {
	usedQAprojects := make([]string, 0)
	// set of label names attached to the PR, for quick membership tests
	prLabelNames := make(map[string]int)
	for _, label := range pr.Labels {
		prLabelNames[label.Name] = 1
	}
	msg := ""
	for _, setup := range stagingConfig.QA {
		QAproject := stagingProject + ":" + setup.Name
		if len(setup.Label) > 0 {
			if _, ok := prLabelNames[setup.Label]; !ok {
				if !IsDryRun {
					// blindly remove, will fail when not existing
					// (the error is deliberately ignored: deletion is best-effort)
					ObsClient.DeleteProject(QAproject)
				}
				common.LogInfo("QA project ", setup.Name, "has no matching Label")
				continue
			}
		}

		usedQAprojects = append(usedQAprojects, QAproject)
		// check for existence first: no error but nil meta means a 404,
		// i.e. the QA project does not exist yet
		if meta, err := ObsClient.GetProjectMeta(QAproject); meta == nil && err == nil {
			common.LogInfo("Create QA project ", QAproject)
			// NOTE(review): any error from CreateQASubProject is ignored here -- confirm intended
			CreateQASubProject(stagingConfig, git, gitea, pr,
				stagingProject,
				setup.Origin,
				setup.Name,
				setup.BuildDisableRepos)
			msg = msg + "QA Project added: " + ObsWebHost + "/project/show/" +
				QAproject + "\n"
		}
	}
	if len(msg) > 1 {
		// comment error ignored; the notification is best-effort
		gitea.AddComment(pr, msg)
	}
	return usedQAprojects
}
|
||||
|
||||
func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, error) {
|
||||
dir, err := os.MkdirTemp(os.TempDir(), BotName)
|
||||
common.PanicOnError(err)
|
||||
@@ -779,11 +820,12 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
meta, err := ObsClient.GetProjectMeta(stagingConfig.ObsProject)
|
||||
if err != nil || meta == nil {
|
||||
common.LogError("Cannot find reference project meta:", stagingConfig.ObsProject, err)
|
||||
if !IsDryRun {
|
||||
if !IsDryRun && err == nil {
|
||||
common.LogError("Reference project is absent:", stagingConfig.ObsProject, err)
|
||||
_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Cannot fetch reference project meta")
|
||||
return true, err
|
||||
}
|
||||
return true, nil
|
||||
return true, err
|
||||
}
|
||||
|
||||
if metaUrl, err := url.Parse(meta.ScmSync); err != nil {
|
||||
@@ -933,36 +975,49 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
msg = "Build is started in " + ObsWebHost + "/project/show/" +
|
||||
stagingProject + " .\n"
|
||||
|
||||
if len(stagingConfig.QA) > 0 {
|
||||
msg = msg + "\nAdditional QA builds: \n"
|
||||
}
|
||||
SetStatus(gitea, org, repo, pr.Head.Sha, status)
|
||||
for _, setup := range stagingConfig.QA {
|
||||
CreateQASubProject(stagingConfig, git, gitea, pr,
|
||||
stagingProject,
|
||||
setup.Origin,
|
||||
setup.Name,
|
||||
setup.BuildDisableRepos)
|
||||
msg = msg + ObsWebHost + "/project/show/" +
|
||||
stagingProject + ":" + setup.Name + "\n"
|
||||
}
|
||||
}
|
||||
if change != RequestModificationNoChange && !IsDryRun {
|
||||
gitea.AddComment(pr, msg)
|
||||
}
|
||||
|
||||
baseResult, err := ObsClient.LastBuildResults(stagingConfig.ObsProject, modifiedPackages...)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching ref project status for", stagingConfig.ObsProject, ":", err)
|
||||
}
|
||||
stagingResult, err := ObsClient.BuildStatus(stagingProject)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching stage project status for", stagingProject, ":", err)
|
||||
}
|
||||
buildStatus := ProcessBuildStatus(stagingResult, baseResult)
|
||||
|
||||
_, packagePRs := common.ExtractDescriptionAndPRs(bufio.NewScanner(strings.NewReader(pr.Body)))
|
||||
|
||||
// always update QA projects because Labels can change
|
||||
qaProjects := ProcessQaProjects(stagingConfig, git, gitea, pr, stagingProject)
|
||||
|
||||
done := false
|
||||
switch buildStatus {
|
||||
overallBuildStatus := ProcessBuildStatus(stagingResult)
|
||||
commentSuffix := ""
|
||||
if len(qaProjects) > 0 && overallBuildStatus == BuildStatusSummarySuccess {
|
||||
seperator := " in "
|
||||
for _, qaProject := range qaProjects {
|
||||
qaResult, err := ObsClient.BuildStatus(qaProject)
|
||||
if err != nil {
|
||||
common.LogError("failed fetching stage project status for", qaProject, ":", err)
|
||||
}
|
||||
qaBuildStatus := ProcessBuildStatus(qaResult)
|
||||
if qaBuildStatus != BuildStatusSummarySuccess {
|
||||
// either still building or in failed state
|
||||
overallBuildStatus = qaBuildStatus
|
||||
commentSuffix = commentSuffix + seperator + qaProject
|
||||
seperator = ", "
|
||||
}
|
||||
if qaBuildStatus == BuildStatusSummaryFailed {
|
||||
// main project was successful, but QA project, adapt the link to QA project
|
||||
// and change commit state to fail
|
||||
status.Status = common.CommitStatus_Fail
|
||||
status.TargetURL = ObsWebHost + "/project/show/" + qaProject
|
||||
SetStatus(gitea, org, repo, pr.Head.Sha, status)
|
||||
}
|
||||
}
|
||||
}
|
||||
switch overallBuildStatus {
|
||||
case BuildStatusSummarySuccess:
|
||||
status.Status = common.CommitStatus_Success
|
||||
done = true
|
||||
@@ -982,7 +1037,44 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
}
|
||||
}
|
||||
}
|
||||
common.LogInfo("Build status:", buildStatus)
|
||||
|
||||
if overallBuildStatus == BuildStatusSummarySuccess || overallBuildStatus == BuildStatusSummaryFailed {
|
||||
// avoid commenting while build is in progress
|
||||
missingPkgs := []string{}
|
||||
|
||||
for _, packagePR := range packagePRs {
|
||||
missing, packageBuildStatus := GetPackageBuildStatus(stagingResult, packagePR.Repo)
|
||||
if missing {
|
||||
missingPkgs = append(missingPkgs, packagePR.Repo)
|
||||
continue
|
||||
}
|
||||
var msg string
|
||||
switch packageBuildStatus {
|
||||
case BuildStatusSummarySuccess:
|
||||
msg = fmt.Sprintf("Build successful, for more information go in %s/project/show/%s.\n", ObsWebHost, stagingProject)
|
||||
case BuildStatusSummaryFailed:
|
||||
msg = fmt.Sprintf("Build failed, for more information go in %s/project/show/%s.\n", ObsWebHost, stagingProject)
|
||||
default:
|
||||
continue
|
||||
}
|
||||
commentOnPackagePR(gitea, packagePR.Org, packagePR.Repo, packagePR.Num, msg)
|
||||
}
|
||||
|
||||
if len(missingPkgs) > 0 {
|
||||
overallBuildStatus = BuildStatusSummaryFailed
|
||||
msg := "The following packages were not found in the staging project:\n"
|
||||
for _, pkg := range missingPkgs {
|
||||
msg = msg + " - " + pkg + "\n"
|
||||
}
|
||||
common.LogInfo(msg)
|
||||
err := gitea.AddComment(pr, msg)
|
||||
if err != nil {
|
||||
common.LogError(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
common.LogInfo("Build status:", overallBuildStatus)
|
||||
if !IsDryRun {
|
||||
if err = SetStatus(gitea, org, repo, pr.Head.Sha, status); err != nil {
|
||||
return false, err
|
||||
@@ -1047,6 +1139,7 @@ func PollWorkNotifications(giteaUrl string) {
|
||||
|
||||
var ListPullNotificationsOnly bool
|
||||
var GiteaUrl string
|
||||
var ObsApiHost string
|
||||
var ObsWebHost string
|
||||
var IsDryRun bool
|
||||
var ProcessPROnly string
|
||||
@@ -1069,8 +1162,8 @@ func main() {
|
||||
flag.BoolVar(&ListPullNotificationsOnly, "list-notifications-only", false, "Only lists notifications without acting on them")
|
||||
ProcessPROnly := flag.String("pr", "", "Process only specific PR and ignore the rest. Use for debugging")
|
||||
buildRoot := flag.String("build-root", "", "Default build location for staging projects. Default is bot's home project")
|
||||
flag.StringVar(&GiteaUrl, "gitea-url", "https://src.opensuse.org", "Gitea instance")
|
||||
obsApiHost := flag.String("obs", "https://api.opensuse.org", "API for OBS instance")
|
||||
flag.StringVar(&GiteaUrl, "gitea-url", "", "Gitea instance")
|
||||
flag.StringVar(&ObsApiHost, "obs", "", "API for OBS instance")
|
||||
flag.StringVar(&ObsWebHost, "obs-web", "", "Web OBS instance, if not derived from the obs config")
|
||||
flag.BoolVar(&IsDryRun, "dry", false, "Dry-run, don't actually create any build projects or review changes")
|
||||
debug := flag.Bool("debug", false, "Turns on debug logging")
|
||||
@@ -1082,19 +1175,34 @@ func main() {
|
||||
common.SetLoggingLevel(common.LogLevelInfo)
|
||||
}
|
||||
|
||||
if len(GiteaUrl) == 0 {
|
||||
GiteaUrl = os.Getenv(common.GiteaHostEnv)
|
||||
}
|
||||
if len(GiteaUrl) == 0 {
|
||||
GiteaUrl = "https://src.opensuse.org"
|
||||
}
|
||||
if len(ObsApiHost) == 0 {
|
||||
ObsApiHost = os.Getenv(common.ObsApiEnv)
|
||||
}
|
||||
if len(ObsApiHost) == 0 {
|
||||
ObsApiHost = "https://api.opensuse.org"
|
||||
}
|
||||
if len(ObsWebHost) == 0 {
|
||||
ObsWebHost = ObsWebHostFromApiHost(*obsApiHost)
|
||||
ObsWebHost = os.Getenv(common.ObsWebEnv)
|
||||
}
|
||||
if len(ObsWebHost) == 0 {
|
||||
ObsWebHost = "https://build.opensuse.org"
|
||||
}
|
||||
|
||||
common.LogDebug("OBS Gitea Host:", GiteaUrl)
|
||||
common.LogDebug("OBS Web Host:", ObsWebHost)
|
||||
common.LogDebug("OBS API Host:", *obsApiHost)
|
||||
common.LogDebug("OBS API Host:", ObsApiHost)
|
||||
|
||||
common.PanicOnErrorWithMsg(common.RequireGiteaSecretToken(), "Cannot find GITEA_TOKEN")
|
||||
common.PanicOnErrorWithMsg(common.RequireObsSecretToken(), "Cannot find OBS_USER and OBS_PASSWORD")
|
||||
|
||||
var err error
|
||||
if ObsClient, err = common.NewObsClient(*obsApiHost); err != nil {
|
||||
if ObsClient, err = common.NewObsClient(ObsApiHost); err != nil {
|
||||
log.Error(err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -105,10 +105,10 @@ func ProjectStatusSummarySvg(res []*common.BuildResult) []byte {
|
||||
|
||||
func LinkToBuildlog(R *common.BuildResult, S *common.PackageBuildStatus) string {
|
||||
if R != nil && S != nil {
|
||||
switch S.Code {
|
||||
case "succeeded", "failed", "building":
|
||||
//switch S.Code {
|
||||
//case "succeeded", "failed", "building":
|
||||
return "/buildlog/" + url.PathEscape(R.Project) + "/" + url.PathEscape(S.Package) + "/" + url.PathEscape(R.Repository) + "/" + url.PathEscape(R.Arch)
|
||||
}
|
||||
//}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
@@ -170,6 +170,7 @@ func BuildStatusSvg(repo *common.BuildResult, status *common.PackageBuildStatus)
|
||||
buildStatus, ok := common.ObsBuildStatusDetails[status.Code]
|
||||
if !ok {
|
||||
buildStatus = common.ObsBuildStatusDetails["error"]
|
||||
common.LogError("Cannot find detail for status.Code", status.Code)
|
||||
}
|
||||
fillColor := "#480" // orange
|
||||
textColor := "#888"
|
||||
|
||||
98
utils/maintainer-update/main.go
Normal file
98
utils/maintainer-update/main.go
Normal file
@@ -0,0 +1,98 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func WriteNewMaintainershipFile(m *common.MaintainershipMap, filename string) {
|
||||
f, err := os.Create(filename + ".new")
|
||||
common.PanicOnError(err)
|
||||
common.PanicOnError(m.WriteMaintainershipFile(f))
|
||||
common.PanicOnError(f.Sync())
|
||||
common.PanicOnError(f.Close())
|
||||
common.PanicOnError(os.Rename(filename+".new", filename))
|
||||
}
|
||||
|
||||
func run() error {
|
||||
pkg := flag.String("package", "", "Package to modify")
|
||||
rm := flag.Bool("rm", false, "Remove maintainer from package")
|
||||
add := flag.Bool("add", false, "Add maintainer to package")
|
||||
lint := flag.Bool("lint-only", false, "Reformat entire _maintainership.json only")
|
||||
flag.Parse()
|
||||
|
||||
if (*add == *rm) && !*lint {
|
||||
return fmt.Errorf("Need to either add or remove a maintainer, or lint")
|
||||
}
|
||||
|
||||
filename := common.MaintainershipFile
|
||||
if *lint {
|
||||
if len(flag.Args()) > 0 {
|
||||
filename = flag.Arg(0)
|
||||
}
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(filename)
|
||||
if os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
m, err := common.ParseMaintainershipData(data)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to parse JSON: %w", err)
|
||||
}
|
||||
|
||||
if *lint {
|
||||
m.Raw = nil // forces a rewrite
|
||||
} else {
|
||||
users := flag.Args()
|
||||
if len(users) > 0 {
|
||||
maintainers, ok := m.Data[*pkg]
|
||||
if !ok && !*add {
|
||||
return fmt.Errorf("No package %s and not adding one.", *pkg)
|
||||
}
|
||||
|
||||
if *add {
|
||||
for _, u := range users {
|
||||
if !slices.Contains(maintainers, u) {
|
||||
maintainers = append(maintainers, u)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if *rm {
|
||||
newMaintainers := make([]string, 0, len(maintainers))
|
||||
for _, m := range maintainers {
|
||||
if !slices.Contains(users, m) {
|
||||
newMaintainers = append(newMaintainers, m)
|
||||
}
|
||||
}
|
||||
maintainers = newMaintainers
|
||||
}
|
||||
|
||||
if len(maintainers) > 0 {
|
||||
slices.Sort(maintainers)
|
||||
m.Data[*pkg] = maintainers
|
||||
} else {
|
||||
delete(m.Data, *pkg)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
WriteNewMaintainershipFile(m, filename)
|
||||
return nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
if err := run(); err != nil {
|
||||
common.LogError(err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
242
utils/maintainer-update/main_test.go
Normal file
242
utils/maintainer-update/main_test.go
Normal file
@@ -0,0 +1,242 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"os"
|
||||
"os/exec"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
if os.Getenv("BE_MAIN") == "1" {
|
||||
main()
|
||||
return
|
||||
}
|
||||
os.Exit(m.Run())
|
||||
}
|
||||
|
||||
// TestRun exercises run() in-process against a temporary working
// directory, table-driven over add/remove/lint scenarios.
func TestRun(t *testing.T) {
	tests := []struct {
		name          string
		inData        string   // initial content of the maintainership file ("" = no file)
		expectedOut   string   // expected JSON content after run(); "" = don't check
		params        []string // command-line arguments handed to run()
		expectedError string   // substring expected in the returned error; "" = expect success
		isDir         bool     // create the target as a directory to force a read error
	}{
		{
			name:        "add user to existing package",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-add", "user2"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:        "add user to new package",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg2", "-add", "user2"},
			expectedOut: `{"pkg1": ["user1"], "pkg2": ["user2"]}`,
		},
		{
			name:        "no-op with no users",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-add"},
			expectedOut: `{"pkg1": ["user1"]}`,
		},
		{
			name:        "add existing user",
			inData:      `{"pkg1": ["user1", "user2"]}`,
			params:      []string{"-package", "pkg1", "-add", "user2"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:        "remove user from package",
			inData:      `{"pkg1": ["user1", "user2"]}`,
			params:      []string{"-package", "pkg1", "-rm", "user2"},
			expectedOut: `{"pkg1": ["user1"]}`,
		},
		{
			name:        "remove last user from package",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-rm", "user1"},
			expectedOut: `{}`,
		},
		{
			name:        "remove non-existent user",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-rm", "user2"},
			expectedOut: `{"pkg1": ["user1"]}`,
		},
		{
			name:        "lint only unsorted",
			inData:      `{"pkg1": ["user2", "user1"]}`,
			params:      []string{"-lint-only"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:        "lint only no changes",
			inData:      `{"pkg1": ["user1", "user2"]}`,
			params:      []string{"-lint-only"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:          "no file",
			params:        []string{"-add"},
			expectedError: "no such file or directory",
		},
		{
			name:          "invalid json",
			inData:        `{"pkg1": ["user1"`,
			params:        []string{"-add"},
			expectedError: "Failed to parse JSON",
		},
		{
			name:        "add",
			inData:      `{"pkg1": ["user1", "user2"]}`,
			params:      []string{"-package", "pkg1", "-add", "user3"},
			expectedOut: `{"pkg1": ["user1", "user2", "user3"]}`,
		},
		{
			name:        "lint specific file",
			inData:      `{"pkg1": ["user2", "user1"]}`,
			params:      []string{"-lint-only", "other.json"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:        "add user to package when it was not there before",
			inData:      `{}`,
			params:      []string{"-package", "newpkg", "-add", "user1"},
			expectedOut: `{"newpkg": ["user1"]}`,
		},
		{
			name:          "unreadable file (is a directory)",
			isDir:         true,
			params:        []string{"-rm"},
			expectedError: "is a directory",
		},
		{
			name:          "remove user from non-existent package",
			inData:        `{"pkg1": ["user1"]}`,
			params:        []string{"-package", "pkg2", "-rm", "user2"},
			expectedError: "No package pkg2 and not adding one.",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// run() reads the maintainership file from the current
			// directory, so each subtest runs in its own temp dir.
			dir := t.TempDir()
			oldWd, _ := os.Getwd()
			_ = os.Chdir(dir)
			defer os.Chdir(oldWd)

			// the "lint specific file" case passes an explicit filename
			targetFile := common.MaintainershipFile
			if tt.name == "lint specific file" {
				targetFile = "other.json"
			}

			if tt.isDir {
				_ = os.Mkdir(targetFile, 0755)
			} else if tt.inData != "" {
				_ = os.WriteFile(targetFile, []byte(tt.inData), 0644)
			}

			// run() registers its flags on the global FlagSet; reset it so
			// each subtest can re-register and re-parse its own arguments
			flag.CommandLine = flag.NewFlagSet(os.Args[0], flag.ContinueOnError)

			os.Args = append([]string{"cmd"}, tt.params...)
			err := run()

			if tt.expectedError != "" {
				if err == nil {
					t.Fatalf("expected error containing %q, but got none", tt.expectedError)
				}
				if !strings.Contains(err.Error(), tt.expectedError) {
					t.Fatalf("expected error containing %q, got %q", tt.expectedError, err.Error())
				}
			} else if err != nil {
				t.Fatalf("unexpected error: %v", err)
			}

			if tt.expectedOut != "" {
				// compare as parsed JSON so formatting differences don't matter
				data, _ := os.ReadFile(targetFile)
				var got, expected map[string][]string
				_ = json.Unmarshal(data, &got)
				_ = json.Unmarshal([]byte(tt.expectedOut), &expected)

				// both empty: DeepEqual would distinguish nil from empty map
				if len(got) == 0 && len(expected) == 0 {
					return
				}

				if !reflect.DeepEqual(got, expected) {
					t.Fatalf("expected %v, got %v", expected, got)
				}
			}
		})
	}
}
|
||||
|
||||
// TestMainRecursive covers main() by re-executing the test binary with
// BE_MAIN=1, which makes TestMain run main() instead of the test suite.
func TestMainRecursive(t *testing.T) {
	tests := []struct {
		name        string
		inData      string   // initial maintainership file content ("" = none)
		expectedOut string   // expected file content after the child exits
		params      []string // arguments forwarded to the child process
		expectExit  bool     // whether the child is expected to exit non-zero
	}{
		{
			name:        "test main() via recursive call",
			inData:      `{"pkg1": ["user1"]}`,
			params:      []string{"-package", "pkg1", "-add", "user2"},
			expectedOut: `{"pkg1": ["user1", "user2"]}`,
		},
		{
			name:       "test main() failure",
			params:     []string{"-package", "pkg1"},
			expectExit: true,
		},
	}

	exe, _ := os.Executable()

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			// the child inherits the working directory, so isolate each
			// subtest in its own temp dir
			dir := t.TempDir()
			oldWd, _ := os.Getwd()
			_ = os.Chdir(dir)
			defer os.Chdir(oldWd)

			if tt.inData != "" {
				_ = os.WriteFile(common.MaintainershipFile, []byte(tt.inData), 0644)
			}

			// -test.run=None matches no test, so the child only executes
			// TestMain, which dispatches to main() because BE_MAIN=1
			cmd := exec.Command(exe, append([]string{"-test.run=None"}, tt.params...)...)
			cmd.Env = append(os.Environ(), "BE_MAIN=1")
			out, runErr := cmd.CombinedOutput()

			if tt.expectExit {
				if runErr == nil {
					t.Fatalf("expected exit with error, but it succeeded")
				}
				return
			}

			if runErr != nil {
				t.Fatalf("unexpected error: %v: %s", runErr, string(out))
			}

			if tt.expectedOut != "" {
				// compare as parsed JSON so formatting differences don't matter
				data, _ := os.ReadFile(common.MaintainershipFile)
				var got, expected map[string][]string
				_ = json.Unmarshal(data, &got)
				_ = json.Unmarshal([]byte(tt.expectedOut), &expected)

				if !reflect.DeepEqual(got, expected) {
					t.Fatalf("expected %v, got %v", expected, got)
				}
			}
		})
	}
}
|
||||
@@ -24,6 +24,7 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
|
||||
title_refs := make([]string, 0, len(prset.PRs)-1)
|
||||
refs := make([]string, 0, len(prset.PRs)-1)
|
||||
|
||||
prefix := ""
|
||||
for _, pr := range prset.PRs {
|
||||
if prset.IsPrjGitPR(pr.PR) {
|
||||
continue
|
||||
@@ -32,6 +33,9 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
|
||||
// remove PRs that are not open from description
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(pr.PR.Title, "WIP:") {
|
||||
prefix = "WIP: "
|
||||
}
|
||||
org, repo, idx := pr.PRComponents()
|
||||
|
||||
title_refs = append(title_refs, repo)
|
||||
@@ -42,7 +46,7 @@ func PrjGitDescription(prset *common.PRSet) (title string, desc string) {
|
||||
slices.Sort(title_refs)
|
||||
slices.Sort(refs)
|
||||
|
||||
title = "Forwarded PRs: " + strings.Join(title_refs, ", ")
|
||||
title = prefix + "Forwarded PRs: " + strings.Join(title_refs, ", ")
|
||||
desc = fmt.Sprintf("This is a forwarded pull request by %s\nreferencing the following pull request(s):\n\n", GitAuthor) + strings.Join(refs, "\n") + "\n"
|
||||
|
||||
if prset.Config.ManualMergeOnly {
|
||||
|
||||
Reference in New Issue
Block a user