2
1
forked from adamm/autogits

6 Commits

Author SHA256 Message Date
f2089f99fc staging: use helper function to SetCommitStatus 2025-09-09 12:55:14 +02:00
10ea3a8f8f obs-staging-bot: Fix setting of commit status 2025-09-09 12:46:43 +02:00
9faa6ead49 Log errors on SetCommitStatus 2025-09-09 12:46:21 +02:00
29cce5741a staging: typo fix 2025-09-09 12:46:11 +02:00
804e542c3f Decline too large staging projects
In most cases this indicates an error in the pull request anyway.
2025-09-09 12:41:07 +02:00
72899162b0 status: need to fetch repositories during sync
We need to fetch repositories so that we can have package
data. We only need to fetch one set of results per project,
not all repos.
2025-09-03 16:42:01 +02:00
3 changed files with 68 additions and 26 deletions

View File

@@ -322,10 +322,13 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(pkg)) urlPkg = append(urlPkg, "onlybuild="+url.QueryEscape(pkg))
} }
meta.ScmSync = pr.Head.Repo.CloneURL + "?" + strings.Join(urlPkg, "&") + "#" + pr.Head.Sha meta.ScmSync = pr.Head.Repo.CloneURL + "?" + strings.Join(urlPkg, "&") + "#" + pr.Head.Sha
if len(meta.ScmSync) >= 65535 {
return nil, errors.New("Reached max amount of package changes per request")
}
meta.Title = fmt.Sprintf("PR#%d to %s", pr.Index, pr.Base.Name) meta.Title = fmt.Sprintf("PR#%d to %s", pr.Index, pr.Base.Name)
// QE wants it published ... also we should not hardcode it here, since // QE wants it published ... also we should not hardcode it here, since
// it is configurable via the :PullRequest project // it is configurable via the :PullRequest project
// meta.PublicFlags = common.Flags{Contents: "<disable/>"} // meta.PublicFlags = common.Flags{Contents: "<disable/>"}
meta.Groups = nil meta.Groups = nil
meta.Persons = nil meta.Persons = nil
@@ -633,6 +636,14 @@ func CleanupPullNotification(gitea common.Gitea, thread *models.NotificationThre
return false // cleaned up now, but the cleanup was not aleady done return false // cleaned up now, but the cleanup was not aleady done
} }
func SetStatus(gitea common.Gitea, org, repo, hash string, status *models.CommitStatus) error {
_, err := gitea.SetCommitStatus(org, repo, hash, status)
if err != nil {
common.LogError(err)
}
return err
}
func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, error) { func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, error) {
dir, err := os.MkdirTemp(os.TempDir(), BotName) dir, err := os.MkdirTemp(os.TempDir(), BotName)
common.PanicOnError(err) common.PanicOnError(err)
@@ -837,6 +848,22 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
TargetURL: ObsWebHost + "/project/show/" + stagingProject, TargetURL: ObsWebHost + "/project/show/" + stagingProject,
} }
if err != nil {
msg := "Unable to setup stage project " + stagingConfig.ObsProject
status.Status = common.CommitStatus_Fail
common.LogError(msg)
if !IsDryRun {
SetStatus(gitea, org, repo, pr.Head.Sha, status)
_, err = gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, msg)
if err != nil {
common.LogError(err)
} else {
return true, nil
}
}
return false, nil
}
msg := "Changed source updated for build" msg := "Changed source updated for build"
if change == RequestModificationProjectCreated { if change == RequestModificationProjectCreated {
msg = "Build is started in " + ObsWebHost + "/project/show/" + msg = "Build is started in " + ObsWebHost + "/project/show/" +
@@ -845,8 +872,7 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
if len(stagingConfig.QA) > 0 { if len(stagingConfig.QA) > 0 {
msg = msg + "\nAdditional QA builds: \n" msg = msg + "\nAdditional QA builds: \n"
} }
gitea.SetCommitStatus(pr.Base.Repo.Owner.UserName, pr.Base.Repo.Name, pr.Head.Sha, status) SetStatus(gitea, org, repo, pr.Head.Sha, status)
for _, setup := range stagingConfig.QA { for _, setup := range stagingConfig.QA {
CreateQASubProject(stagingConfig, git, gitea, pr, CreateQASubProject(stagingConfig, git, gitea, pr,
stagingProject, stagingProject,
@@ -870,32 +896,34 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
} }
buildStatus := ProcessBuildStatus(stagingResult, baseResult) buildStatus := ProcessBuildStatus(stagingResult, baseResult)
done := false
switch buildStatus { switch buildStatus {
case BuildStatusSummarySuccess: case BuildStatusSummarySuccess:
status.Status = common.CommitStatus_Success status.Status = common.CommitStatus_Success
done = true
if !IsDryRun { if !IsDryRun {
_, err := gitea.AddReviewComment(pr, common.ReviewStateApproved, "Build successful") _, err := gitea.AddReviewComment(pr, common.ReviewStateApproved, "Build successful")
if err != nil { if err != nil {
common.LogError(err) common.LogError(err)
} else {
return true, nil
} }
} }
case BuildStatusSummaryFailed: case BuildStatusSummaryFailed:
status.Status = common.CommitStatus_Fail status.Status = common.CommitStatus_Fail
done = true
if !IsDryRun { if !IsDryRun {
_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Build failed") _, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Build failed")
if err != nil { if err != nil {
common.LogError(err) common.LogError(err)
} else {
return true, nil
} }
} }
} }
common.LogInfo("Build status:", buildStatus) common.LogInfo("Build status:", buildStatus)
gitea.SetCommitStatus(pr.Base.Repo.Owner.UserName, pr.Base.Repo.Name, pr.Head.Sha, status) if !IsDryRun {
if err = SetStatus(gitea, org, repo, pr.Head.Sha, status); err != nil {
// waiting for build results -- nothing to do return false, err
}
}
return done, nil
} else if err == NonActionableReviewError || err == NoReviewsFoundError { } else if err == NonActionableReviewError || err == NoReviewsFoundError {
return true, nil return true, nil

View File

@@ -278,12 +278,12 @@ func main() {
} }
names := queries["q"] names := queries["q"]
if len(names) < 1 || len(names) > 10 { if len(names) != 1 {
res.WriteHeader(400) res.WriteHeader(400)
return return
} }
packages := FindPackages(names) packages := FindPackages(names[0])
data, err := json.MarshalIndent(packages, "", " ") data, err := json.MarshalIndent(packages, "", " ")
if err != nil { if err != nil {
res.WriteHeader(500) res.WriteHeader(500)
@@ -292,6 +292,7 @@ func main() {
} }
res.Write(data) res.Write(data)
res.Header().Add("content-type", "application/json")
res.WriteHeader(200) res.WriteHeader(200)
}) })

View File

@@ -29,13 +29,15 @@ func UpdateResults(r *common.BuildResult) {
RepoStatusLock.Lock() RepoStatusLock.Lock()
defer RepoStatusLock.Unlock() defer RepoStatusLock.Unlock()
updateResultsWithoutLocking(r)
}
func updateResultsWithoutLocking(r *common.BuildResult) {
key := "result." + r.Project + "/" + r.Repository + "/" + r.Arch key := "result." + r.Project + "/" + r.Repository + "/" + r.Arch
common.LogDebug(" + Updating", key)
data, err := redisClient.HGetAll(context.Background(), key).Result() data, err := redisClient.HGetAll(context.Background(), key).Result()
if err != nil { if err != nil {
common.LogError("Failed fetching build results for", key, err) common.LogError("Failed fetching build results for", key, err)
} }
common.LogDebug(" + Update size", len(data))
reset_time := time.Date(1000, 1, 1, 1, 1, 1, 1, time.Local) reset_time := time.Date(1000, 1, 1, 1, 1, 1, 1, time.Local)
for _, pkg := range r.Status { for _, pkg := range r.Status {
@@ -110,21 +112,15 @@ func FindRepoResults(project, repo string) []*common.BuildResult {
return ret return ret
} }
func FindPackages(search_terms []string) []string { func FindPackages(pkg string) []string {
RepoStatusLock.RLock() RepoStatusLock.RLock()
defer RepoStatusLock.RUnlock() defer RepoStatusLock.RUnlock()
data := make([]string, 0, 100) data := make([]string, 0, 100)
for _, repo := range RepoStatus { for _, repo := range RepoStatus {
for _, status := range repo.Status { for _, status := range repo.Status {
pkg := status.Package if pkg == status.Package {
match := true entry := repo.Project + "/" + pkg
for _, term := range search_terms {
match = match && strings.Contains(pkg, term)
}
if match {
entry := repo.Project + "/" + repo.Status[0].Package
if idx, found := slices.BinarySearch(data, entry); !found { if idx, found := slices.BinarySearch(data, entry); !found {
data = slices.Insert(data, idx, entry) data = slices.Insert(data, idx, entry)
if len(data) >= 100 { if len(data) >= 100 {
@@ -188,6 +184,8 @@ func RescanRepositories() error {
RepoStatusLock.Unlock() RepoStatusLock.Unlock()
var count int var count int
projectsLooked := make([]string, 0, 10000)
for { for {
var data []string var data []string
data, cursor, err = redisClient.ScanType(ctx, cursor, "", 1000, "hash").Result() data, cursor, err = redisClient.ScanType(ctx, cursor, "", 1000, "hash").Result()
@@ -196,6 +194,7 @@ func RescanRepositories() error {
return err return err
} }
wg := &sync.WaitGroup{}
RepoStatusLock.Lock() RepoStatusLock.Lock()
for _, repo := range data { for _, repo := range data {
r := strings.Split(repo, "/") r := strings.Split(repo, "/")
@@ -207,14 +206,28 @@ func RescanRepositories() error {
Repository: r[1], Repository: r[1],
Arch: r[2], Arch: r[2],
} }
if pos, found := slices.BinarySearchFunc(RepoStatus, d, common.BuildResultComp); found {
var pos int
var found bool
if pos, found = slices.BinarySearchFunc(RepoStatus, d, common.BuildResultComp); found {
RepoStatus[pos].Dirty = true RepoStatus[pos].Dirty = true
} else { } else {
d.Dirty = true d.Dirty = true
RepoStatus = slices.Insert(RepoStatus, pos, d) RepoStatus = slices.Insert(RepoStatus, pos, d)
count++ count++
} }
// fetch all keys, one per non-maintenance/non-home: projects, for package search
if idx, found := slices.BinarySearch(projectsLooked, d.Project); !found && !strings.Contains(d.Project, ":Maintenance:") && (len(d.Project) < 5 || d.Project[0:5] != "home:") {
projectsLooked = slices.Insert(projectsLooked, idx, d.Project)
wg.Add(1)
go func(r *common.BuildResult) {
updateResultsWithoutLocking(r)
wg.Done()
}(RepoStatus[pos])
}
} }
wg.Wait()
RepoStatusLock.Unlock() RepoStatusLock.Unlock()
if cursor == 0 { if cursor == 0 {