We need to fetch repositories so that we have package data; we only need to fetch one set of results per project, not all repos.
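For context, a rough sketch of the Redis layout this code expects to read (inferred from updateResultsWithoutLocking below). The key, package names, and status strings here are only illustrative placeholders; the real data is written by other tooling, not by this file:

// seedexample.go -- hypothetical, for illustration only
package main

import (
	"context"

	"github.com/redis/go-redis/v9"
)

func seedExample(rdb *redis.Client) error {
	ctx := context.Background()
	// One hash per project/repository/arch; field = package name,
	// value is either "code" or "code:details"; anything starting
	// with "scheduled" is reported as "building" by the reader below.
	return rdb.HSet(ctx, "result.openSUSE:Factory/standard/x86_64",
		"vim", "succeeded",
		"gcc", "failed:rpmbuild exited with an error", // "code:details" form
		"kernel-default", "scheduled",
	).Err()
}

The code below only ever reads these hashes (HGETALL and SCAN); it never writes them.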
package main

import (
	"context"
	"slices"
	"strings"
	"sync"
	"time"

	"github.com/redis/go-redis/v9"
	"src.opensuse.org/autogits/common"
)

// RepoStatus is the list of known project/repository/arch build results,
// kept sorted with common.BuildResultComp and guarded by RepoStatusLock.
var RepoStatus []*common.BuildResult = []*common.BuildResult{}
var RepoStatusLock *sync.RWMutex = &sync.RWMutex{}

var redisClient *redis.Client

// RedisConnect initializes the global Redis client; it panics if the URL cannot be parsed.
func RedisConnect(RedisUrl string) {
	opts, err := redis.ParseURL(RedisUrl)
	if err != nil {
		panic(err)
	}

	redisClient = redis.NewClient(opts)
}

// UpdateResults refreshes a single repository result from Redis while holding the write lock.
func UpdateResults(r *common.BuildResult) {
	RepoStatusLock.Lock()
	defer RepoStatusLock.Unlock()

	updateResultsWithoutLocking(r)
}

// updateResultsWithoutLocking reads the "result.<project>/<repository>/<arch>" hash
// and rewrites r.Status from it: existing packages are updated, new ones inserted in
// order, and packages no longer present in the hash are dropped.
func updateResultsWithoutLocking(r *common.BuildResult) {
	key := "result." + r.Project + "/" + r.Repository + "/" + r.Arch
	data, err := redisClient.HGetAll(context.Background(), key).Result()
	if err != nil {
		common.LogError("Failed fetching build results for", key, err)
	}

	// Mark every known package with a sentinel timestamp; entries still carrying
	// it after the merge below were not part of this fetch and get removed.
	reset_time := time.Date(1000, 1, 1, 1, 1, 1, 1, time.Local)
	for _, pkg := range r.Status {
		pkg.LastUpdate = reset_time
	}
	r.LastUpdate = time.Now()
	for pkg, result := range data {
		if strings.HasPrefix(result, "scheduled") {
			// TODO: look up where it is building
			result = "building"
		}

		var idx int
		var found bool
		var code string
		var details string

		// Values are stored either as "code" or as "code:details".
		if pos := strings.IndexByte(result, ':'); pos > -1 {
			code = result[0:pos]
			details = result[pos+1:]
		} else {
			code = result
			details = ""
		}

		if idx, found = slices.BinarySearchFunc(r.Status, &common.PackageBuildStatus{Package: pkg}, common.PackageBuildStatusComp); found {
			res := r.Status[idx]
			res.LastUpdate = r.LastUpdate
			res.Code = code
			res.Details = details
		} else {
			r.Status = slices.Insert(r.Status, idx, &common.PackageBuildStatus{
				Package:    pkg,
				Code:       code,
				Details:    details,
				LastUpdate: r.LastUpdate,
			})
		}
	}
	// Drop packages that were not present in this fetch.
	for idx := 0; idx < len(r.Status); {
		if r.Status[idx].LastUpdate == reset_time {
			r.Status = slices.Delete(r.Status, idx, idx+1)
		} else {
			idx++
		}
	}
}

// FindProjectResults returns all cached results for a project.
func FindProjectResults(project string) []*common.BuildResult {
	RepoStatusLock.RLock()
	defer RepoStatusLock.RUnlock()

	ret := make([]*common.BuildResult, 0, 8)
	idx, _ := slices.BinarySearchFunc(RepoStatus, &common.BuildResult{Project: project}, common.BuildResultComp)
	for idx < len(RepoStatus) && RepoStatus[idx].Project == project {
		ret = append(ret, RepoStatus[idx])
		idx++
	}
	return ret
}

// FindRepoResults returns all cached results for a project/repository pair.
func FindRepoResults(project, repo string) []*common.BuildResult {
	RepoStatusLock.RLock()
	defer RepoStatusLock.RUnlock()

	ret := make([]*common.BuildResult, 0, 8)
	idx, _ := slices.BinarySearchFunc(RepoStatus, &common.BuildResult{Project: project, Repository: repo}, common.BuildResultComp)
	for idx < len(RepoStatus) && RepoStatus[idx].Project == project && RepoStatus[idx].Repository == repo {
		ret = append(ret, RepoStatus[idx])
		idx++
	}
	return ret
}

// FindPackages returns up to 100 unique "project/package" entries that contain the given package.
func FindPackages(pkg string) []string {
	RepoStatusLock.RLock()
	defer RepoStatusLock.RUnlock()

	data := make([]string, 0, 100)
	for _, repo := range RepoStatus {
		for _, status := range repo.Status {
			if pkg == status.Package {
				entry := repo.Project + "/" + pkg
				if idx, found := slices.BinarySearch(data, entry); !found {
					data = slices.Insert(data, idx, entry)
					if len(data) >= 100 {
						return data
					}
				}
			}
		}
	}
	return data
}

// FindAndUpdateProjectResults returns the project results, refreshing from Redis
// any result that has not been updated within the last 10 seconds.
func FindAndUpdateProjectResults(project string) []*common.BuildResult {
	res := FindProjectResults(project)
	wg := &sync.WaitGroup{}
	now := time.Now()
	for _, r := range res {
		if now.Sub(r.LastUpdate).Abs() < time.Second*10 {
			// at most 1 update per 10 seconds for now
			continue
		}
		wg.Add(1)
		go func(r *common.BuildResult) {
			UpdateResults(r)
			wg.Done()
		}(r)
	}
	wg.Wait()
	return res
}

// FindAndUpdateRepoResults returns the repository results, refreshing from Redis
// any result that has not been updated within the last 10 seconds.
func FindAndUpdateRepoResults(project, repo string) []*common.BuildResult {
	res := FindRepoResults(project, repo)
	wg := &sync.WaitGroup{}
	now := time.Now()
	for _, r := range res {
		if now.Sub(r.LastUpdate).Abs() < time.Second*10 {
			// at most 1 update per 10 seconds for now
			continue
		}
		wg.Add(1)
		go func(r *common.BuildResult) {
			UpdateResults(r)
			wg.Done()
		}(r)
	}
	wg.Wait()
	return res
}

// RescanRepositories walks all "result.*" hashes in Redis, adds newly seen
// project/repository/arch combinations to RepoStatus, prefetches package data
// for one repository per non-maintenance, non-home: project, and finally
// removes entries whose keys no longer exist.
func RescanRepositories() error {
	ctx := context.Background()
	var cursor uint64
	var err error

	common.LogDebug("** starting rescanning ...")
	RepoStatusLock.Lock()
	for _, repo := range RepoStatus {
		repo.Dirty = false
	}
	RepoStatusLock.Unlock()
	var count int

	projectsLooked := make([]string, 0, 10000)

	for {
		var data []string
		data, cursor, err = redisClient.ScanType(ctx, cursor, "", 1000, "hash").Result()

		if err != nil {
			return err
		}

		wg := &sync.WaitGroup{}
		RepoStatusLock.Lock()
		for _, repo := range data {
			// Keys look like "result.<project>/<repository>/<arch>".
			r := strings.Split(repo, "/")
			if len(r) != 3 || len(r[0]) < 8 || r[0][0:7] != "result." {
				continue
			}
			d := &common.BuildResult{
				Project:    r[0][7:],
				Repository: r[1],
				Arch:       r[2],
			}

			var pos int
			var found bool
			if pos, found = slices.BinarySearchFunc(RepoStatus, d, common.BuildResultComp); found {
				RepoStatus[pos].Dirty = true
			} else {
				d.Dirty = true
				RepoStatus = slices.Insert(RepoStatus, pos, d)
				count++
			}

			// fetch all keys, one per non-maintenance/non-home: project, for package search
			if idx, found := slices.BinarySearch(projectsLooked, d.Project); !found && !strings.Contains(d.Project, ":Maintenance:") && (len(d.Project) < 5 || d.Project[0:5] != "home:") {
				projectsLooked = slices.Insert(projectsLooked, idx, d.Project)
				wg.Add(1)
				go func(r *common.BuildResult) {
					updateResultsWithoutLocking(r)
					wg.Done()
				}(RepoStatus[pos])
			}
		}
		wg.Wait()
		RepoStatusLock.Unlock()

		if cursor == 0 {
			break
		}
	}
	common.LogDebug(" added a total of", count, "repos")
	count = 0

	// Anything not marked Dirty during the scan has disappeared from Redis.
	RepoStatusLock.Lock()
	for i := 0; i < len(RepoStatus); {
		if !RepoStatus[i].Dirty {
			RepoStatus = slices.Delete(RepoStatus, i, i+1)
			count++
		} else {
			i++
		}
	}
	RepoStatusLock.Unlock()
	common.LogDebug(" removed", count, "repos")
	common.LogDebug(" total repos:", len(RepoStatus))

	return nil
}
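
And a rough sketch of how this might be wired together, assuming it is appended to the file above (same package and imports). The Redis URL, rescan interval, and project name are placeholders, not anything this file defines:

// hypothetical wiring, for illustration only
func exampleWiring() {
	RedisConnect("redis://localhost:6379") // placeholder URL
	if err := RescanRepositories(); err != nil {
		panic(err)
	}
	// Periodic rescan keeps RepoStatus in sync with the result.* keys in Redis.
	go func() {
		for range time.Tick(15 * time.Minute) { // placeholder interval
			if err := RescanRepositories(); err != nil {
				common.LogError("rescan failed:", err)
			}
		}
	}()

	// Per request: only results older than 10 seconds are re-fetched,
	// so repeated queries for the same project stay cheap.
	_ = FindAndUpdateProjectResults("openSUSE:Factory") // placeholder project
}

The 10-second check in FindAndUpdateProjectResults and FindAndUpdateRepoResults is what keeps per-request refreshes from hammering Redis.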