Compare commits: main...status_ser (1 commit)
Commit SHA256: e8b6066bae

.gitignore (vendored): 1 line changed

@@ -3,4 +3,3 @@ node_modules
*.obscpio
autogits-tmp.tar.zst
*.osc
*.conf

README.md: 11 lines changed

@@ -22,15 +22,4 @@ Bugs
Report bugs to issue tracker at https://src.opensuse.org/adamm/autogits


Build Status
------------

main branch build status:

[build status badge]

Devel project build status:

[build status badge]
@@ -22,19 +22,16 @@ Release: 0
Summary: GitWorkflow utilities
License: GPL-2.0-or-later
URL: https://src.opensuse.org/adamm/autogits
Source1: vendor.tar.zst
BuildRequires: golang-packaging
BuildRequires: systemd-rpm-macros
BuildRequires: go
BuildRequires: zstd
%{?systemd_ordering}

%description
Git Workflow tooling and utilities enabling automated handing of OBS projects
as git repositories

%package -n hujson
Summary: HuJSON to JSON parser

%description -n hujson
HuJSON to JSON parser, using stdin -> stdout pipe

%package -n gitea-events-rabbitmq-publisher
Summary: Publishes Gitea webhook data via RabbitMQ
@@ -52,6 +49,13 @@ Summary: Common documentation files
Common documentation files


%package -n devel-importer
Summary: Imports devel projects from obs to git

%description -n devel-importer
Command-line tool to import devel projects from obs to git


%package -n group-review
Summary: Reviews of groups defined in ProjectGit
@@ -91,40 +95,47 @@ Keeps ProjectGit PR in-sync with a PackageGit PR

%prep
cp -r /home/abuild/rpmbuild/SOURCES/* ./
tar x --zstd -f %{SOURCE1}

%build
go build \
-C hujson \
-C gitea-events-rabbitmq-publisher \
-mod=vendor \
-buildmode=pie
go build \
-C gitea-events-rabbitmq-publisher \
-C devel-importer \
-mod=vendor \
-buildmode=pie
go build \
-C group-review \
-mod=vendor \
-buildmode=pie
go build \
-C obs-staging-bot \
-mod=vendor \
-buildmode=pie
go build \
-C obs-status-service \
-mod=vendor \
-buildmode=pie
go build \
-C workflow-direct \
-buildmode=pie
go build \
-C workflow-pr \
-buildmode=pie
#go build \
# -C workflow-direct \
# -mod=vendor \
# -buildmode=pie
#go build \
# -C workflow-pr \
# -mod=vendor \
# -buildmode=pie

%install
install -D -m0755 gitea-events-rabbitmq-publisher/gitea-events-rabbitmq-publisher %{buildroot}%{_bindir}/gitea-events-rabbitmq-publisher
install -D -m0644 systemd/gitea-events-rabbitmq-publisher.service %{buildroot}%{_unitdir}/gitea-events-rabbitmq-publisher.service
install -D -m0755 devel-importer/devel-importer %{buildroot}%{_bindir}/devel-importer
install -D -m0755 group-review/group-review %{buildroot}%{_bindir}/group-review
install -D -m0755 obs-staging-bot/obs-staging-bot %{buildroot}%{_bindir}/obs-staging-bot
install -D -m0644 systemd/obs-staging-bot.service %{buildroot}%{_unitdir}/obs-staging-bot.service
install -D -m0755 obs-status-service/obs-status-service %{buildroot}%{_bindir}/obs-status-service
install -D -m0755 workflow-direct/workflow-direct %{buildroot}%{_bindir}/workflow-direct
install -D -m0755 workflow-pr/workflow-pr %{buildroot}%{_bindir}/workflow-pr
install -D -m0755 hujson/hujson %{buildroot}%{_bindir}/hujson
#install -D -m0755 workflow-direct/workflow-direct %{buildroot}%{_bindir}/workflow-direct
#install -D -m0755 workflow-pr/workflow-pr %{buildroot}%{_bindir}/workflow-pr

%pre -n gitea-events-rabbitmq-publisher
%service_add_pre gitea-events-rabbitmq-publisher.service
@@ -138,18 +149,6 @@ install -D -m0755 hujson/hujson
%postun -n gitea-events-rabbitmq-publisher
%service_del_postun gitea-events-rabbitmq-publisher.service

%pre -n obs-staging-bot
%service_add_pre obs-staging-bot.service

%post -n obs-staging-bot
%service_add_post obs-staging-bot.service

%preun -n obs-staging-bot
%service_del_preun obs-staging-bot.service

%postun -n obs-staging-bot
%service_del_postun obs-staging-bot.service

%files -n gitea-events-rabbitmq-publisher
%license COPYING
%doc gitea-events-rabbitmq-publisher/README.md
@@ -161,20 +160,20 @@ install -D -m0755 hujson/hujson
%doc doc/README.md
%doc doc/workflows.md

%files -n devel-importer
%license COPYING
%doc devel-importer/README.md
%{_bindir}/devel-importer

%files -n group-review
%license COPYING
%doc group-review/README.md
%{_bindir}/group-review

%files -n hujson
%license COPYING
%{_bindir}/hujson

%files -n obs-staging-bot
%license COPYING
%doc obs-staging-bot/README.md
%{_bindir}/obs-staging-bot
%{_unitdir}/obs-staging-bot.service

%files -n obs-status-service
%license COPYING
@@ -184,10 +183,10 @@ install -D -m0755 hujson/hujson
%files -n workflow-direct
%license COPYING
%doc workflow-direct/README.md
%{_bindir}/workflow-direct
#%{_bindir}/workflow-direct

%files -n workflow-pr
%license COPYING
%doc workflow-pr/README.md
%{_bindir}/workflow-pr
#%{_bindir}/workflow-pr
@@ -11,7 +11,7 @@ import (
"strings"
)

const PrPattern = "PR: %s/%s!%d"
const PrPattern = "PR: %s/%s#%d"

type BasicPR struct {
Org, Repo string
@@ -36,14 +36,10 @@ func parsePrLine(line string) (BasicPR, error) {
return ret, errors.New("missing / separator")
}

repo := strings.SplitN(org[1], "!", 2)
repo := strings.SplitN(org[1], "#", 2)
ret.Repo = repo[0]
if len(repo) != 2 {
repo = strings.SplitN(org[1], "#", 2)
ret.Repo = repo[0]
}
if len(repo) != 2 {
return ret, errors.New("Missing ! or # separator")
return ret, errors.New("Missing # separator")
}

// Gitea requires that each org and repo be [A-Za-z0-9_-]+
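The hunks above switch the PR reference separator from `!` to `#`. A minimal sketch of how such a reference line is rendered and parsed with the new separator, assuming only the `PrPattern` constant shown above; the parsing helper here is illustrative, not the package's actual `parsePrLine`:

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// PrPattern mirrors the constant introduced in the hunk above.
const PrPattern = "PR: %s/%s#%d"

// parseRef is a hypothetical helper that reverses PrPattern,
// splitting on "/" for the org and "#" for the PR number.
func parseRef(line string) (org, repo string, num int64, err error) {
	body := strings.TrimPrefix(line, "PR: ")
	orgRepo := strings.SplitN(body, "/", 2)
	if len(orgRepo) != 2 {
		return "", "", 0, fmt.Errorf("missing / separator")
	}
	repoNum := strings.SplitN(orgRepo[1], "#", 2)
	if len(repoNum) != 2 {
		return "", "", 0, fmt.Errorf("missing # separator")
	}
	num, err = strconv.ParseInt(repoNum[1], 10, 64)
	return orgRepo[0], repoNum[0], num, err
}

func main() {
	line := fmt.Sprintf(PrPattern, "test", "foo", 4)
	fmt.Println(line) // PR: test/foo#4

	org, repo, num, _ := parseRef(line)
	fmt.Println(org, repo, num) // test foo 4
}
```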
@@ -34,7 +34,7 @@ func TestAssociatedPRScanner(t *testing.T) {
},
{
"Multiple PRs",
"Some header of the issue\n\nFollowed by some description\nPR: test/foo#4\n\nPR: test/goo!5\n",
"Some header of the issue\n\nFollowed by some description\nPR: test/foo#4\n\nPR: test/goo#5\n",
[]common.BasicPR{
{Org: "test", Repo: "foo", Num: 4},
{Org: "test", Repo: "goo", Num: 5},
@@ -107,7 +107,7 @@ func TestAppendingPRsToDescription(t *testing.T) {
[]common.BasicPR{
{Org: "a", Repo: "b", Num: 100},
},
"something\n\nPR: a/b!100",
"something\n\nPR: a/b#100",
},
{
"Append multiple PR to end of description",
@@ -119,7 +119,7 @@ func TestAppendingPRsToDescription(t *testing.T) {
{Org: "b", Repo: "b", Num: 100},
{Org: "c", Repo: "b", Num: 100},
},
"something\n\nPR: a1/b!100\nPR: a1/c!100\nPR: a1/c!101\nPR: b/b!100\nPR: c/b!100",
"something\n\nPR: a1/b#100\nPR: a1/c#100\nPR: a1/c#101\nPR: b/b#100\nPR: c/b#100",
},
{
"Append multiple sorted PR to end of description and remove dups",
@@ -133,7 +133,7 @@ func TestAppendingPRsToDescription(t *testing.T) {
{Org: "a1", Repo: "c", Num: 101},
{Org: "a1", Repo: "b", Num: 100},
},
"something\n\nPR: a1/b!100\nPR: a1/c!100\nPR: a1/c!101\nPR: b/b!100\nPR: c/b!100",
"something\n\nPR: a1/b#100\nPR: a1/c#100\nPR: a1/c#101\nPR: b/b#100\nPR: c/b#100",
},
}
@@ -43,7 +43,6 @@ type ConfigFile struct {

type ReviewGroup struct {
Name string
Silent bool // will not request reviews from group members
Reviewers []string
}

@@ -58,11 +57,10 @@ type AutogitConfig struct {
GitProjectName string // Organization/GitProjectName.git is PrjGit
Branch string // branch name of PkgGit that aligns with PrjGit submodules
Reviewers []string // only used by `pr` workflow
ReviewGroups []*ReviewGroup
ReviewGroups []ReviewGroup
Committers []string // group in addition to Reviewers and Maintainers that can order the bot around, mostly as helper for factory-maintainers
Subdirs []string // list of directories to sort submodules into. Needed b/c _manifest cannot list non-existent directories

NoProjectGitPR bool // do not automatically create project git PRs, just assign reviewers and assume somethign else creates the ProjectGit PR
ManualMergeOnly bool // only merge with "Merge OK" comment by Project Maintainers and/or Package Maintainers and/or reviewers
ManualMergeProject bool // require merge of ProjectGit PRs with "Merge OK" by ProjectMaintainers and/or reviewers
}
@@ -196,19 +194,10 @@ func (config *AutogitConfig) GetReviewGroupMembers(reviewer string) ([]string, e
return nil, errors.New("User " + reviewer + " not found as group reviewer for " + config.GitProjectName)
}

func (config *AutogitConfig) GetReviewGroup(reviewer string) (*ReviewGroup, error) {
for _, g := range config.ReviewGroups {
if g.Name == reviewer {
return g, nil
}
}
return nil, errors.New("User " + reviewer + " not found as group reviewer for " + config.GitProjectName)
}

func (config *AutogitConfig) GetPrjGit() (string, string, string) {
org := config.Organization
repo := DefaultGitPrj
branch := ""
branch := "master"

a := strings.Split(config.GitProjectName, "/")
if len(a[0]) > 0 {
@@ -232,9 +221,6 @@ func (config *AutogitConfig) GetPrjGit() (string, string, string) {
}
}

if len(branch) == 0 {
panic("branch for project is undefined. Should not happend." + org + "/" + repo)
}
return org, repo, branch
}
@@ -109,7 +109,6 @@ func TestConfigWorkflowParser(t *testing.T) {
}
}

// FIXME: should test ReadWorkflowConfig as it will always set prjgit completely
func TestProjectGitParser(t *testing.T) {
tests := []struct {
name string
@@ -120,21 +119,20 @@ func TestProjectGitParser(t *testing.T) {
}{
{
name: "repo only",
prjgit: "repo.git#master",
prjgit: "repo.git",
org: "org",
branch: "br",
res: [3]string{"org", "repo.git", "master"},
},
{
name: "default",
org: "org",
prjgit: "org/_ObsPrj#master",
res: [3]string{"org", common.DefaultGitPrj, "master"},
name: "default",
org: "org",
res: [3]string{"org", common.DefaultGitPrj, "master"},
},
{
name: "repo with branch",
org: "org2",
prjgit: "org2/repo.git#somebranch",
prjgit: "repo.git#somebranch",
res: [3]string{"org2", "repo.git", "somebranch"},
},
{
@@ -151,25 +149,25 @@ func TestProjectGitParser(t *testing.T) {
{
name: "repo org and empty branch",
org: "org3",
prjgit: "oorg/foo.bar#master",
prjgit: "oorg/foo.bar#",
res: [3]string{"oorg", "foo.bar", "master"},
},
{
name: "only branch defined",
org: "org3",
prjgit: "org3/_ObsPrj#mybranch",
prjgit: "#mybranch",
res: [3]string{"org3", "_ObsPrj", "mybranch"},
},
{
name: "only org and branch defined",
org: "org3",
prjgit: "org1/_ObsPrj#mybranch",
prjgit: "org1/#mybranch",
res: [3]string{"org1", "_ObsPrj", "mybranch"},
},
{
name: "empty org and repo",
org: "org3",
prjgit: "org3/repo#master",
prjgit: "/repo#",
res: [3]string{"org3", "repo", "master"},
},
}
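The updated test cases imply that `GetPrjGit` fills in defaults for any part of `GitProjectName` that is left out: the organization falls back to the config's `Organization`, the repository to `DefaultGitPrj` (`_ObsPrj` in the expectations), and the branch to `master`. A standalone sketch of that fallback logic, written against the `org/repo#branch` shape used in the tests; the function name and structure here are illustrative, not the package's code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitPrjGit resolves an "org/repo#branch" spec, falling back to the
// given defaults for every component that is empty, which is the
// behaviour the TestProjectGitParser cases above expect.
func splitPrjGit(spec, defaultOrg, defaultRepo, defaultBranch string) (org, repo, branch string) {
	org, repo, branch = defaultOrg, defaultRepo, defaultBranch

	spec, frag, hasFrag := strings.Cut(spec, "#")
	if hasFrag && frag != "" {
		branch = frag
	}
	if left, right, hasSlash := strings.Cut(spec, "/"); hasSlash {
		if left != "" {
			org = left
		}
		if right != "" {
			repo = right
		}
	} else if spec != "" {
		repo = spec
	}
	return org, repo, branch
}

func main() {
	fmt.Println(splitPrjGit("repo.git", "org", "_ObsPrj", "master"))        // org repo.git master
	fmt.Println(splitPrjGit("#mybranch", "org3", "_ObsPrj", "master"))      // org3 _ObsPrj mybranch
	fmt.Println(splitPrjGit("org1/#mybranch", "org3", "_ObsPrj", "master")) // org1 _ObsPrj mybranch
	fmt.Println(splitPrjGit("/repo#", "org3", "_ObsPrj", "master"))         // org3 repo master
}
```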
@@ -67,7 +67,6 @@ type Git interface {
GitExecOrPanic(cwd string, params ...string)
GitExec(cwd string, params ...string) error
GitExecWithOutput(cwd string, params ...string) (string, error)
GitExecQuietOrPanic(cwd string, params ...string)

GitDiffLister
}
@@ -77,8 +76,7 @@ type GitHandlerImpl struct {
GitCommiter string
GitEmail string

lock *sync.Mutex
quiet bool
lock *sync.Mutex
}

func (s *GitHandlerImpl) GetPath() string {
@@ -213,7 +211,7 @@ func (e *GitHandlerImpl) GitClone(repo, branch, remoteUrl string) (string, error
return "", fmt.Errorf("Cannot parse remote URL: %w", err)
}
remoteBranch := "HEAD"
if len(branch) == 0 && remoteUrlComp != nil && remoteUrlComp.Commit != "HEAD" {
if len(branch) == 0 && remoteUrlComp != nil {
branch = remoteUrlComp.Commit
remoteBranch = branch
} else if len(branch) > 0 {
@@ -242,12 +240,12 @@ func (e *GitHandlerImpl) GitClone(repo, branch, remoteUrl string) (string, error

// check if we have submodule to deinit
if list, _ := e.GitSubmoduleList(repo, "HEAD"); len(list) > 0 {
e.GitExecQuietOrPanic(repo, "submodule", "deinit", "--all", "--force")
e.GitExecOrPanic(repo, "submodule", "deinit", "--all", "--force")
}

e.GitExecOrPanic(repo, "fetch", "--prune", remoteName, remoteBranch)
}
/*

refsBytes, err := os.ReadFile(path.Join(e.GitPath, repo, ".git/refs/remotes", remoteName, "HEAD"))
if err != nil {
LogError("Cannot read HEAD of remote", remoteName)
@@ -266,7 +264,7 @@ func (e *GitHandlerImpl) GitClone(repo, branch, remoteUrl string) (string, error
LogDebug("remoteRef", remoteRef)
LogDebug("branch", branch)
}
*/

args := []string{"fetch", "--prune", remoteName, branch}
if strings.TrimSpace(e.GitExecWithOutputOrPanic(repo, "rev-parse", "--is-shallow-repository")) == "true" {
args = slices.Insert(args, 1, "--unshallow")
@@ -276,17 +274,12 @@ func (e *GitHandlerImpl) GitClone(repo, branch, remoteUrl string) (string, error
}

func (e *GitHandlerImpl) GitBranchHead(gitDir, branchName string) (string, error) {
id, err := e.GitExecWithOutput(gitDir, "show-ref", "--heads", "--hash", branchName)
id, err := e.GitExecWithOutput(gitDir, "show-ref", "--hash", "--verify", "refs/heads/"+branchName)
if err != nil {
return "", fmt.Errorf("Can't find default branch: %s", branchName)
}

id = strings.TrimSpace(SplitLines(id)[0])
if len(id) < 10 {
return "", fmt.Errorf("Can't find branch: %s", branchName)
}

return id, nil
return strings.TrimSpace(id), nil
}

func (e *GitHandlerImpl) GitRemoteHead(gitDir, remote, branchName string) (string, error) {
@@ -352,7 +345,6 @@ func (e *GitHandlerImpl) GitExecWithOutput(cwd string, params ...string) (string
"GIT_COMMITTER_NAME=" + e.GitCommiter,
"EMAIL=not@exist@src.opensuse.org",
"GIT_LFS_SKIP_SMUDGE=1",
"GIT_LFS_SKIP_PUSH=1",
"GIT_SSH_COMMAND=/usr/bin/ssh -o StrictHostKeyChecking=yes",
}
if len(ExtraGitParams) > 0 {
@@ -363,9 +355,7 @@ func (e *GitHandlerImpl) GitExecWithOutput(cwd string, params ...string) (string

LogDebug("git execute @", cwd, ":", cmd.Args)
out, err := cmd.CombinedOutput()
if !e.quiet {
LogDebug(string(out))
}
LogDebug(string(out))
if err != nil {
LogError("git", cmd.Args, " error:", err)
return "", fmt.Errorf("error executing: git %#v \n%s\n err: %w", cmd.Args, out, err)
@@ -374,13 +364,6 @@ func (e *GitHandlerImpl) GitExecWithOutput(cwd string, params ...string) (string
return string(out), nil
}

func (e *GitHandlerImpl) GitExecQuietOrPanic(cwd string, params ...string) {
e.quiet = true
e.GitExecOrPanic(cwd, params...)
e.quiet = false
return
}

type ChanIO struct {
ch chan byte
}
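The `GitBranchHead` change above switches from `show-ref --heads --hash <name>` (a pattern lookup that may return several refs) to `show-ref --hash --verify refs/heads/<name>` (an exact, single-ref lookup whose output needs no further filtering). A small, hedged illustration of the new invocation with `os/exec`, independent of the `GitHandlerImpl` wrapper:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// branchHead resolves the commit id of a local branch using the exact
// lookup form adopted in the hunk above: --verify refuses pattern
// matches and fails cleanly when the ref does not exist.
func branchHead(gitDir, branch string) (string, error) {
	cmd := exec.Command("git", "show-ref", "--hash", "--verify", "refs/heads/"+branch)
	cmd.Dir = gitDir
	out, err := cmd.Output()
	if err != nil {
		return "", fmt.Errorf("can't find branch %q: %w", branch, err)
	}
	return strings.TrimSpace(string(out)), nil
}

func main() {
	id, err := branchHead(".", "main")
	fmt.Println(id, err)
}
```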
@@ -160,10 +160,6 @@ type GiteaCommitStatusGetter interface {
GetCommitStatus(org, repo, hash string) ([]*models.CommitStatus, error)
}

type GiteaMerger interface {
ManualMergePR(org, repo string, id int64, commitid string, delBranch bool) error
}

type Gitea interface {
GiteaComment
GiteaRepoFetcher
@@ -172,7 +168,6 @@ type Gitea interface {
GiteaReviewer
GiteaPRFetcher
GiteaPRUpdater
GiteaMerger
GiteaCommitFetcher
GiteaReviewFetcher
GiteaCommentFetcher
@@ -238,11 +233,6 @@ func (gitea *GiteaTransport) GetPullRequest(org, project string, num int64) (*mo
gitea.transport.DefaultAuthentication,
)

if err != nil {
LogError(err)
return nil, err
}

return pr.Payload, err
}

@@ -256,36 +246,9 @@ func (gitea *GiteaTransport) UpdatePullRequest(org, repo string, num int64, opti
gitea.transport.DefaultAuthentication,
)

if err != nil {
LogError(err)
return nil, err
}

return pr.Payload, err
}

func (gitea *GiteaTransport) ManualMergePR(org, repo string, num int64, commitid string, delBranch bool) error {
manual_merge := "manually-merged"
_, err := gitea.client.Repository.RepoMergePullRequest(
repository.NewRepoMergePullRequestParams().
WithOwner(org).
WithRepo(repo).
WithIndex(num).
WithBody(&models.MergePullRequestForm{
Do: &manual_merge,
DeleteBranchAfterMerge: delBranch,
HeadCommitID: commitid,
}), gitea.transport.DefaultAuthentication,
)

if err != nil {
LogError(err)
return err
}

return nil
}

func (gitea *GiteaTransport) GetPullRequests(org, repo string) ([]*models.PullRequest, error) {
var page, limit int64

@@ -647,11 +610,7 @@ func (gitea *GiteaTransport) CreatePullRequestIfNotExist(repo *models.Repository
}

if pr, err := gitea.client.Repository.RepoGetPullRequestByBaseHead(
repository.NewRepoGetPullRequestByBaseHeadParams().
WithOwner(repo.Owner.UserName).
WithRepo(repo.Name).
WithBase(targetId).
WithHead(srcId),
repository.NewRepoGetPullRequestByBaseHeadParams().WithOwner(repo.Owner.UserName).WithRepo(repo.Name).WithBase(targetId).WithHead(srcId),
gitea.transport.DefaultAuthentication,
); err == nil {
return pr.Payload, nil
@@ -782,13 +741,10 @@ func (gitea *GiteaTransport) GetTimeline(org, repo string, idx int64) ([]*models
LogDebug("page:", page, "len:", resCount)
page++

for _, d := range res.Payload {
if d != nil {
retData = append(retData, d)
}
}
retData = append(retData, res.Payload...)
}
LogDebug("total results:", len(retData))

slices.SortFunc(retData, func(a, b *models.TimelineComment) int {
return time.Time(b.Created).Compare(time.Time(a.Created))
})
@@ -63,10 +63,6 @@ func SetLoggingLevel(ll LogLevel) {
logLevel = ll
}

func GetLoggingLevel() LogLevel {
return logLevel
}

func SetLoggingLevelFromString(ll string) error {
switch ll {
case "info":
@@ -584,35 +584,22 @@ type PackageBuildStatus struct {
Code string `xml:"code,attr"`
Details string `xml:"details"`

LastUpdate time.Time
}

func PackageBuildStatusComp(A, B *PackageBuildStatus) int {
return strings.Compare(A.Package, B.Package)
LastUpdate int64
}

type BuildResult struct {
Project string `xml:"project,attr"`
Repository string `xml:"repository,attr"`
Arch string `xml:"arch,attr"`
Code string `xml:"code,attr"`
Dirty bool `xml:"dirty,attr"`
ScmSync string `xml:"scmsync"`
ScmInfo string `xml:"scminfo"`
Status []*PackageBuildStatus `xml:"status"`
Binaries []BinaryList `xml:"binarylist"`
Project string `xml:"project,attr"`
Repository string `xml:"repository,attr"`
Arch string `xml:"arch,attr"`

LastUpdate time.Time
}
Code string `xml:"code,attr"`
Dirty bool `xml:"dirty,attr"`
ScmSync string `xml:"scmsync"`
ScmInfo string `xml:"scminfo"`
Status []PackageBuildStatus `xml:"status"`
Binaries []BinaryList `xml:"binarylist"`

func BuildResultComp(A, B *BuildResult) int {
if cmp := strings.Compare(A.Project, B.Project); cmp != 0 {
return cmp
}
if cmp := strings.Compare(A.Repository, B.Repository); cmp != 0 {
return cmp
}
return strings.Compare(A.Arch, B.Arch)
LastUpdate int64
}

type Binary struct {
@@ -629,9 +616,10 @@ type BinaryList struct {
type BuildResultList struct {
XMLName xml.Name `xml:"resultlist"`
State string `xml:"state,attr"`
Result []*BuildResult `xml:"result"`
Result []BuildResult `xml:"result"`

isLastBuild bool
LastUpdate int64
}

func (r *BuildResultList) GetPackageList() []string {
@@ -654,6 +642,48 @@ func (r *BuildResultList) GetPackageList() []string {
return pkgList
}

func packageSort(A, B PackageBuildStatus) int {
return strings.Compare(A.Package, B.Package)
}

func repoSort(A, B BuildResult) int {
eq := strings.Compare(A.Project, B.Project)
if eq == 0 {
eq = strings.Compare(A.Repository, B.Repository)
if eq == 0 {
eq = strings.Compare(A.Arch, B.Arch)
}
}
return eq
}

func (r *BuildResultList) MergePackageState(now int64, pkgState *BuildResultList) {
for _, nr := range pkgState.Result {
idx, found := slices.BinarySearchFunc(r.Result, nr, repoSort)
// not found, new repo?
if !found {
nr.LastUpdate = now
r.Result = slices.Insert(r.Result, idx, nr)
continue
}

// update current repo
repo := &r.Result[idx]

// update all the packages in the repo
for _, p := range nr.Status {
p.LastUpdate = now
idx, found := slices.BinarySearchFunc(repo.Status, p, packageSort)
if !found {
repo.Status = slices.Insert(repo.Status, idx, p)
continue
}

repo.Status[idx] = p
}
}
}

func (r *BuildResultList) BuildResultSummary() (success, finished bool) {
if r == nil {
return false, false
@@ -921,5 +951,11 @@ func (c *ObsClient) BuildStatusWithState(project string, opts *BuildResultOption
if ret != nil {
ret.isLastBuild = opts.LastBuild
}

slices.SortFunc(ret.Result, repoSort)
for _, r := range ret.Result {
slices.SortFunc(r.Status, packageSort)
}

return ret, err
}
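The new `MergePackageState` keeps `Result` and each repo's `Status` sorted so that `slices.BinarySearchFunc` can either update an existing entry in place or insert a new one at the returned index. A small self-contained sketch of that insert-or-update pattern on a sorted slice; the element type is a stand-in, not the OBS structs:

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

type pkg struct {
	Name string
	Code string
}

func pkgCmp(a, b pkg) int { return strings.Compare(a.Name, b.Name) }

// upsert keeps list sorted by Name: BinarySearchFunc reports where the
// element belongs; if it is already present we overwrite it, otherwise
// we insert it at that index, the same pattern MergePackageState uses.
func upsert(list []pkg, p pkg) []pkg {
	idx, found := slices.BinarySearchFunc(list, p, pkgCmp)
	if found {
		list[idx] = p
		return list
	}
	return slices.Insert(list, idx, p)
}

func main() {
	list := []pkg{{"bar", "succeeded"}, {"foo", "building"}}
	list = upsert(list, pkg{"baz", "failed"})    // inserted between bar and foo
	list = upsert(list, pkg{"foo", "succeeded"}) // updated in place
	fmt.Println(list)
}
```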

common/pr.go: 31 lines changed

@@ -63,6 +63,7 @@ func readPRData(gitea GiteaPRFetcher, pr *models.PullRequest, currentSet []*PRIn
var Timeline_RefIssueNotFound error = errors.New("RefIssue not found on the timeline")

func LastPrjGitRefOnTimeline(gitea GiteaPRTimelineFetcher, org, repo string, num int64, prjGitOrg, prjGitRepo string) (*models.PullRequest, error) {
prRefLine := fmt.Sprintf(PrPattern, org, repo, num)
timeline, err := gitea.GetTimeline(org, repo, num)
if err != nil {
LogError("Failed to fetch timeline for", org, repo, "#", num, err)
@@ -78,9 +79,9 @@ func LastPrjGitRefOnTimeline(gitea GiteaPRTimelineFetcher, org, repo string, num
issue.Repository.Owner == prjGitOrg &&
issue.Repository.Name == prjGitRepo {

_, prs := ExtractDescriptionAndPRs(bufio.NewScanner(strings.NewReader(item.RefIssue.Body)))
for _, pr := range prs {
if pr.Org == org && pr.Repo == repo && pr.Num == num {
lines := SplitLines(item.RefIssue.Body)
for _, line := range lines {
if strings.TrimSpace(line) == prRefLine {
LogDebug("Found PrjGit PR in Timeline:", issue.Index)

// found prjgit PR in timeline. Return it
@@ -272,7 +273,7 @@ func (rs *PRSet) AssignReviewers(gitea GiteaReviewFetcherAndRequester, maintaine
if !IsDryRun {
for _, r := range reviewers {
if _, err := gitea.RequestReviews(pr.PR, r); err != nil {
LogError("Cannot create reviews on", fmt.Sprintf("%s/%s!%d for [%s]", pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index, strings.Join(reviewers, ", ")), err)
LogError("Cannot create reviews on", fmt.Sprintf("%s/%s#%d for [%s]", pr.PR.Base.Repo.Owner.UserName, pr.PR.Base.Repo.Name, pr.PR.Index, strings.Join(reviewers, ", ")), err)
}
}
}
@@ -281,12 +282,6 @@ func (rs *PRSet) AssignReviewers(gitea GiteaReviewFetcherAndRequester, maintaine
return nil
}

func (rs *PRSet) RemoveClosedPRs() {
rs.PRs = slices.DeleteFunc(rs.PRs, func(pr *PRInfo) bool {
return pr.PR.State != "open"
})
}

func (rs *PRSet) IsApproved(gitea GiteaPRChecker, maintainers MaintainershipData) bool {
configReviewers := ParseReviewers(rs.Config.Reviewers)

@@ -385,8 +380,7 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
}
prjgit := prjgit_info.PR

_, _, prjgitBranch := rs.Config.GetPrjGit()
remote, err := git.GitClone(DefaultGitPrj, prjgitBranch, prjgit.Base.Repo.SSHURL)
remote, err := git.GitClone(DefaultGitPrj, rs.Config.Branch, prjgit.Base.Repo.SSHURL)
PanicOnError(err)
git.GitExecOrPanic(DefaultGitPrj, "fetch", remote, prjgit.Head.Sha)

@@ -403,7 +397,7 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
panic("FIXME")
}
*/
msg := fmt.Sprintf("Merging\n\nPR: %s/%s!%d", prjgit.Base.Repo.Owner.UserName, prjgit.Base.Repo.Name, prjgit.Index)
msg := fmt.Sprintf("Merging\n\nPR: %s/%s#%d", prjgit.Base.Repo.Owner.UserName, prjgit.Base.Repo.Name, prjgit.Index)

err = git.GitExec(DefaultGitPrj, "merge", "--no-ff", "-m", msg, prjgit.Head.Sha)
if err != nil {
@@ -415,7 +409,6 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
// we can only resolve conflicts with .gitmodules
for _, s := range status {
if s.Status == GitStatus_Unmerged {
panic("Can't handle conflicts yet")
if s.Path != ".gitmodules" {
return err
}
@@ -506,15 +499,7 @@ func (rs *PRSet) Merge(gitea GiteaReviewUnrequester, git Git) error {
if rs.IsPrjGitPR(prinfo.PR) {
continue
}
br := rs.Config.Branch
if len(br) == 0 {
// if branch is unspecified, take it from the PR as it
// matches default branch already
br = prinfo.PR.Base.Name
} else if br != prinfo.PR.Base.Name {
panic(prinfo.PR.Base.Name + " is expected to match " + br)
}
prinfo.RemoteName, err = git.GitClone(repo.Name, br, repo.SSHURL)
prinfo.RemoteName, err = git.GitClone(repo.Name, rs.Config.Branch, repo.SSHURL)
PanicOnError(err)
git.GitExecOrPanic(repo.Name, "fetch", prinfo.RemoteName, head.Sha)
git.GitExecOrPanic(repo.Name, "merge", "--ff", head.Sha)
@@ -48,13 +48,11 @@ func reviewsToTimeline(reviews []*models.PullReview) []*models.TimelineComment {
}

func TestPR(t *testing.T) {
return

baseConfig := common.AutogitConfig{
Reviewers: []string{"+super1", "*super2", "m1", "-m2"},
Branch: "branch",
Organization: "foo",
GitProjectName: "foo/barPrj#master",
GitProjectName: "barPrj",
}

type prdata struct {
@@ -640,7 +638,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "No reviewers",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{},
@@ -650,7 +648,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "One project reviewer only",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1"},
@@ -660,7 +658,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "One project reviewer and one pkg reviewer only",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1", "user2"},
@@ -670,7 +668,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "No need to get reviews of submitter",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1", "submitter"},
@@ -680,7 +678,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "Reviews are done",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1", "user2"},
@@ -714,7 +712,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "Stale review is not done, re-request it",
config: common.AutogitConfig{
GitProjectName: "org/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1", "user2"},
@@ -746,7 +744,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "Stale optional review is not done, re-request it",
config: common.AutogitConfig{
GitProjectName: "prg/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{"-user1", "user2", "~bot"},
@@ -854,7 +852,7 @@ func TestPRAssignReviewers(t *testing.T) {
{
name: "PrjMaintainers in prjgit review when not part of pkg set",
config: common.AutogitConfig{
GitProjectName: "org/repo#main",
GitProjectName: "repo",
Organization: "org",
Branch: "main",
Reviewers: []string{},
@@ -925,7 +923,7 @@ func TestPRMerge(t *testing.T) {

config := &common.AutogitConfig{
Organization: "org",
GitProjectName: "org/prj#master",
GitProjectName: "prj",
}

tests := []struct {
@@ -86,38 +86,38 @@ func (l *RabbitConnection) ProcessRabbitMQ(msgCh chan<- RabbitMessage) error {
}
defer connection.Close()

l.ch, err = connection.Channel()
ch, err := connection.Channel()
if err != nil {
return fmt.Errorf("Cannot create a channel. Err: %w", err)
}
defer l.ch.Close()
defer ch.Close()

if err = l.ch.ExchangeDeclarePassive("pubsub", "topic", true, false, false, false, nil); err != nil {
if err = ch.ExchangeDeclarePassive("pubsub", "topic", true, false, false, false, nil); err != nil {
return fmt.Errorf("Cannot find pubsub exchange? Err: %w", err)
}

var q rabbitmq.Queue
if len(queueName) == 0 {
q, err = l.ch.QueueDeclare("", false, true, true, false, nil)
q, err = ch.QueueDeclare("", false, true, true, false, nil)
} else {
q, err = l.ch.QueueDeclarePassive(queueName, true, false, true, false, nil)
q, err = ch.QueueDeclarePassive(queueName, true, false, true, false, nil)
if err != nil {
LogInfo("queue not found .. trying to create it:", err)
if l.ch.IsClosed() {
l.ch, err = connection.Channel()
if ch.IsClosed() {
ch, err = connection.Channel()
if err != nil {
return fmt.Errorf("Channel cannot be re-opened. Err: %w", err)
}
}
q, err = l.ch.QueueDeclare(queueName, true, false, true, false, nil)
q, err = ch.QueueDeclare(queueName, true, false, true, false, nil)

if err != nil {
LogInfo("can't create persistent queue ... falling back to temporaty queue:", err)
if l.ch.IsClosed() {
l.ch, err = connection.Channel()
if ch.IsClosed() {
ch, err = connection.Channel()
return fmt.Errorf("Channel cannot be re-opened. Err: %w", err)
}
q, err = l.ch.QueueDeclare("", false, true, true, false, nil)
q, err = ch.QueueDeclare("", false, true, true, false, nil)
}
}
}
@@ -135,7 +135,7 @@ func (l *RabbitConnection) ProcessRabbitMQ(msgCh chan<- RabbitMessage) error {
l.topicSubChanges <- "+" + topic
}

msgs, err := l.ch.Consume(q.Name, "", true, true, false, false, nil)
msgs, err := ch.Consume(q.Name, "", true, true, false, false, nil)
if err != nil {
return fmt.Errorf("Cannot start consumer. Err: %w", err)
}
@@ -164,7 +164,7 @@ func (l *RabbitConnection) ConnectAndProcessRabbitMQ(ch chan<- RabbitMessage) {
for {
err := l.ProcessRabbitMQ(ch)
if err != nil {
LogError("Error in RabbitMQ connection:", err)
LogError("Error in RabbitMQ connection. %#v", err)
LogInfo("Reconnecting in 2 seconds...")
time.Sleep(2 * time.Second)
}
@@ -25,28 +25,30 @@ import (
"strings"
)

const RequestType_CreateBrachTag = "create"
const RequestType_DeleteBranchTag = "delete"
const RequestType_Fork = "fork"
const RequestType_Issue = "issues"
const RequestType_IssueAssign = "issue_assign"
const RequestType_IssueComment = "issue_comment"
const RequestType_IssueLabel = "issue_label"
const RequestType_IssueMilestone = "issue_milestone"
const RequestType_Push = "push"
const RequestType_Repository = "repository"
const RequestType_Release = "release"
const RequestType_PR = "pull_request"
const RequestType_PRAssign = "pull_request_assign"
const RequestType_PRLabel = "pull_request_label"
const RequestType_PRComment = "pull_request_comment"
const RequestType_PRMilestone = "pull_request_milestone"
const RequestType_PRSync = "pull_request_sync"
const RequestType_PRReviewAccepted = "pull_request_review_approved"
const RequestType_PRReviewRejected = "pull_request_review_rejected"
const RequestType_PRReviewRequest = "pull_request_review_request"
const RequestType_PRReviewComment = "pull_request_review_comment"
const RequestType_Wiki = "wiki"
const (
RequestType_CreateBrachTag = "create"
RequestType_DeleteBranchTag = "delete"
RequestType_Fork = "fork"
RequestType_Issue = "issues"
RequestType_IssueAssign = "issue_assign"
RequestType_IssueComment = "issue_comment"
RequestType_IssueLabel = "issue_label"
RequestType_IssueMilestone = "issue_milestone"
RequestType_Push = "push"
RequestType_Repository = "repository"
RequestType_Release = "release"
RequestType_PR = "pull_request"
RequestType_PRAssign = "pull_request_assign"
RequestType_PRLabel = "pull_request_label"
RequestType_PRComment = "pull_request_comment"
RequestType_PRMilestone = "pull_request_milestone"
RequestType_PRSync = "pull_request_sync"
RequestType_PRReviewAccepted = "pull_request_review_approved"
RequestType_PRReviewRejected = "pull_request_review_rejected"
RequestType_PRReviewRequest = "pull_request_review_request"
RequestType_PRReviewComment = "pull_request_review_comment"
RequestType_Wiki = "wiki"
)

type RequestProcessor interface {
ProcessFunc(*Request) error
@@ -99,12 +101,12 @@ func (gitea *RabbitMQGiteaEventsProcessor) ProcessRabbitMessage(msg RabbitMessag
req, err := ParseRequestJSON(reqType, msg.Body)
if err != nil {
LogError("Error parsing request JSON:", err)
return nil
} else {
LogDebug("processing req", req.Type)
// h.Request = req
ProcessEvent(handler, req)
}
return nil
}
}
@@ -1,11 +1,99 @@
package common

import (
"encoding/json"
"errors"
"fmt"
"slices"
"strings"
)

const (
ObsMessageType_PackageBuildFail = "package.build_fail"
ObsMessageType_PackageBuildSuccess = "package.build_success"
ObsMessageType_PackageBuildUnchanged = "package.build_unchanged"

ObsMessageType_RepoBuildFinished = "repo.build_finished"
ObsMessageType_RepoBuildStarted = "repo.build_started"
)

type BuildResultMsg struct {
Status string
Project string `json:"project"`
Package string `json:"package"`
Repo string `json:"repository"`
Arch string `json:"arch"`

StartTime int32 `json:"starttime"`
EndTime int32 `json:"endtime"`
WorkerID string `json:"workerid"`
Version string `json:"versrel"`
Build string `json:"buildtype"`
}

type RepoBuildMsg struct {
Status string
Project string `json:"project"`
Repo string `json:"repo"`
Arch string `json:"arch"`
BuildId string `json:"buildid"`
}

var ObsRabbitMessageError_UnknownMessageType error = errors.New("Unknown message type")
var ObsRabbitMessageError_ParseError error = errors.New("JSON parsing error")

func ParseObsRabbitMessaege(ObsMessageType string, data []byte) (interface{}, error) {
unmarshall := func(data []byte, v any) (interface{}, error) {
if err := json.Unmarshal(data, v); err != nil {
return nil, fmt.Errorf("%w: %s", ObsRabbitMessageError_ParseError, err)
}
return v, nil
}

switch ObsMessageType {
case ObsMessageType_PackageBuildSuccess, ObsMessageType_PackageBuildUnchanged:
ret := &BuildResultMsg{Status: "succeeded"}
return unmarshall(data, ret)
case ObsMessageType_PackageBuildFail:
ret := &BuildResultMsg{Status: "failed"}
return unmarshall(data, ret)
case ObsMessageType_RepoBuildFinished:
ret := &RepoBuildMsg{Status: "finished"}
return unmarshall(data, ret)
case ObsMessageType_RepoBuildStarted:
ret := &RepoBuildMsg{Status: "building"}
return unmarshall(data, ret)
}

return nil, fmt.Errorf("%w: %s", ObsRabbitMessageError_UnknownMessageType, ObsMessageType)
}

type ObsMessageProcessor func(topic string, data []byte) error

type RabbitMQObsBuildStatusProcessor struct {
Handlers map[string]ObsMessageProcessor

c *RabbitConnection
}

func (o *RabbitMQObsBuildStatusProcessor) routingKeyPrefix() string {
if strings.HasSuffix(o.c.RabbitURL.Hostname(), "opensuse.org") {
return "opensuse"
}

return "suse"
}

func (o *RabbitMQObsBuildStatusProcessor) GenerateTopics() []string {
return []string{}
prefix := o.routingKeyPrefix()
msgs := make([]string, len(o.Handlers))
idx := 0
for k, _ := range o.Handlers {
msgs[idx] = prefix + ".obs." + k
idx++
}
slices.Sort(msgs)
return msgs
}

func (o *RabbitMQObsBuildStatusProcessor) Connection() *RabbitConnection {
@@ -17,6 +105,11 @@ func (o *RabbitMQObsBuildStatusProcessor) Connection() *RabbitConnection {
}

func (o *RabbitMQObsBuildStatusProcessor) ProcessRabbitMessage(msg RabbitMessage) error {
return nil
}
prefix := o.routingKeyPrefix() + ".obs."
topic := strings.TrimPrefix(msg.RoutingKey, prefix)
if h, ok := o.Handlers[topic]; ok {
return h(topic, msg.Body)
}

return fmt.Errorf("Unhandled message received: %s", msg.RoutingKey)
}
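`ParseObsRabbitMessaege` pre-seeds the `Status` field based on the message type and lets `json.Unmarshal` fill the rest from the payload. A hedged, self-contained example of decoding a `package.build_fail` payload into the `BuildResultMsg` shape shown above; the sample JSON is made up for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// BuildResultMsg mirrors the struct added in the hunk above.
type BuildResultMsg struct {
	Status  string
	Project string `json:"project"`
	Package string `json:"package"`
	Repo    string `json:"repository"`
	Arch    string `json:"arch"`

	StartTime int32  `json:"starttime"`
	EndTime   int32  `json:"endtime"`
	WorkerID  string `json:"workerid"`
	Version   string `json:"versrel"`
	Build     string `json:"buildtype"`
}

func main() {
	// Illustrative payload only; field names follow the json tags above.
	payload := []byte(`{"project":"home:foo","package":"bar","repository":"openSUSE_Factory","arch":"x86_64","versrel":"1.0-1"}`)

	msg := &BuildResultMsg{Status: "failed"} // pre-set, as the parser does for package.build_fail
	if err := json.Unmarshal(payload, msg); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("%s/%s %s/%s -> %s\n", msg.Project, msg.Package, msg.Repo, msg.Arch, msg.Status)
}
```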
@@ -132,7 +132,7 @@ func PRtoString(pr *models.PullRequest) string {
return "(null)"
}

return fmt.Sprintf("%s/%s!%d", pr.Base.Repo.Owner.UserName, pr.Base.Repo.Name, pr.Index)
return fmt.Sprintf("%s/%s#%d", pr.Base.Repo.Owner.UserName, pr.Base.Repo.Name, pr.Index)
}

type DevelProject struct {
@@ -173,4 +173,3 @@ func (d DevelProjects) GetDevelProject(pkg string) (string, error) {

return "", DevelProjectNotFound
}
@@ -1,8 +0,0 @@
all: ../workflow-direct/workflow-direct
cp ../workflow-direct/workflow-direct workflow-direct
podman build --pull=always -t workflow-direct workflow-direct

pr:
cp ../workflow-pr/workflow-pr workflow-pr
podman build --pull=always -t workflow-pr workflow-pr

containers/workflow-direct/.gitignore (vendored): 1 line changed

@@ -1 +0,0 @@
workflow-direct
@@ -1,14 +0,0 @@
FROM registry.suse.com/bci/bci-base
RUN zypper install -y openssh-clients git-core
RUN mkdir /root/.ssh
RUN mkdir /repos
RUN ln -s /data/workflow-direct.key /root/.ssh/id_ed25519
RUN ln -s /data/workflow-direct.key.pub /root/.ssh/id_ed25519.pub
ADD known_hosts /root/.ssh/known_hosts
ADD workflow-direct /srv/workflow-direct
ENV AMQP_USERNAME=opensuse
ENV AMQP_PASSWORD=opensuse
VOLUME /data
VOLUME /repos
ENTRYPOINT /srv/workflow-direct -config /data/config.json -repo-path /repos -debug -check-on-start
@@ -1,4 +0,0 @@
src.opensuse.org,195.135.223.224 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDJ8V51MVIFUkQqQOdHwC3SP9NPqp1ZWYoEbcjvZ7HhSFi2XF8ALo/h1Mk+q8kT2O75/goeTsKFbcU8zrYFeOh0=
src.opensuse.org,195.135.223.224 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkVeXePin0haffC085V2L0jvILfwbB2Mt1fpVe21QAOcWNM+/jOC5RwtWweV/LigHImB39/KvkuPa9yLoDf+eLhdZQckSSauRfDjxtlKeFLPrfJKSA0XeVJT3kJcOvDT/3ANFhYeBbAUBTAeQt5bi2hHC1twMPbaaEdJ2jiMaIBztFf6aE9K58uoS+7Y2tTv87Mv/7lqoBW6BFMoDmjQFWgjik6ZMCvIM/7bj7AgqHk/rjmr5zKS4ag5wtHtYLm1L3LBmHdj7d0VFsOpPQexIOEnnjzKqlwmAxT6eYJ/t3qgBlT8KRfshBFgEuUZ5GJOC7TOne4PfB0bboPMZzIRo3WE9dPGRR8kAIme8XqhFbmjdJ+WsTjg0Lj+415tIbyRQoNkLtawrJxozvevs6wFEFcA/YG6o03Z577tiLT3WxOguCcD5vrALH48SyZb8jDUtcVgTWMW0to/n63S8JGUNyF7Bkw9HQWUx+GO1cv2GNzKpk22KS5dlNUVGE9E/7Ydc=
src.opensuse.org,195.135.223.224 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFKNThLRPznU5Io1KrAYHmYpaoLQEMGM9nwpKyYQCkPx

containers/workflow-pr/.gitignore (vendored): 1 line changed

@@ -1 +0,0 @@
workflow-pr
@@ -1,14 +0,0 @@
FROM registry.suse.com/bci/bci-base
RUN zypper install -y openssh-clients git-core
RUN mkdir /root/.ssh
RUN mkdir /repos
RUN ln -s /data/workflow-pr.key /root/.ssh/id_ed25519
RUN ln -s /data/workflow-pr.key.pub /root/.ssh/id_ed25519.pub
ADD known_hosts /root/.ssh/known_hosts
ADD workflow-pr /srv/workflow-pr
ENV AMQP_USERNAME=opensuse
ENV AMQP_PASSWORD=opensuse
VOLUME /data
VOLUME /repos
ENTRYPOINT /srv/workflow-pr -config /data/config.json -repo-path /repos -debug -check-on-start
@@ -1,4 +0,0 @@
src.opensuse.org,195.135.223.224 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBDJ8V51MVIFUkQqQOdHwC3SP9NPqp1ZWYoEbcjvZ7HhSFi2XF8ALo/h1Mk+q8kT2O75/goeTsKFbcU8zrYFeOh0=
src.opensuse.org,195.135.223.224 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCkVeXePin0haffC085V2L0jvILfwbB2Mt1fpVe21QAOcWNM+/jOC5RwtWweV/LigHImB39/KvkuPa9yLoDf+eLhdZQckSSauRfDjxtlKeFLPrfJKSA0XeVJT3kJcOvDT/3ANFhYeBbAUBTAeQt5bi2hHC1twMPbaaEdJ2jiMaIBztFf6aE9K58uoS+7Y2tTv87Mv/7lqoBW6BFMoDmjQFWgjik6ZMCvIM/7bj7AgqHk/rjmr5zKS4ag5wtHtYLm1L3LBmHdj7d0VFsOpPQexIOEnnjzKqlwmAxT6eYJ/t3qgBlT8KRfshBFgEuUZ5GJOC7TOne4PfB0bboPMZzIRo3WE9dPGRR8kAIme8XqhFbmjdJ+WsTjg0Lj+415tIbyRQoNkLtawrJxozvevs6wFEFcA/YG6o03Z577tiLT3WxOguCcD5vrALH48SyZb8jDUtcVgTWMW0to/n63S8JGUNyF7Bkw9HQWUx+GO1cv2GNzKpk22KS5dlNUVGE9E/7Ydc=
src.opensuse.org,195.135.223.224 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFKNThLRPznU5Io1KrAYHmYpaoLQEMGM9nwpKyYQCkPx

devel-importer/.gitignore (vendored): 1 line changed

@@ -2,4 +2,3 @@ devel-importer
Factory
git
git-migrated
git-importer

File diff suppressed because it is too large.

go.mod: 5 lines changed

@@ -10,16 +10,12 @@ require (
github.com/go-openapi/validate v0.24.0
github.com/opentracing/opentracing-go v1.2.0
github.com/rabbitmq/amqp091-go v1.10.0
github.com/redis/go-redis/v9 v9.11.0
github.com/tailscale/hujson v0.0.0-20250226034555-ec1d1c113d33
go.uber.org/mock v0.5.0
gopkg.in/yaml.v3 v3.0.1
)

require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.23.0 // indirect
@@ -37,4 +33,5 @@ require (
go.opentelemetry.io/otel/metric v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
golang.org/x/sync v0.7.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum: 10 lines changed

@@ -1,16 +1,8 @@
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
@@ -58,8 +50,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rabbitmq/amqp091-go v1.10.0 h1:STpn5XsHlHGcecLmMFCtg7mqq0RnD+zFr4uzukfVhBw=
github.com/rabbitmq/amqp091-go v1.10.0/go.mod h1:Hy4jKW5kQART1u+JkDTF9YYOQUHXqMuhrgxOEeS7G4o=
github.com/redis/go-redis/v9 v9.11.0 h1:E3S08Gl/nJNn5vkxd2i78wZxWAPNZgUNTp8WIJUAiIs=
github.com/redis/go-redis/v9 v9.11.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -15,23 +15,6 @@ Target Usage

Projects where policy reviews are required.

Configiuration
--------------

Groups are defined in the workflow.config inside the project git. They take following options,

{
...
ReviewGroups: [
{
"Name": "name of the group user",
"Reviewers": ["members", "of", "group"],
"Silent": (true, false) -- if true, do not explicitly require review requests of group members
},
],
...
}

Requirements
------------
* Gitea token to:
@@ -125,16 +125,6 @@ func FindAcceptableReviewInTimeline(user string, timeline []*models.TimelineComm
return nil
}

func FindOurLastReviewInTimeline(timeline []*models.TimelineComment) *models.TimelineComment {
for _, t := range timeline {
if t.Type == common.TimelineCommentType_Review && t.User.UserName == groupName && t.Created == t.Updated {
return t
}
}

return nil
}

func UnrequestReviews(gitea common.Gitea, org, repo string, id int64, users []string) {
if err := gitea.UnrequestReview(org, repo, id, users...); err != nil {
common.LogError("Can't remove reviewrs after a review:", err)
@@ -167,29 +157,13 @@ func ProcessNotifications(notification *models.NotificationThread, gitea common.
repo := match[2]
id, _ := strconv.ParseInt(match[3], 10, 64)

common.LogInfo("processing:", fmt.Sprintf("%s/%s!%d", org, repo, id))
common.LogInfo("processing:", fmt.Sprintf("%s/%s#%d", org, repo, id))
pr, err := gitea.GetPullRequest(org, repo, id)
if err != nil {
common.LogError(" ** Cannot fetch PR associated with review:", subject.URL, "Error:", err)
return
}

if err := ProcessPR(pr); err == nil && !common.IsDryRun {
if err := gitea.SetNotificationRead(notification.ID); err != nil {
common.LogDebug(" Cannot set notification as read", err)
}
} else if err != nil && err != ReviewNotFinished {
common.LogError(err)
}
}

var ReviewNotFinished = fmt.Errorf("Review is not finished")

func ProcessPR(pr *models.PullRequest) error {
org := pr.Base.Repo.Owner.UserName
repo := pr.Base.Repo.Name
id := pr.Index

found := false
for _, reviewer := range pr.RequestedReviewers {
if reviewer != nil && reviewer.UserName == groupName {
@@ -199,36 +173,45 @@ func ProcessPR(pr *models.PullRequest) error {
}
if !found {
common.LogInfo(" review is not requested for", groupName)
return nil
if !common.IsDryRun {
gitea.SetNotificationRead(notification.ID)
}
return
}

config := configs.GetPrjGitConfig(org, repo, pr.Base.Name)
if config == nil {
return fmt.Errorf("Cannot find config for: %s", pr.URL)
common.LogError("Cannot find config for:", fmt.Sprintf("%s/%s#%s", org, repo, pr.Base.Name))
return
}
if pr.State == "closed" {
// dismiss the review
common.LogInfo(" -- closed request, so nothing to review")
return nil
if !common.IsDryRun {
gitea.SetNotificationRead(notification.ID)
}
return
}

reviews, err := gitea.GetPullRequestReviews(org, repo, id)
if err != nil {
return fmt.Errorf("Failed to fetch reviews for: %v: %w", pr.URL, err)
common.LogInfo(" ** No reviews associated with request:", subject.URL, "Error:", err)
return
}

timeline, err := common.FetchTimelineSinceReviewRequestOrPush(gitea, groupName, pr.Head.Sha, org, repo, id)
if err != nil {
return fmt.Errorf("Failed to fetch timeline to review. %w", err)
common.LogError(err)
return
}

groupConfig, err := config.GetReviewGroup(groupName)
requestReviewers, err := config.GetReviewGroupMembers(groupName)
if err != nil {
return fmt.Errorf("Failed to fetch review group. %w", err)
common.LogError(err)
return
}

// submitter cannot be reviewer
requestReviewers := groupConfig.Reviewers
requestReviewers = slices.DeleteFunc(requestReviewers, func(u string) bool { return u == pr.User.UserName })
// pr.Head.Sha
@@ -236,38 +219,34 @@ func ProcessPR(pr *models.PullRequest) error {
if review := FindAcceptableReviewInTimeline(reviewer, timeline, reviews); review != nil {
if ReviewAccepted(review.Body) {
if !common.IsDryRun {
text := reviewer + " approved a review on behalf of " + groupName
if review := FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
_, err := gitea.AddReviewComment(pr, common.ReviewStateApproved, text)
if err != nil {
common.LogError(" -> failed to write approval comment", err)
gitea.AddReviewComment(pr, common.ReviewStateApproved, "Signed off by: "+reviewer)
UnrequestReviews(gitea, org, repo, id, requestReviewers)
if !common.IsDryRun {
if err := gitea.SetNotificationRead(notification.ID); err != nil {
common.LogDebug(" Cannot set notification as read", err)
}
UnrequestReviews(gitea, org, repo, id, requestReviewers)
}
}
common.LogInfo(" -> approved by", reviewer)
common.LogInfo(" review at", review.Created)
return nil
return
} else if ReviewRejected(review.Body) {
if !common.IsDryRun {
text := reviewer + " requested changes on behalf of " + groupName + ". See " + review.HTMLURL
if review := FindOurLastReviewInTimeline(timeline); review == nil || review.Body != text {
_, err := gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Changes requested. See review by: "+reviewer)
if err != nil {
common.LogError(" -> failed to write rejecting comment", err)
}
UnrequestReviews(gitea, org, repo, id, requestReviewers)
gitea.AddReviewComment(pr, common.ReviewStateRequestChanges, "Changes requested. See review by: "+reviewer)
UnrequestReviews(gitea, org, repo, id, requestReviewers)
if err := gitea.SetNotificationRead(notification.ID); err != nil {
common.LogDebug(" Cannot set notification as read", err)
}
}
common.LogInfo(" -> declined by", reviewer)
return nil
return
}
}
}

// request group member reviews, if missing
common.LogDebug(" Review incomplete...")
if !groupConfig.Silent && len(requestReviewers) > 0 {
if len(requestReviewers) > 0 {
common.LogDebug(" Requesting reviews for:", requestReviewers)
if !common.IsDryRun {
if _, err := gitea.RequestReviews(pr, requestReviewers...); err != nil {
@@ -290,22 +269,12 @@ func ProcessPR(pr *models.PullRequest) error {
|
||||
}
|
||||
|
||||
if !found_help_comment && !common.IsDryRun {
|
||||
helpComment := fmt.Sprintln("Review by", groupName, "represents a group of reviewers:", strings.Join(requestReviewers, ", "), ".\n\n"+
|
||||
"Do **not** use standard review interface to review on behalf of the group.\n"+
|
||||
"To accept the review on behalf of the group, create the following comment: `@"+groupName+": approve`.\n"+
|
||||
"To request changes on behalf of the group, create the following comment: `@"+groupName+": decline` followed with lines justifying the decision.\n"+
|
||||
"Future edits of the comments are ignored, a new comment is required to change the review state.")
|
||||
if slices.Contains(groupConfig.Reviewers, pr.User.UserName) {
|
||||
helpComment = helpComment + "\n\n" +
|
||||
"Submitter is member of this review group, hence they are excluded from being one of the reviewers here"
|
||||
}
|
||||
helpComment := fmt.Sprintln("Review by", groupName, "represents a group of reviewers:", strings.Join(requestReviewers, ", "), ". To review as part of this group, create a comment with contents @"+groupName+": LGTM on a separate line to accept a review. To request changes, write @"+groupName+": followed by reason for rejection. Do not use reviews to review as a group. Editing a comment invalidates that comment.")
|
||||
gitea.AddComment(pr, helpComment)
|
||||
}
|
||||
|
||||
return ReviewNotFinished
|
||||
}
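The help comment above defines the whole group-review protocol: a reviewer acts for the group by posting a comment of the form `@<group>: approve` or `@<group>: decline <justification>` on its own line. A minimal sketch of matching such a command is shown below; the `ParseGroupReviewCommand` helper and its verdict type are illustrative assumptions, not part of group-review's actual code — only the comment format comes from the help text.

```go
// Sketch only: parse an "@<group>: approve" / "@<group>: decline" command
// from a PR comment body, per the help comment above. Names are illustrative.
package main

import "strings"

type GroupVerdict int

const (
	VerdictNone GroupVerdict = iota
	VerdictApprove
	VerdictDecline
)

func ParseGroupReviewCommand(groupName, commentBody string) GroupVerdict {
	prefix := "@" + groupName + ":"
	for _, line := range strings.Split(commentBody, "\n") {
		line = strings.TrimSpace(line)
		if !strings.HasPrefix(line, prefix) {
			continue
		}
		rest := strings.TrimSpace(strings.TrimPrefix(line, prefix))
		if strings.EqualFold(rest, "approve") {
			return VerdictApprove
		}
		// anything else after the prefix is a decline with justification
		return VerdictDecline
	}
	return VerdictNone
}
```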
|
||||
|
||||
func PeriodReviewCheck() {
|
||||
func PeriodReviewCheck(gitea common.Gitea) {
|
||||
notifications, err := gitea.GetNotifications(common.GiteaNotificationType_Pull, nil)
|
||||
if err != nil {
|
||||
common.LogError(" Error fetching unread notifications: %w", err)
|
||||
@@ -314,15 +283,14 @@ func PeriodReviewCheck() {
|
||||
|
||||
for _, notification := range notifications {
|
||||
ProcessNotifications(notification, gitea)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
var gitea common.Gitea
|
||||
|
||||
func main() {
|
||||
giteaUrl := flag.String("gitea-url", "https://src.opensuse.org", "Gitea instance used for reviews")
|
||||
rabbitMqHost := flag.String("rabbit-url", "amqps://rabbit.opensuse.org", "RabbitMQ instance where Gitea webhook notifications are sent")
|
||||
interval := flag.Int64("interval", 10, "Notification polling interval in minutes (min 1 min)")
|
||||
interval := flag.Int64("interval", 5, "Notification polling interval in minutes (min 1 min)")
|
||||
configFile := flag.String("config", "", "PrjGit listing config file")
|
||||
logging := flag.String("logging", "info", "Logging level: [none, error, info, debug]")
|
||||
flag.BoolVar(&common.IsDryRun, "dry", false, "Dry run, no effect. For debugging")
|
||||
@@ -359,7 +327,7 @@ func main() {
|
||||
return
|
||||
}
|
||||
|
||||
gitea = common.AllocateGiteaTransport(*giteaUrl)
|
||||
gitea := common.AllocateGiteaTransport(*giteaUrl)
|
||||
configs, err = common.ResolveWorkflowConfigs(gitea, configData)
|
||||
if err != nil {
|
||||
common.LogError("Cannot parse workflow configs:", err)
|
||||
@@ -403,22 +371,19 @@ func main() {
|
||||
config_modified: make(chan *common.AutogitConfig),
|
||||
}
|
||||
|
||||
process_issue_pr := IssueCommentProcessor{}
|
||||
|
||||
configUpdates := &common.RabbitMQGiteaEventsProcessor{
|
||||
Orgs: []string{},
|
||||
configUpdates := &common.ListenDefinitions{
|
||||
RabbitURL: u,
|
||||
Orgs: []string{},
|
||||
Handlers: map[string]common.RequestProcessor{
|
||||
common.RequestType_Push: &config_update,
|
||||
common.RequestType_IssueComment: &process_issue_pr,
|
||||
common.RequestType_Push: &config_update,
|
||||
},
|
||||
}
|
||||
configUpdates.Connection().RabbitURL = u
|
||||
for _, c := range configs {
|
||||
if org, _, _ := c.GetPrjGit(); !slices.Contains(configUpdates.Orgs, org) {
|
||||
configUpdates.Orgs = append(configUpdates.Orgs, org)
|
||||
}
|
||||
}
|
||||
go common.ProcessRabbitMQEvents(configUpdates)
|
||||
go configUpdates.ProcessRabbitMQEvents()
|
||||
|
||||
for {
|
||||
config_update_loop:
|
||||
@@ -446,7 +411,7 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
PeriodReviewCheck()
|
||||
PeriodReviewCheck(gitea)
|
||||
time.Sleep(time.Duration(*interval * int64(time.Minute)))
|
||||
}
|
||||
}
|
||||
|
@@ -7,25 +7,6 @@ import (
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
type IssueCommentProcessor struct{}
|
||||
|
||||
func (s *IssueCommentProcessor) ProcessFunc(req *common.Request) error {
|
||||
if req.Type != common.RequestType_IssueComment {
|
||||
return fmt.Errorf("Unhandled, ignored request type: %s", req.Type)
|
||||
}
|
||||
|
||||
data := req.Data.(*common.IssueCommentWebhookEvent)
|
||||
org := data.Repository.Owner.Username
|
||||
repo := data.Repository.Name
|
||||
index := int64(data.Issue.Number)
|
||||
|
||||
pr, err := gitea.GetPullRequest(org, repo, index)
|
||||
if err != nil {
|
||||
return fmt.Errorf("Failed to fetch PullRequest from event: %s/%s!%d Error: %w", org, repo, index, err)
|
||||
}
|
||||
return ProcessPR(pr)
|
||||
}
|
||||
|
||||
type ConfigUpdatePush struct {
|
||||
config_modified chan *common.AutogitConfig
|
||||
}
|
||||
|
@@ -1,27 +0,0 @@
package main

import (
	"io"
	"log"
	"os"

	"github.com/tailscale/hujson"
)

func main() {
	data, err := io.ReadAll(os.Stdin)
	if err != nil {
		log.Println(err)
		os.Exit(1)
	}
	json, err := hujson.Standardize(data)
	if err != nil {
		log.Println(err)
		os.Exit(2)
	}
	_, err = os.Stdout.Write(json)
	if err != nil {
		log.Print(err)
		os.Exit(3)
	}
}
@@ -108,7 +108,7 @@ func ProcessNotification(notification *models.NotificationThread) {
|
||||
repo := match[2]
|
||||
id, _ := strconv.ParseInt(match[3], 10, 64)
|
||||
|
||||
common.LogInfo("processing:", fmt.Sprintf("%s/%s!%d", org, repo, id))
|
||||
common.LogInfo("processing:", fmt.Sprintf("%s/%s#%d", org, repo, id))
|
||||
pr, err := Gitea.GetPullRequest(org, repo, id)
|
||||
if err != nil {
|
||||
common.LogError(" ** Cannot fetch PR associated with review:", subject.URL, "Error:", err)
|
||||
|
@@ -120,7 +120,7 @@ func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatus
|
||||
// the repositories should be set up equally between the projects. We
// need to verify that packages that are building in `refProject` are not
// failing in the `project`
|
||||
BuildResultSorter := func(a, b *common.BuildResult) int {
|
||||
BuildResultSorter := func(a, b common.BuildResult) int {
|
||||
if c := strings.Compare(a.Repository, b.Repository); c != 0 {
|
||||
return c
|
||||
}
|
||||
@@ -136,7 +136,7 @@ func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatus
|
||||
common.LogInfo("New package. Only need some success...")
|
||||
SomeSuccess := false
|
||||
for i := 0; i < len(project.Result); i++ {
|
||||
repoRes := project.Result[i]
|
||||
repoRes := &project.Result[i]
|
||||
repoResStatus, ok := common.ObsRepoStatusDetails[repoRes.Code]
|
||||
if !ok {
|
||||
common.LogDebug("cannot find code:", repoRes.Code)
|
||||
@@ -205,8 +205,8 @@ func ProcessBuildStatus(project, refProject *common.BuildResultList) BuildStatus
|
||||
return BuildStatusSummaryFailed
|
||||
}
|
||||
|
||||
func ProcessRepoBuildStatus(results, ref []*common.PackageBuildStatus) (status BuildStatusSummary, SomeSuccess bool) {
|
||||
PackageBuildStatusSorter := func(a, b *common.PackageBuildStatus) int {
|
||||
func ProcessRepoBuildStatus(results, ref []common.PackageBuildStatus) (status BuildStatusSummary, SomeSuccess bool) {
|
||||
PackageBuildStatusSorter := func(a, b common.PackageBuildStatus) int {
|
||||
return strings.Compare(a.Package, b.Package)
|
||||
}
|
||||
|
||||
@@ -263,7 +263,7 @@ func ProcessRepoBuildStatus(results, ref []*common.PackageBuildStatus) (status B
|
||||
return BuildStatusSummarySuccess, SomeSuccess
|
||||
}
|
||||
|
||||
func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingPrj, buildPrj string, stagingMasterPrj string) (*common.ProjectMeta, error) {
|
||||
func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullRequest, stagingPrj, buildPrj string) (*common.ProjectMeta, error) {
|
||||
common.LogDebug("repo content fetching ...")
|
||||
err := FetchPrGit(git, pr)
|
||||
if err != nil {
|
||||
@@ -289,15 +289,7 @@ func GenerateObsPrjMeta(git common.Git, gitea common.Gitea, pr *models.PullReque
|
||||
}
|
||||
}
|
||||
|
||||
common.LogDebug("Trying first staging master project: ", stagingMasterPrj)
|
||||
meta, err := ObsClient.GetProjectMeta(stagingMasterPrj)
|
||||
if err == nil {
|
||||
// success, so we use that staging master project as our build project
|
||||
buildPrj = stagingMasterPrj
|
||||
} else {
|
||||
common.LogInfo("error fetching project meta for ", stagingMasterPrj, ". Fall Back to ", buildPrj)
|
||||
meta, err = ObsClient.GetProjectMeta(buildPrj)
|
||||
}
|
||||
meta, err := ObsClient.GetProjectMeta(buildPrj)
|
||||
if err != nil {
|
||||
common.LogError("error fetching project meta for", buildPrj, ". Err:", err)
|
||||
return nil, err
|
||||
@@ -422,8 +414,7 @@ func StartOrUpdateBuild(config *common.StagingConfig, git common.Git, gitea comm
|
||||
var state RequestModification = RequestModificationSourceChanged
|
||||
if meta == nil {
|
||||
// new build
|
||||
common.LogDebug(" Staging master:", config.StagingProject)
|
||||
meta, err = GenerateObsPrjMeta(git, gitea, pr, obsPrProject, config.ObsProject, config.StagingProject)
|
||||
meta, err = GenerateObsPrjMeta(git, gitea, pr, obsPrProject, config.ObsProject)
|
||||
if err != nil {
|
||||
return RequestModificationNoChange, err
|
||||
}
|
||||
@@ -437,8 +428,6 @@ func StartOrUpdateBuild(config *common.StagingConfig, git common.Git, gitea comm
|
||||
} else {
|
||||
err = ObsClient.SetProjectMeta(meta)
|
||||
if err != nil {
|
||||
x, _ := xml.MarshalIndent(meta, "", " ")
|
||||
common.LogDebug(" meta:", string(x))
|
||||
common.LogError("cannot create meta project:", err)
|
||||
return RequestModificationNoChange, err
|
||||
}
|
||||
@@ -595,7 +584,7 @@ func CleanupPullNotification(gitea common.Gitea, thread *models.NotificationThre
|
||||
}
|
||||
|
||||
if !pr.HasMerged && time.Since(time.Time(pr.Closed)) < time.Duration(config.CleanupDelay)*time.Hour {
|
||||
common.LogInfo("Cooldown period for cleanup of", thread.Subject.HTMLURL)
|
||||
common.LogInfo("Cooldown period for cleanup of", thread.URL)
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -816,12 +805,7 @@ func ProcessPullRequest(gitea common.Gitea, org, repo string, id int64) (bool, e
|
||||
if !rebuild_all {
|
||||
common.LogInfo("No package changes detected. Ignoring")
|
||||
if !IsDryRun {
|
||||
_, err := gitea.AddReviewComment(pr, common.ReviewStateApproved, "No package changes, not rebuilding project by default, accepting change")
|
||||
if err != nil {
|
||||
common.LogError(err)
|
||||
} else {
|
||||
return true, nil
|
||||
}
|
||||
_, err = gitea.AddReviewComment(pr, common.ReviewStateComment, "No package changes. Not rebuilding project by default")
|
||||
}
|
||||
return true, err
|
||||
}
|
||||
|
1
obs-status-service/.gitignore
vendored
@@ -1,2 +1 @@
obs-status-service
*.svg
@@ -25,10 +25,6 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
@@ -38,18 +34,27 @@ const (
|
||||
)
|
||||
|
||||
var obs *common.ObsClient
|
||||
var debug bool
|
||||
|
||||
func ProjectStatusSummarySvg(res []*common.BuildResult) []byte {
|
||||
list := common.BuildResultList{
|
||||
Result: res,
|
||||
func LogDebug(v ...any) {
|
||||
if debug {
|
||||
log.Println(v...)
|
||||
}
|
||||
pkgs := list.GetPackageList()
|
||||
}
|
||||
|
||||
func ProjectStatusSummarySvg(project string) []byte {
|
||||
res := GetCurrentStatus(project)
|
||||
if res == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
pkgs := res.GetPackageList()
|
||||
maxLen := 0
|
||||
for _, p := range pkgs {
|
||||
maxLen = max(maxLen, len(p))
|
||||
}
|
||||
|
||||
width := float32(len(list.Result))*1.5 + float32(maxLen)*0.8
|
||||
width := float32(len(res.Result))*1.5 + float32(maxLen)*0.8
|
||||
height := 1.5*float32(maxLen) + 30
|
||||
|
||||
ret := bytes.Buffer{}
|
||||
@@ -60,78 +65,21 @@ func ProjectStatusSummarySvg(res []*common.BuildResult) []byte {
|
||||
ret.WriteString(`em" xmlns="http://www.w3.org/2000/svg">`)
|
||||
ret.WriteString(`<defs>
|
||||
<g id="f"> <!-- failed -->
|
||||
<rect width="8em" height="1.5em" fill="#800" />
|
||||
<rect width="1em" height="1em" fill="#800" />
|
||||
</g>
|
||||
<g id="s"> <!--succeeded-->
|
||||
<rect width="8em" height="1.5em" fill="#080" />
|
||||
<rect width="1em" height="1em" fill="#080" />
|
||||
</g>
|
||||
<g id="buidling"> <!--building-->
|
||||
<rect width="8em" height="1.5em" fill="#880" />
|
||||
<rect width="1em" height="1em" fill="#880" />
|
||||
</g>
|
||||
</defs>`)
|
||||
|
||||
ret.WriteString(`<use href="#f" x="1em" y="2em"/>`)
|
||||
ret.WriteString(`</svg>`)
|
||||
return ret.Bytes()
|
||||
}
|
||||
|
||||
func LinkToBuildlog(R *common.BuildResult, S *common.PackageBuildStatus) string {
|
||||
if R != nil && S != nil {
|
||||
switch S.Code {
|
||||
case "succeeded", "failed", "building":
|
||||
return "/buildlog/" + R.Project + "/" + S.Package + "/" + R.Repository + "/" + R.Arch
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func PackageStatusSummarySvg(pkg string, res []*common.BuildResult) []byte {
|
||||
// per repo, per arch status bins
|
||||
repo_names := []string{}
|
||||
package_names := []string{}
|
||||
multibuild_prefix := pkg + ":"
|
||||
for _, r := range res {
|
||||
if pos, found := slices.BinarySearchFunc(repo_names, r.Repository, strings.Compare); !found {
|
||||
repo_names = slices.Insert(repo_names, pos, r.Repository)
|
||||
}
|
||||
|
||||
for _, p := range r.Status {
|
||||
if p.Package == pkg || strings.HasPrefix(p.Package, multibuild_prefix) {
|
||||
if pos, found := slices.BinarySearchFunc(package_names, p.Package, strings.Compare); !found {
|
||||
package_names = slices.Insert(package_names, pos, p.Package)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret := NewSvg()
|
||||
for _, pkg = range package_names {
|
||||
// if len(package_names) > 1 {
|
||||
ret.WriteTitle(pkg)
|
||||
// }
|
||||
|
||||
for _, name := range repo_names {
|
||||
ret.WriteSubtitle(name)
|
||||
// print all repo arches here and build results
|
||||
for _, r := range res {
|
||||
if r.Repository != name {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, s := range r.Status {
|
||||
if s.Package == pkg {
|
||||
link := LinkToBuildlog(r, s)
|
||||
ret.WritePackageStatus(link, r.Arch, s.Code, s.Details)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ret.GenerateSvg()
|
||||
}
|
||||
|
||||
func BuildStatusSvg(repo *common.BuildResult, status *common.PackageBuildStatus) []byte {
|
||||
func PackageStatusSummarySvg(status *common.PackageBuildStatus) []byte {
|
||||
buildStatus, ok := common.ObsBuildStatusDetails[status.Code]
|
||||
if !ok {
|
||||
buildStatus = common.ObsBuildStatusDetails["error"]
|
||||
@@ -148,18 +96,12 @@ func BuildStatusSvg(repo *common.BuildResult, status *common.PackageBuildStatus)
|
||||
}
|
||||
}
|
||||
|
||||
buildlog := LinkToBuildlog(repo, status)
|
||||
startTag := ""
|
||||
endTag := ""
|
||||
log.Println(status, " -> ", buildStatus)
|
||||
|
||||
if len(buildlog) > 0 {
|
||||
startTag = "<a href=\"" + buildlog + "\">"
|
||||
endTag = "</a>"
|
||||
}
|
||||
|
||||
return []byte(`<svg version="2.0" width="8em" height="1.5em" xmlns="http://www.w3.org/2000/svg">` +
|
||||
`<rect width="100%" height="100%" fill="` + fillColor + `"/>` + startTag +
|
||||
`<text x="4em" y="1.1em" text-anchor="middle" fill="` + textColor + `">` + buildStatus.Code + `</text>` + endTag + `</svg>`)
|
||||
return []byte(`<svg version="2.0" width="8em" height="1.5em" xmlns="http://www.w3.org/2000/svg">
|
||||
<rect width="100%" height="100%" fill="` + fillColor + `"/>
|
||||
<text x="4em" y="1.1em" text-anchor="middle" fill="` + textColor + `">` + buildStatus.Code + `</text>
|
||||
</svg>`)
|
||||
}
|
||||
|
||||
func main() {
|
||||
@@ -167,116 +109,51 @@ func main() {
|
||||
key := flag.String("key-file", "", "Private key for the TLS certificate")
|
||||
listen := flag.String("listen", "[::1]:8080", "Listening string")
|
||||
disableTls := flag.Bool("no-tls", false, "Disable TLS")
|
||||
obsUrl := flag.String("obs-url", "https://api.opensuse.org", "OBS API endpoint for package buildlog information")
|
||||
debug := flag.Bool("debug", false, "Enable debug logging")
|
||||
// RabbitMQHost := flag.String("rabbit-mq", "amqps://rabbit.opensuse.org", "RabbitMQ message bus server")
|
||||
// Topic := flag.String("topic", "opensuse.obs", "RabbitMQ topic prefix")
|
||||
obsHost := flag.String("obs-host", "https://api.opensuse.org", "OBS API endpoint for package status information")
|
||||
flag.BoolVar(&debug, "debug", false, "Enable debug logging")
|
||||
RabbitMQHost := flag.String("rabbit-mq", "amqps://rabbit.opensuse.org", "RabbitMQ message bus server")
|
||||
Topic := flag.String("topic", "opensuse.obs", "RabbitMQ topic prefix")
|
||||
flag.Parse()
|
||||
|
||||
if *debug {
|
||||
common.SetLoggingLevel(common.LogLevelDebug)
|
||||
}
|
||||
|
||||
// common.PanicOnError(common.RequireObsSecretToken())
|
||||
common.PanicOnError(common.RequireObsSecretToken())
|
||||
|
||||
var err error
|
||||
if obs, err = common.NewObsClient(*obsUrl); err != nil {
|
||||
if obs, err = common.NewObsClient(*obsHost); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
if redisUrl := os.Getenv("REDIS"); len(redisUrl) > 0 {
|
||||
RedisConnect(redisUrl)
|
||||
} else {
|
||||
common.LogError("REDIS needs to contains URL of the OBS Redis instance with login information")
|
||||
return
|
||||
}
|
||||
|
||||
var rescanRepoError error
|
||||
go func() {
|
||||
for {
|
||||
if rescanRepoError = RescanRepositories(); rescanRepoError != nil {
|
||||
common.LogError("Failed to rescan repositories.", err)
|
||||
}
|
||||
time.Sleep(time.Minute * 5)
|
||||
}
|
||||
}()
|
||||
|
||||
http.HandleFunc("GET /", func(res http.ResponseWriter, req *http.Request) {
|
||||
if rescanRepoError != nil {
|
||||
res.WriteHeader(500)
|
||||
return
|
||||
}
|
||||
res.WriteHeader(404)
|
||||
res.Write([]byte("404 page not found\n"))
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}", func(res http.ResponseWriter, req *http.Request) {
|
||||
obsPrj := req.PathValue("Project")
|
||||
common.LogInfo(" request: GET /status/" + obsPrj)
|
||||
http.HandleFunc("GET /{Project}", func(res http.ResponseWriter, req *http.Request) {
|
||||
res.WriteHeader(http.StatusBadRequest)
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}/{Package}", func(res http.ResponseWriter, req *http.Request) {
|
||||
obsPrj := req.PathValue("Project")
|
||||
obsPkg := req.PathValue("Package")
|
||||
common.LogInfo(" request: GET /status/" + obsPrj + "/" + obsPkg)
|
||||
http.HandleFunc("GET /{Project}/{Package}", func(res http.ResponseWriter, req *http.Request) {
|
||||
/*
|
||||
obsPrj := req.PathValue("Project")
|
||||
obsPkg := req.PathValue("Package")
|
||||
|
||||
status := FindAndUpdateProjectResults(obsPrj)
|
||||
if len(status) == 0 {
|
||||
res.WriteHeader(404)
|
||||
return
|
||||
}
|
||||
svg := PackageStatusSummarySvg(obsPkg, status)
|
||||
status, _ := PackageBuildStatus(obsPrj, obsPkg)
|
||||
svg := PackageStatusSummarySvg(status)
|
||||
*/
|
||||
|
||||
res.Header().Add("content-type", "image/svg+xml")
|
||||
res.Header().Add("size", fmt.Sprint(len(svg)))
|
||||
res.Write(svg)
|
||||
//res.Header().Add("size", fmt.Sprint(len(svg)))
|
||||
//res.Write(svg)
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}/{Package}/{Repository}", func(res http.ResponseWriter, req *http.Request) {
|
||||
obsPrj := req.PathValue("Project")
|
||||
obsPkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
common.LogInfo(" request: GET /status/" + obsPrj + "/" + obsPkg)
|
||||
|
||||
status := FindAndUpdateRepoResults(obsPrj, repo)
|
||||
if len(status) == 0 {
|
||||
res.WriteHeader(404)
|
||||
return
|
||||
}
|
||||
svg := PackageStatusSummarySvg(obsPkg, status)
|
||||
|
||||
res.Header().Add("content-type", "image/svg+xml")
|
||||
res.Header().Add("size", fmt.Sprint(len(svg)))
|
||||
res.Write(svg)
|
||||
})
|
||||
http.HandleFunc("GET /status/{Project}/{Package}/{Repository}/{Arch}", func(res http.ResponseWriter, req *http.Request) {
|
||||
prj := req.PathValue("Project")
|
||||
pkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
arch := req.PathValue("Arch")
|
||||
common.LogInfo("GET /status/" + prj + "/" + pkg + "/" + repo + "/" + arch)
|
||||
|
||||
res.Header().Add("content-type", "image/svg+xml")
|
||||
|
||||
for _, r := range FindAndUpdateProjectResults(prj) {
|
||||
if r.Arch == arch && r.Repository == repo {
|
||||
if idx, found := slices.BinarySearchFunc(r.Status, &common.PackageBuildStatus{Package: pkg}, common.PackageBuildStatusComp); found {
|
||||
res.Write(BuildStatusSvg(r, r.Status[idx]))
|
||||
return
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
res.Write(BuildStatusSvg(nil, &common.PackageBuildStatus{Package: pkg, Code: "unknown"}))
|
||||
})
|
||||
http.HandleFunc("GET /buildlog/{Project}/{Package}/{Repository}/{Arch}", func(res http.ResponseWriter, req *http.Request) {
|
||||
http.HandleFunc("GET /{Project}/{Package}/{Repository}/{Arch}", func(res http.ResponseWriter, req *http.Request) {
|
||||
prj := req.PathValue("Project")
|
||||
pkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
arch := req.PathValue("Arch")
|
||||
|
||||
res.Header().Add("location", "https://build.opensuse.org/package/live_build_log/"+prj+"/"+pkg+"/"+repo+"/"+arch)
|
||||
res.WriteHeader(307)
|
||||
return
|
||||
res.Header().Add("content-type", "image/svg+xml")
|
||||
|
||||
status := GetDetailedBuildStatus(prj, pkg, repo, arch)
|
||||
res.Write(PackageStatusSummarySvg(status))
|
||||
})
|
||||
http.HandleFunc("GET /{Project}/{Package}/{Repository}/{Arch}/buildlog", func(res http.ResponseWriter, req *http.Request) {
|
||||
prj := req.PathValue("Project")
|
||||
pkg := req.PathValue("Package")
|
||||
repo := req.PathValue("Repository")
|
||||
arch := req.PathValue("Arch")
|
||||
|
||||
// status := GetDetailedBuildStatus(prj, pkg, repo, arch)
|
||||
data, err := obs.BuildLog(prj, pkg, repo, arch)
|
||||
@@ -290,6 +167,8 @@ func main() {
|
||||
io.Copy(res, data)
|
||||
})
|
||||
|
||||
go ProcessUpdates()
|
||||
|
||||
if *disableTls {
|
||||
log.Fatal(http.ListenAndServe(*listen, nil))
|
||||
} else {
|
||||
|
@@ -1,82 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
func TestStatusSvg(t *testing.T) {
|
||||
os.WriteFile("teststatus.svg", BuildStatusSvg(nil, &common.PackageBuildStatus{
|
||||
Package: "foo",
|
||||
Code: "succeeded",
|
||||
Details: "more success here",
|
||||
}), 0o777)
|
||||
data := []*common.BuildResult{
|
||||
{
|
||||
Project: "project:foo",
|
||||
Repository: "repo1",
|
||||
Arch: "x86_64",
|
||||
Status: []*common.PackageBuildStatus{
|
||||
{
|
||||
Package: "pkg1",
|
||||
Code: "succeeded",
|
||||
},
|
||||
{
|
||||
Package: "pkg2",
|
||||
Code: "failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Project: "project:foo",
|
||||
Repository: "repo1",
|
||||
Arch: "s390x",
|
||||
Status: []*common.PackageBuildStatus{
|
||||
{
|
||||
Package: "pkg1",
|
||||
Code: "succeeded",
|
||||
},
|
||||
{
|
||||
Package: "pkg2",
|
||||
Code: "unresolveable",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Project: "project:foo",
|
||||
Repository: "repo1",
|
||||
Arch: "i586",
|
||||
Status: []*common.PackageBuildStatus{
|
||||
{
|
||||
Package: "pkg1",
|
||||
Code: "succeeded",
|
||||
},
|
||||
{
|
||||
Package: "pkg2",
|
||||
Code: "blocked",
|
||||
Details: "foo bar is why",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Project: "project:foo",
|
||||
Repository: "TW",
|
||||
Arch: "s390",
|
||||
Status: []*common.PackageBuildStatus{
|
||||
{
|
||||
Package: "pkg1",
|
||||
Code: "excluded",
|
||||
},
|
||||
{
|
||||
Package: "pkg2",
|
||||
Code: "failed",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
os.WriteFile("testpackage.svg", PackageStatusSummarySvg("pkg2", data), 0o777)
|
||||
os.WriteFile("testproject.svg", ProjectStatusSummarySvg(data), 0o777)
|
||||
}
|
3
obs-status-service/rabbit.go
Normal file
@@ -0,0 +1,3 @@
package main

@@ -1,214 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/redis/go-redis/v9"
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
var RepoStatus []*common.BuildResult = []*common.BuildResult{}
|
||||
var RepoStatusLock *sync.RWMutex = &sync.RWMutex{}
|
||||
|
||||
var redisClient *redis.Client
|
||||
|
||||
func RedisConnect(RedisUrl string) {
|
||||
opts, err := redis.ParseURL(RedisUrl)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
redisClient = redis.NewClient(opts)
|
||||
}
|
||||
|
||||
func UpdateResults(r *common.BuildResult) {
|
||||
RepoStatusLock.Lock()
|
||||
defer RepoStatusLock.Unlock()
|
||||
|
||||
key := "result." + r.Project + "/" + r.Repository + "/" + r.Arch
|
||||
common.LogDebug(" + Updating", key)
|
||||
data, err := redisClient.HGetAll(context.Background(), key).Result()
|
||||
if err != nil {
|
||||
common.LogError("Failed fetching build results for", key, err)
|
||||
}
|
||||
common.LogDebug(" + Update size", len(data))
|
||||
|
||||
reset_time := time.Date(1000, 1, 1, 1, 1, 1, 1, time.Local)
|
||||
for _, pkg := range r.Status {
|
||||
pkg.LastUpdate = reset_time
|
||||
}
|
||||
r.LastUpdate = time.Now()
|
||||
for pkg, result := range data {
|
||||
if strings.HasPrefix(result, "scheduled") {
|
||||
// TODO: lookup where's building
|
||||
result = "building"
|
||||
}
|
||||
|
||||
var idx int
|
||||
var found bool
|
||||
var code string
|
||||
var details string
|
||||
|
||||
if pos := strings.IndexByte(result, ':'); pos > -1 && pos < len(result) {
|
||||
code = result[0:pos]
|
||||
details = result[pos+1:]
|
||||
} else {
|
||||
code = result
|
||||
details = ""
|
||||
}
|
||||
|
||||
if idx, found = slices.BinarySearchFunc(r.Status, &common.PackageBuildStatus{Package: pkg}, common.PackageBuildStatusComp); found {
|
||||
res := r.Status[idx]
|
||||
res.LastUpdate = r.LastUpdate
|
||||
res.Code = code
|
||||
res.Details = details
|
||||
} else {
|
||||
r.Status = slices.Insert(r.Status, idx, &common.PackageBuildStatus{
|
||||
Package: pkg,
|
||||
Code: code,
|
||||
Details: details,
|
||||
LastUpdate: r.LastUpdate,
|
||||
})
|
||||
}
|
||||
}
|
||||
for idx := 0; idx < len(r.Status); {
|
||||
if r.Status[idx].LastUpdate == reset_time {
|
||||
r.Status = slices.Delete(r.Status, idx, idx+1)
|
||||
} else {
|
||||
idx++
|
||||
}
|
||||
}
|
||||
}
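UpdateResults above reads the whole `result.<project>/<repository>/<arch>` hash from the OBS Redis mirror and splits each field value into a status code and optional details at the first `:`. A single package can be looked up the same way; the sketch below assumes that key and value layout and the go-redis v9 client used by this file, while the function name is an illustrative assumption.

```go
// Sketch only: read one package's status from the OBS Redis mirror,
// assuming the "result.<project>/<repo>/<arch>" hash layout and the
// "code[:details]" value format handled by UpdateResults above.
package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/redis/go-redis/v9"
)

func PackageStatusFromRedis(rdb *redis.Client, project, repo, arch, pkg string) (code, details string, err error) {
	key := "result." + project + "/" + repo + "/" + arch
	val, err := rdb.HGet(context.Background(), key, pkg).Result()
	if err != nil {
		return "", "", fmt.Errorf("no status for %s in %s: %w", pkg, key, err)
	}
	if pos := strings.IndexByte(val, ':'); pos > -1 {
		return val[0:pos], val[pos+1:], nil
	}
	return val, "", nil
}
```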
|
||||
|
||||
func FindProjectResults(project string) []*common.BuildResult {
|
||||
RepoStatusLock.RLock()
|
||||
defer RepoStatusLock.RUnlock()
|
||||
|
||||
ret := make([]*common.BuildResult, 0, 8)
|
||||
idx, _ := slices.BinarySearchFunc(RepoStatus, &common.BuildResult{Project: project}, common.BuildResultComp)
|
||||
for idx < len(RepoStatus) && RepoStatus[idx].Project == project {
|
||||
ret = append(ret, RepoStatus[idx])
|
||||
idx++
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func FindRepoResults(project, repo string) []*common.BuildResult {
|
||||
RepoStatusLock.RLock()
|
||||
defer RepoStatusLock.RUnlock()
|
||||
|
||||
ret := make([]*common.BuildResult, 0, 8)
|
||||
idx, _ := slices.BinarySearchFunc(RepoStatus, &common.BuildResult{Project: project, Repository: repo}, common.BuildResultComp)
|
||||
for idx < len(RepoStatus) && RepoStatus[idx].Project == project && RepoStatus[idx].Repository == repo {
|
||||
ret = append(ret, RepoStatus[idx])
|
||||
idx++
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func FindAndUpdateProjectResults(project string) []*common.BuildResult {
|
||||
res := FindProjectResults(project)
|
||||
wg := &sync.WaitGroup{}
|
||||
now := time.Now()
|
||||
for _, r := range res {
|
||||
if now.Sub(r.LastUpdate).Abs() < time.Second*10 {
|
||||
// at most 1 update per 10 seconds for now
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
UpdateResults(r)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
return res
|
||||
}
|
||||
|
||||
func FindAndUpdateRepoResults(project, repo string) []*common.BuildResult {
|
||||
res := FindRepoResults(project, repo)
|
||||
wg := &sync.WaitGroup{}
|
||||
now := time.Now()
|
||||
for _, r := range res {
|
||||
if now.Sub(r.LastUpdate).Abs() < time.Second*10 {
|
||||
// at most 1 update per 10 seconds for now
|
||||
continue
|
||||
}
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
UpdateResults(r)
|
||||
wg.Done()
|
||||
}()
|
||||
}
|
||||
wg.Wait()
|
||||
return res
|
||||
}
|
||||
|
||||
func RescanRepositories() error {
|
||||
ctx := context.Background()
|
||||
var cursor uint64
|
||||
var err error
|
||||
|
||||
common.LogDebug("** starting rescanning ...")
|
||||
RepoStatusLock.Lock()
|
||||
for _, repo := range RepoStatus {
|
||||
repo.Dirty = false
|
||||
}
|
||||
RepoStatusLock.Unlock()
|
||||
var count int
|
||||
|
||||
for {
|
||||
var data []string
|
||||
data, cursor, err = redisClient.ScanType(ctx, cursor, "", 1000, "hash").Result()
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
RepoStatusLock.Lock()
|
||||
for _, repo := range data {
|
||||
r := strings.Split(repo, "/")
|
||||
if len(r) != 3 || len(r[0]) < 8 || r[0][0:7] != "result." {
|
||||
continue
|
||||
}
|
||||
d := &common.BuildResult{
|
||||
Project: r[0][7:],
|
||||
Repository: r[1],
|
||||
Arch: r[2],
|
||||
}
|
||||
if pos, found := slices.BinarySearchFunc(RepoStatus, d, common.BuildResultComp); found {
|
||||
RepoStatus[pos].Dirty = true
|
||||
} else {
|
||||
d.Dirty = true
|
||||
RepoStatus = slices.Insert(RepoStatus, pos, d)
|
||||
count++
|
||||
}
|
||||
}
|
||||
RepoStatusLock.Unlock()
|
||||
|
||||
if cursor == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
common.LogDebug(" added a total", count, "repos")
|
||||
count = 0
|
||||
|
||||
RepoStatusLock.Lock()
|
||||
for i := 0; i < len(RepoStatus); {
|
||||
if !RepoStatus[i].Dirty {
|
||||
RepoStatus = slices.Delete(RepoStatus, i, i+1)
|
||||
count++
|
||||
} else {
|
||||
i++
|
||||
}
|
||||
}
|
||||
RepoStatusLock.Unlock()
|
||||
common.LogDebug(" removed", count, "repos")
|
||||
common.LogDebug(" total repos:", len(RepoStatus))
|
||||
|
||||
return nil
|
||||
}
|
127
obs-status-service/status.go
Normal file
@@ -0,0 +1,127 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"src.opensuse.org/autogits/common"
|
||||
)
|
||||
|
||||
var WatchedRepos []string
|
||||
var mutex sync.Mutex
|
||||
|
||||
var StatusUpdateCh chan StatusUpdateMsg = make(chan StatusUpdateMsg)
|
||||
|
||||
var statusMutex sync.RWMutex
|
||||
var CurrentStatus map[string]*common.BuildResultList = make(map[string]*common.BuildResultList)
|
||||
|
||||
type StatusUpdateMsg struct {
|
||||
ObsProject string
|
||||
Result *common.BuildResultList
|
||||
}
|
||||
|
||||
func GetCurrentStatus(project string) *common.BuildResultList {
|
||||
statusMutex.RLock()
|
||||
|
||||
if ret, found := CurrentStatus[project]; found {
|
||||
statusMutex.RUnlock()
|
||||
return ret
|
||||
}
|
||||
|
||||
res, err := obs.BuildStatus(project)
|
||||
statusMutex.RUnlock()
|
||||
statusMutex.Lock()
|
||||
defer statusMutex.Unlock()
|
||||
|
||||
if err != nil {
|
||||
return res
|
||||
}
|
||||
CurrentStatus[project] = res
|
||||
|
||||
now := time.Now().Unix()
|
||||
CurrentStatus[project].LastUpdate = now
|
||||
for _, r := range res.Result {
|
||||
r.LastUpdate = now
|
||||
for _, p := range r.Status {
|
||||
p.LastUpdate = now
|
||||
}
|
||||
slices.SortFunc(r.Status, packageSort)
|
||||
}
|
||||
slices.SortFunc(res.Result, repoSort)
|
||||
return res
|
||||
}
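GetCurrentStatus keeps one BuildResultList per project behind statusMutex and only goes to OBS on a cache miss, timestamping and sorting the result before returning it. A handler wired into the service could use it through ProjectStatusSummarySvg as sketched below; the route path and helper name are assumptions, and ProjectStatusSummarySvg(project) is the variant added in this branch, which itself goes through GetCurrentStatus.

```go
// Sketch only: serve the project badge via the cache above. The route and
// function name are illustrative; they assume this sits next to status.go.
package main

import "net/http"

func registerProjectBadgeRoute() {
	http.HandleFunc("GET /status/{Project}", func(res http.ResponseWriter, req *http.Request) {
		svg := ProjectStatusSummarySvg(req.PathValue("Project"))
		if svg == nil {
			// no cached or fetchable status for this project
			res.WriteHeader(http.StatusNotFound)
			return
		}
		res.Header().Add("content-type", "image/svg+xml")
		res.Write(svg)
	})
}
```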
|
||||
|
||||
|
||||
// TODO: merge the freshly fetched pkgState results for pkg into the cached
// prjState (reconcile the matching prjState.Result[...].Status[...] entries).
func updatePrjPackage(prjState *common.BuildResultList, pkg string, now int64, pkgState *common.BuildResultList) {
}

// TODO: collect the per-repository/arch build status entries for pkg from prjState.
func extractPackageBuildStatus(prjState *common.BuildResultList, pkg string) []*common.PackageBuildStatus {
	return nil
}

func GetDetailedPackageBuildStatus(prj, pkg string) []*common.PackageBuildStatus {
	statusMutex.RLock()
	now := time.Now().Unix()

	cachedPrj, found := CurrentStatus[prj]
	// release the read lock on every path before fetching or re-locking for write
	statusMutex.RUnlock()
	if found && now-cachedPrj.LastUpdate < 60 {
		return extractPackageBuildStatus(cachedPrj, pkg)
	}

	ret, err := obs.BuildStatus(prj, pkg)
	if err != nil {
		return nil
	}

	statusMutex.Lock()
	defer statusMutex.Unlock()

	updatePrjPackage(cachedPrj, pkg, now, ret)
	return extractPackageBuildStatus(cachedPrj, pkg)
}
|
||||
|
||||
func GetDetailedBuildStatus(prj, pkg, repo, arch string) *common.PackageBuildStatus {
|
||||
prjStatus := GetCurrentStatus(prj)
|
||||
if prjStatus == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
for _, r := range prjStatus.Result {
|
||||
if r.Arch == arch && r.Repository == repo {
|
||||
for _, status := range r.Status {
|
||||
if status.Package == pkg {
|
||||
return &status
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func ProcessUpdates() {
|
||||
for {
|
||||
msg := <-StatusUpdateCh
|
||||
|
||||
statusMutex.Lock()
|
||||
CurrentStatus[msg.ObsProject] = msg.Result
|
||||
|
||||
drainedChannel:
|
||||
for {
|
||||
select {
|
||||
case msg = <-StatusUpdateCh:
|
||||
CurrentStatus[msg.ObsProject] = msg.Result
|
||||
default:
|
||||
statusMutex.Unlock()
|
||||
break drainedChannel
|
||||
}
|
||||
}
|
||||
}
|
||||
}
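ProcessUpdates takes statusMutex once per wake-up, applies the first queued result, then drains any further messages before unlocking, so a burst of status updates collapses into a single lock acquisition. The same non-blocking drain idiom, reduced to a standalone generic helper for clarity (the helper itself is not part of this branch), looks like this:

```go
// Sketch only: the select-with-default channel drain used by ProcessUpdates,
// written as a generic helper. Illustrative, not part of obs-status-service.
package main

func drainChannel[T any](ch <-chan T, apply func(T)) {
	for {
		select {
		case v := <-ch:
			apply(v)
		default:
			return // channel empty, stop without blocking
		}
	}
}
```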
|
34
obs-status-service/status_test.go
Normal file
@@ -0,0 +1,34 @@
package main

import (
	"testing"

	"go.uber.org/mock/gomock"
	"src.opensuse.org/autogits/common"
	mock_common "src.opensuse.org/autogits/common/mock"
)

func TestWatchObsProject(t *testing.T) {
	tests := []struct {
		name string
		res  common.BuildResultList
	}{
		{
			name: "two requests",
			res: common.BuildResultList{
				State: "success",
			},
		},
	}

	for _, test := range tests {
		t.Run(test.name, func(t *testing.T) {
			ctl := gomock.NewController(t)
			obs := mock_common.NewMockObsStatusFetcherWithState(ctl)

			obs.EXPECT().BuildStatusWithState("test:foo", "").Return(&test.res, nil)

			WatchObsProject(obs, "test:foo")
		})
	}
}
@@ -1,119 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"slices"
|
||||
)
|
||||
|
||||
type SvgWriter struct {
|
||||
ypos float64
|
||||
header []byte
|
||||
out bytes.Buffer
|
||||
}
|
||||
|
||||
func NewSvg() *SvgWriter {
|
||||
svg := &SvgWriter{}
|
||||
svg.header = []byte(`<svg version="2.0" overflow="auto" width="40ex" height="`)
|
||||
svg.out.WriteString(`em" xmlns="http://www.w3.org/2000/svg">`)
|
||||
svg.out.WriteString(`<defs>
|
||||
<g id="s">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="green" fill="#efe" rx="5" />
|
||||
<text x="2.5ex" y="1.1em">succeeded</text>
|
||||
</g>
|
||||
<g id="f">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="red" fill="#fee" rx="5" />
|
||||
<text x="5ex" y="1.1em">failed</text>
|
||||
</g>
|
||||
<g id="b">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="grey" fill="#fbf" rx="5" />
|
||||
<text x="3.75ex" y="1.1em">blocked</text>
|
||||
</g>
|
||||
<g id="broken">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="grey" fill="#fff" rx="5" />
|
||||
<text x="4.5ex" y="1.1em" stroke="red" fill="red">broken</text>
|
||||
</g>
|
||||
<g id="build">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="yellow" fill="#664" rx="5" />
|
||||
<text x="3.75ex" y="1.1em" fill="yellow">building</text>
|
||||
</g>
|
||||
<g id="u">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="yellow" fill="#555" rx="5" />
|
||||
<text x="2ex" y="1.1em" fill="orange">unresolvable</text>
|
||||
</g>
|
||||
<g id="scheduled">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="blue" fill="none" rx="5" />
|
||||
<text x="3ex" y="1.1em" stroke="none" fill="blue">scheduled</text>
|
||||
</g>
|
||||
<g id="d">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="grey" fill="none" rx="5" />
|
||||
<text x="4ex" y="1.1em" stroke="none" fill="grey">disabled</text>
|
||||
</g>
|
||||
<g id="e">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="grey" fill="none" rx="5" />
|
||||
<text x="4ex" y="1.1em" stroke="none" fill="#aaf">excluded</text>
|
||||
</g>
|
||||
<g id="un">
|
||||
<rect width="15ex" height="1.5em" stroke-width="1" stroke="grey" fill="none" rx="5" />
|
||||
<text x="4ex" y="1.1em" stroke="none" fill="grey">unknown</text>
|
||||
</g>
|
||||
<rect id="repotitle" width="100%" height="2em" stroke-width="1" stroke="grey" fill="grey" rx="2" />
|
||||
</defs>`)
|
||||
|
||||
return svg
|
||||
}
|
||||
|
||||
func (svg *SvgWriter) WriteTitle(title string) {
|
||||
svg.out.WriteString(`<text stroke="black" fill="black" x="1ex" y="` + fmt.Sprint(svg.ypos-.5) + `em">` + title + "</text>")
|
||||
svg.ypos += 2.5
|
||||
}
|
||||
|
||||
func (svg *SvgWriter) WriteSubtitle(subtitle string) {
|
||||
svg.out.WriteString(`<use href="#repotitle" y="` + fmt.Sprint(svg.ypos-2) + `em"/>`)
|
||||
svg.out.WriteString(`<text stroke="black" fill="black" x="3ex" y="` + fmt.Sprint(svg.ypos-.6) + `em">` + subtitle + `</text>`)
|
||||
svg.ypos += 2
|
||||
}
|
||||
|
||||
func (svg *SvgWriter) WritePackageStatus(loglink, arch, status, detail string) {
|
||||
StatusToSVG := func(S string) string {
|
||||
switch S {
|
||||
case "succeeded":
|
||||
return "s"
|
||||
case "failed":
|
||||
return "f"
|
||||
case "broken", "scheduled":
|
||||
return S
|
||||
case "blocked":
|
||||
return "b"
|
||||
case "building":
|
||||
return "build"
|
||||
case "unresolvable":
|
||||
return "u"
|
||||
case "disabled":
|
||||
return "d"
|
||||
case "excluded":
|
||||
return "e"
|
||||
}
|
||||
return "un"
|
||||
}
|
||||
|
||||
svg.out.WriteString(`<text fill="#113" x="5ex" y="` + fmt.Sprint(svg.ypos-.6) + `em">` + arch + `</text>`)
|
||||
svg.out.WriteString(`<g>`)
|
||||
if len(loglink) > 0 {
|
||||
svg.out.WriteString(`<a href="` + loglink + `" target="_blank" rel="noopener">`)
|
||||
}
|
||||
svg.out.WriteString(`<use href="#` + StatusToSVG(status) + `" x="20ex" y="` + fmt.Sprint(svg.ypos-1.7) + `em"/>`)
|
||||
if len(loglink) > 0 {
|
||||
svg.out.WriteString(`</a>`)
|
||||
}
|
||||
if len(detail) > 0 {
|
||||
svg.out.WriteString(`<title>` + fmt.Sprint(detail) + "</title>")
|
||||
}
|
||||
|
||||
svg.out.WriteString("</g>\n")
|
||||
svg.ypos += 2
|
||||
}
|
||||
|
||||
func (svg *SvgWriter) GenerateSvg() []byte {
|
||||
return slices.Concat(svg.header, []byte(fmt.Sprint(svg.ypos)), svg.out.Bytes(), []byte("</svg>"))
|
||||
}
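The SvgWriter removed here accumulated a badge line by line: WriteTitle and WriteSubtitle advance the vertical cursor, WritePackageStatus places a status glyph (optionally wrapped in a buildlog link), and GenerateSvg stitches the header, computed height, and body together. A short usage sketch of that API, based only on the methods above (the file name is an illustrative choice):

```go
// Sketch only: driving the removed SvgWriter as PackageStatusSummarySvg did.
package main

import "os"

func writeExampleBadge() error {
	svg := NewSvg()
	svg.WriteTitle("pkg2")
	svg.WriteSubtitle("repo1")
	svg.WritePackageStatus("/buildlog/project:foo/pkg2/repo1/x86_64", "x86_64", "failed", "compile error")
	return os.WriteFile("example.svg", svg.GenerateSvg(), 0o644)
}
```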
|
@@ -1,16 +0,0 @@
[Unit]
Description=Staging bot for project git PRs in OBS
After=network-online.target

[Service]
Type=exec
ExecStart=/usr/bin/obs-staging-bot
EnvironmentFile=-/etc/sysconfig/obs-staging-bot.env
DynamicUser=yes
NoNewPrivileges=yes
ProtectSystem=strict

[Install]
WantedBy=multi-user.target
@@ -1,4 +0,0 @@
package main

// exists only so go.mod keeps the mockgen model dependency vendored
import "go.uber.org/mock/mockgen/model"
BIN
vendor.tar.zst
(Stored with Git LFS)
Normal file
Binary file not shown.
15
vendor/github.com/asaskevich/govalidator/.gitignore
generated
vendored
@@ -1,15 +0,0 @@
|
||||
bin/
|
||||
.idea/
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
12
vendor/github.com/asaskevich/govalidator/.travis.yml
generated
vendored
@@ -1,12 +0,0 @@
|
||||
language: go
|
||||
dist: xenial
|
||||
go:
|
||||
- '1.10'
|
||||
- '1.11'
|
||||
- '1.12'
|
||||
- '1.13'
|
||||
- 'tip'
|
||||
|
||||
script:
|
||||
- go test -coverpkg=./... -coverprofile=coverage.info -timeout=5s
|
||||
- bash <(curl -s https://codecov.io/bash)
|
43
vendor/github.com/asaskevich/govalidator/CODE_OF_CONDUCT.md
generated
vendored
@@ -1,43 +0,0 @@
|
||||
# Contributor Code of Conduct
|
||||
|
||||
This project adheres to [The Code Manifesto](http://codemanifesto.com)
|
||||
as its guidelines for contributor interactions.
|
||||
|
||||
## The Code Manifesto
|
||||
|
||||
We want to work in an ecosystem that empowers developers to reach their
|
||||
potential — one that encourages growth and effective collaboration. A space
|
||||
that is safe for all.
|
||||
|
||||
A space such as this benefits everyone that participates in it. It encourages
|
||||
new developers to enter our field. It is through discussion and collaboration
|
||||
that we grow, and through growth that we improve.
|
||||
|
||||
In the effort to create such a place, we hold to these values:
|
||||
|
||||
1. **Discrimination limits us.** This includes discrimination on the basis of
|
||||
race, gender, sexual orientation, gender identity, age, nationality,
|
||||
technology and any other arbitrary exclusion of a group of people.
|
||||
2. **Boundaries honor us.** Your comfort levels are not everyone’s comfort
|
||||
levels. Remember that, and if brought to your attention, heed it.
|
||||
3. **We are our biggest assets.** None of us were born masters of our trade.
|
||||
Each of us has been helped along the way. Return that favor, when and where
|
||||
you can.
|
||||
4. **We are resources for the future.** As an extension of #3, share what you
|
||||
know. Make yourself a resource to help those that come after you.
|
||||
5. **Respect defines us.** Treat others as you wish to be treated. Make your
|
||||
discussions, criticisms and debates from a position of respectfulness. Ask
|
||||
yourself, is it true? Is it necessary? Is it constructive? Anything less is
|
||||
unacceptable.
|
||||
6. **Reactions require grace.** Angry responses are valid, but abusive language
|
||||
and vindictive actions are toxic. When something happens that offends you,
|
||||
handle it assertively, but be respectful. Escalate reasonably, and try to
|
||||
allow the offender an opportunity to explain themselves, and possibly
|
||||
correct the issue.
|
||||
7. **Opinions are just that: opinions.** Each and every one of us, due to our
|
||||
background and upbringing, have varying opinions. That is perfectly
|
||||
acceptable. Remember this: if you respect your own opinions, you should
|
||||
respect the opinions of others.
|
||||
8. **To err is human.** You might not intend it, but mistakes do happen and
|
||||
contribute to build experience. Tolerate honest mistakes, and don't
|
||||
hesitate to apologize if you make one yourself.
|
63
vendor/github.com/asaskevich/govalidator/CONTRIBUTING.md
generated
vendored
@@ -1,63 +0,0 @@
|
||||
#### Support
|
||||
If you do have a contribution to the package, feel free to create a Pull Request or an Issue.
|
||||
|
||||
#### What to contribute
|
||||
If you don't know what to do, there are some features and functions that need to be done
|
||||
|
||||
- [ ] Refactor code
|
||||
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
|
||||
- [ ] Create actual list of contributors and projects that currently using this package
|
||||
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
|
||||
- [ ] Update actual [list of functions](https://github.com/asaskevich/govalidator#list-of-functions)
|
||||
- [ ] Update [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that available for `ValidateStruct` and add new
|
||||
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC` etc
|
||||
- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
|
||||
- [ ] Implement fuzzing testing
|
||||
- [ ] Implement some struct/map/array utilities
|
||||
- [ ] Implement map/array validation
|
||||
- [ ] Implement benchmarking
|
||||
- [ ] Implement batch of examples
|
||||
- [ ] Look at forks for new features and fixes
|
||||
|
||||
#### Advice
|
||||
Feel free to create what you want, but keep in mind when you implement new features:
|
||||
- Code must be clear and readable, names of variables/constants clearly describes what they are doing
|
||||
- Public functions must be documented and described in source file and added to README.md to the list of available functions
|
||||
- There are must be unit-tests for any new functions and improvements
|
||||
|
||||
## Financial contributions
|
||||
|
||||
We also welcome financial contributions in full transparency on our [open collective](https://opencollective.com/govalidator).
|
||||
Anyone can file an expense. If the expense makes sense for the development of the community, it will be "merged" in the ledger of our open collective by the core contributors and the person who filed the expense will be reimbursed.
|
||||
|
||||
|
||||
## Credits
|
||||
|
||||
|
||||
### Contributors
|
||||
|
||||
Thank you to all the people who have already contributed to govalidator!
|
||||
<a href="https://github.com/asaskevich/govalidator/graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>
|
||||
|
||||
|
||||
### Backers
|
||||
|
||||
Thank you to all our backers! [[Become a backer](https://opencollective.com/govalidator#backer)]
|
||||
|
||||
<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>
|
||||
|
||||
|
||||
### Sponsors
|
||||
|
||||
Thank you to all our sponsors! (please ask your company to also support this open source project by [becoming a sponsor](https://opencollective.com/govalidator#sponsor))
|
||||
|
||||
<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
|
||||
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>
|
21
vendor/github.com/asaskevich/govalidator/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014-2020 Alex Saskevich
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
622
vendor/github.com/asaskevich/govalidator/README.md
generated
vendored
@@ -1,622 +0,0 @@
|
||||
govalidator
|
||||
===========
|
||||
[](https://gitter.im/asaskevich/govalidator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [](https://godoc.org/github.com/asaskevich/govalidator)
|
||||
[](https://travis-ci.org/asaskevich/govalidator)
|
||||
[](https://codecov.io/gh/asaskevich/govalidator) [](https://goreportcard.com/report/github.com/asaskevich/govalidator) [](http://go-search.org/view?id=github.com%2Fasaskevich%2Fgovalidator) [](#backers) [](#sponsors) [](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_shield)
|
||||
|
||||
A package of validators and sanitizers for strings, structs and collections. Based on [validator.js](https://github.com/chriso/validator.js).
|
||||
|
||||
#### Installation
|
||||
Make sure that Go is installed on your computer.
|
||||
Type the following command in your terminal:
|
||||
|
||||
go get github.com/asaskevich/govalidator
|
||||
|
||||
or you can get specified release of the package with `gopkg.in`:
|
||||
|
||||
go get gopkg.in/asaskevich/govalidator.v10
|
||||
|
||||
After it the package is ready to use.
|
||||
|
||||
|
||||
#### Import package in your project
|
||||
Add following line in your `*.go` file:
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
```
|
||||
If you are unhappy to use long `govalidator`, you can do something like this:
|
||||
```go
|
||||
import (
|
||||
valid "github.com/asaskevich/govalidator"
|
||||
)
|
||||
```
|
||||
|
||||
#### Activate behavior to require all fields have a validation tag by default
|
||||
`SetFieldsRequiredByDefault` causes validation to fail when struct fields do not include validations or are not explicitly marked as exempt (using `valid:"-"` or `valid:"email,optional"`). A good place to activate this is a package init function or the main() function.
|
||||
|
||||
`SetNilPtrAllowedByRequired` causes validation to pass when struct fields marked by `required` are set to nil. This is disabled by default for consistency, but some packages that need to be able to determine between `nil` and `zero value` state can use this. If disabled, both `nil` and `zero` values cause validation errors.
|
||||
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
func init() {
|
||||
govalidator.SetFieldsRequiredByDefault(true)
|
||||
}
|
||||
```
|
||||
|
||||
Here's some code to explain it:
|
||||
```go
|
||||
// this struct definition will fail govalidator.ValidateStruct() (and the field values do not matter):
|
||||
type exampleStruct struct {
|
||||
Name string ``
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// this, however, will only fail when Email is empty or an invalid email address:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email"`
|
||||
}
|
||||
|
||||
// lastly, this will only fail when Email is an invalid email address but not when it's empty:
|
||||
type exampleStruct2 struct {
|
||||
Name string `valid:"-"`
|
||||
Email string `valid:"email,optional"`
|
||||
}
|
||||
```
|
||||
|
||||
#### Recent breaking changes (see [#123](https://github.com/asaskevich/govalidator/pull/123))
|
||||
##### Custom validator function signature
|
||||
A context was added as the second parameter, for structs this is the object being validated – this makes dependent validation possible.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// old signature
|
||||
func(i interface{}) bool
|
||||
|
||||
// new signature
|
||||
func(i interface{}, o interface{}) bool
|
||||
```
|
||||
|
||||
##### Adding a custom validator
|
||||
This was changed to prevent data races when accessing custom validators.
|
||||
```go
|
||||
import "github.com/asaskevich/govalidator"
|
||||
|
||||
// before
|
||||
govalidator.CustomTypeTagMap["customByteArrayValidator"] = func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
}
|
||||
|
||||
// after
|
||||
govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, o interface{}) bool {
|
||||
// ...
|
||||
})
|
||||
```
|
||||
|
||||
#### List of functions:

```go
func Abs(value float64) float64
func BlackList(str, chars string) string
func ByteLength(str string, params ...string) bool
func CamelCaseToUnderscore(str string) string
func Contains(str, substring string) bool
func Count(array []interface{}, iterator ConditionIterator) int
func Each(array []interface{}, iterator Iterator)
func ErrorByField(e error, field string) string
func ErrorsByField(e error) map[string]string
func Filter(array []interface{}, iterator ConditionIterator) []interface{}
func Find(array []interface{}, iterator ConditionIterator) interface{}
func GetLine(s string, index int) (string, error)
func GetLines(s string) []string
func HasLowerCase(str string) bool
func HasUpperCase(str string) bool
func HasWhitespace(str string) bool
func HasWhitespaceOnly(str string) bool
func InRange(value interface{}, left interface{}, right interface{}) bool
func InRangeFloat32(value, left, right float32) bool
func InRangeFloat64(value, left, right float64) bool
func InRangeInt(value, left, right interface{}) bool
func IsASCII(str string) bool
func IsAlpha(str string) bool
func IsAlphanumeric(str string) bool
func IsBase64(str string) bool
func IsByteLength(str string, min, max int) bool
func IsCIDR(str string) bool
func IsCRC32(str string) bool
func IsCRC32b(str string) bool
func IsCreditCard(str string) bool
func IsDNSName(str string) bool
func IsDataURI(str string) bool
func IsDialString(str string) bool
func IsDivisibleBy(str, num string) bool
func IsEmail(str string) bool
func IsExistingEmail(email string) bool
func IsFilePath(str string) (bool, int)
func IsFloat(str string) bool
func IsFullWidth(str string) bool
func IsHalfWidth(str string) bool
func IsHash(str string, algorithm string) bool
func IsHexadecimal(str string) bool
func IsHexcolor(str string) bool
func IsHost(str string) bool
func IsIP(str string) bool
func IsIPv4(str string) bool
func IsIPv6(str string) bool
func IsISBN(str string, version int) bool
func IsISBN10(str string) bool
func IsISBN13(str string) bool
func IsISO3166Alpha2(str string) bool
func IsISO3166Alpha3(str string) bool
func IsISO4217(str string) bool
func IsISO693Alpha2(str string) bool
func IsISO693Alpha3b(str string) bool
func IsIn(str string, params ...string) bool
func IsInRaw(str string, params ...string) bool
func IsInt(str string) bool
func IsJSON(str string) bool
func IsLatitude(str string) bool
func IsLongitude(str string) bool
func IsLowerCase(str string) bool
func IsMAC(str string) bool
func IsMD4(str string) bool
func IsMD5(str string) bool
func IsMagnetURI(str string) bool
func IsMongoID(str string) bool
func IsMultibyte(str string) bool
func IsNatural(value float64) bool
func IsNegative(value float64) bool
func IsNonNegative(value float64) bool
func IsNonPositive(value float64) bool
func IsNotNull(str string) bool
func IsNull(str string) bool
func IsNumeric(str string) bool
func IsPort(str string) bool
func IsPositive(value float64) bool
func IsPrintableASCII(str string) bool
func IsRFC3339(str string) bool
func IsRFC3339WithoutZone(str string) bool
func IsRGBcolor(str string) bool
func IsRegex(str string) bool
func IsRequestURI(rawurl string) bool
func IsRequestURL(rawurl string) bool
func IsRipeMD128(str string) bool
func IsRipeMD160(str string) bool
func IsRsaPub(str string, params ...string) bool
func IsRsaPublicKey(str string, keylen int) bool
func IsSHA1(str string) bool
func IsSHA256(str string) bool
func IsSHA384(str string) bool
func IsSHA512(str string) bool
func IsSSN(str string) bool
func IsSemver(str string) bool
func IsTiger128(str string) bool
func IsTiger160(str string) bool
func IsTiger192(str string) bool
func IsTime(str string, format string) bool
func IsType(v interface{}, params ...string) bool
func IsURL(str string) bool
func IsUTFDigit(str string) bool
func IsUTFLetter(str string) bool
func IsUTFLetterNumeric(str string) bool
func IsUTFNumeric(str string) bool
func IsUUID(str string) bool
func IsUUIDv3(str string) bool
func IsUUIDv4(str string) bool
func IsUUIDv5(str string) bool
func IsULID(str string) bool
func IsUnixTime(str string) bool
func IsUpperCase(str string) bool
func IsVariableWidth(str string) bool
func IsWhole(value float64) bool
func LeftTrim(str, chars string) string
func Map(array []interface{}, iterator ResultIterator) []interface{}
func Matches(str, pattern string) bool
func MaxStringLength(str string, params ...string) bool
func MinStringLength(str string, params ...string) bool
func NormalizeEmail(str string) (string, error)
func PadBoth(str string, padStr string, padLen int) string
func PadLeft(str string, padStr string, padLen int) string
func PadRight(str string, padStr string, padLen int) string
func PrependPathToErrors(err error, path string) error
func Range(str string, params ...string) bool
func RemoveTags(s string) string
func ReplacePattern(str, pattern, replace string) string
func Reverse(s string) string
func RightTrim(str, chars string) string
func RuneLength(str string, params ...string) bool
func SafeFileName(str string) string
func SetFieldsRequiredByDefault(value bool)
func SetNilPtrAllowedByRequired(value bool)
func Sign(value float64) float64
func StringLength(str string, params ...string) bool
func StringMatches(s string, params ...string) bool
func StripLow(str string, keepNewLines bool) string
func ToBoolean(str string) (bool, error)
func ToFloat(str string) (float64, error)
func ToInt(value interface{}) (res int64, err error)
func ToJSON(obj interface{}) (string, error)
func ToString(obj interface{}) string
func Trim(str, chars string) string
func Truncate(str string, length int, ending string) string
func TruncatingErrorf(str string, args ...interface{}) error
func UnderscoreToCamelCase(s string) string
func ValidateMap(inputMap map[string]interface{}, validationMap map[string]interface{}) (bool, error)
func ValidateStruct(s interface{}) (bool, error)
func WhiteList(str, chars string) string
type ConditionIterator
type CustomTypeValidator
type Error
func (e Error) Error() string
type Errors
func (es Errors) Error() string
func (es Errors) Errors() []error
type ISO3166Entry
type ISO693Entry
type InterfaceParamValidator
type Iterator
type ParamValidator
type ResultIterator
type UnsupportedTypeError
func (e *UnsupportedTypeError) Error() string
type Validator
```

#### Examples

###### IsURL

```go
println(govalidator.IsURL(`http://user:pass@domain.com/path/page`))
```

###### IsType

```go
println(govalidator.IsType("Bob", "string"))
println(govalidator.IsType(1, "int"))
i := 1
println(govalidator.IsType(&i, "*int"))
```

IsType can be used through the tag `type`, which is essential for map validation:

```go
type User struct {
	Name string `valid:"type(string)"`
	Age int `valid:"type(int)"`
	Meta interface{} `valid:"type(string)"`
}
result, err := govalidator.ValidateStruct(User{"Bob", 20, "meta"})
if err != nil {
	println("error: " + err.Error())
}
println(result)
```

###### ToString

```go
type User struct {
	FirstName string
	LastName string
}

str := govalidator.ToString(&User{"John", "Juan"})
println(str)
```

###### Each, Map, Filter, Count for slices

Each iterates over the slice/array and calls Iterator for every item.

```go
data := []interface{}{1, 2, 3, 4, 5}
var fn govalidator.Iterator = func(value interface{}, index int) {
	println(value.(int))
}
govalidator.Each(data, fn)
```

```go
data := []interface{}{1, 2, 3, 4, 5}
var fn govalidator.ResultIterator = func(value interface{}, index int) interface{} {
	return value.(int) * 3
}
_ = govalidator.Map(data, fn) // result = []interface{}{3, 6, 9, 12, 15}
```

```go
data := []interface{}{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
	return value.(int)%2 == 0
}
_ = govalidator.Filter(data, fn) // result = []interface{}{2, 4, 6, 8, 10}
_ = govalidator.Count(data, fn) // result = 5
```
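
The related `Find` helper from the function list above works the same way and returns the first matching element, or nil if none matches; a minimal sketch:

```go
data := []interface{}{1, 2, 3, 4, 5}
var fn govalidator.ConditionIterator = func(value interface{}, index int) bool {
	return value.(int) > 3
}
_ = govalidator.Find(data, fn) // result = 4 (the first element greater than 3)
```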

###### ValidateStruct [#2](https://github.com/asaskevich/govalidator/pull/2)

If you want to validate structs, you can use the tag `valid` for any field in your structure. All validators used with a field in one tag are comma-separated. If you want to skip validation, place `-` in the tag. If you need a validator that is not on the list below, you can add it like this:

```go
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
	return str == "duck"
})
```

For completely custom validators (interface-based), see below.

Here is a list of available validators for struct fields (validator name - function used):
```go
"email": IsEmail,
"url": IsURL,
"dialstring": IsDialString,
"requrl": IsRequestURL,
"requri": IsRequestURI,
"alpha": IsAlpha,
"utfletter": IsUTFLetter,
"alphanum": IsAlphanumeric,
"utfletternum": IsUTFLetterNumeric,
"numeric": IsNumeric,
"utfnumeric": IsUTFNumeric,
"utfdigit": IsUTFDigit,
"hexadecimal": IsHexadecimal,
"hexcolor": IsHexcolor,
"rgbcolor": IsRGBcolor,
"lowercase": IsLowerCase,
"uppercase": IsUpperCase,
"int": IsInt,
"float": IsFloat,
"null": IsNull,
"uuid": IsUUID,
"uuidv3": IsUUIDv3,
"uuidv4": IsUUIDv4,
"uuidv5": IsUUIDv5,
"creditcard": IsCreditCard,
"isbn10": IsISBN10,
"isbn13": IsISBN13,
"json": IsJSON,
"multibyte": IsMultibyte,
"ascii": IsASCII,
"printableascii": IsPrintableASCII,
"fullwidth": IsFullWidth,
"halfwidth": IsHalfWidth,
"variablewidth": IsVariableWidth,
"base64": IsBase64,
"datauri": IsDataURI,
"ip": IsIP,
"port": IsPort,
"ipv4": IsIPv4,
"ipv6": IsIPv6,
"dns": IsDNSName,
"host": IsHost,
"mac": IsMAC,
"latitude": IsLatitude,
"longitude": IsLongitude,
"ssn": IsSSN,
"semver": IsSemver,
"rfc3339": IsRFC3339,
"rfc3339WithoutZone": IsRFC3339WithoutZone,
"ISO3166Alpha2": IsISO3166Alpha2,
"ISO3166Alpha3": IsISO3166Alpha3,
"ulid": IsULID,
```

Validators with parameters

```go
"range(min|max)": Range,
"length(min|max)": ByteLength,
"runelength(min|max)": RuneLength,
"stringlength(min|max)": StringLength,
"matches(pattern)": StringMatches,
"in(string1|string2|...|stringN)": IsIn,
"rsapub(keylength)": IsRsaPub,
"minstringlength(int)": MinStringLength,
"maxstringlength(int)": MaxStringLength,
```

Validators with parameters for any type

```go
"type(type)": IsType,
```

And here is a small example of usage:

```go
type Post struct {
	Title string `valid:"alphanum,required"`
	Message string `valid:"duck,ascii"`
	Message2 string `valid:"animal(dog)"`
	AuthorIP string `valid:"ipv4"`
	Date string `valid:"-"`
}
post := &Post{
	Title: "My Example Post",
	Message: "duck",
	Message2: "dog",
	AuthorIP: "123.234.54.3",
}

// Add your own struct validation tags
govalidator.TagMap["duck"] = govalidator.Validator(func(str string) bool {
	return str == "duck"
})

// Add your own struct validation tags with parameter
govalidator.ParamTagMap["animal"] = govalidator.ParamValidator(func(str string, params ...string) bool {
	species := params[0]
	return str == species
})
govalidator.ParamTagRegexMap["animal"] = regexp.MustCompile("^animal\\((\\w+)\\)$")

result, err := govalidator.ValidateStruct(post)
if err != nil {
	println("error: " + err.Error())
}
println(result)
```

###### ValidateMap [#338](https://github.com/asaskevich/govalidator/pull/338)

If you want to validate maps, you can use a map to be validated and a validation map that contains the same tags used in `ValidateStruct`. Both maps have to be in the form `map[string]interface{}`.

Here is a small example of usage:

```go
var mapTemplate = map[string]interface{}{
	"name": "required,alpha",
	"family": "required,alpha",
	"email": "required,email",
	"cell-phone": "numeric",
	"address": map[string]interface{}{
		"line1": "required,alphanum",
		"line2": "alphanum",
		"postal-code": "numeric",
	},
}

var inputMap = map[string]interface{}{
	"name": "Bob",
	"family": "Smith",
	"email": "foo@bar.baz",
	"address": map[string]interface{}{
		"line1": "",
		"line2": "",
		"postal-code": "",
	},
}

result, err := govalidator.ValidateMap(inputMap, mapTemplate)
if err != nil {
	println("error: " + err.Error())
}
println(result)
```

###### WhiteList

```go
// Remove all characters except those between "a" and "z"
println(govalidator.WhiteList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "aaaaaaaaaaaa")
```
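
The complementary `BlackList` function from the function list above removes the listed characters instead of keeping them; a minimal sketch (the expected result shown here is inferred from that behaviour):

```go
// Remove all characters between "a" and "z", keeping the rest
println(govalidator.BlackList("a3a43a5a4a3a2a23a4a5a4a3a4", "a-z") == "34354322345434")
```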

###### Custom validation functions

Custom validation using your own domain-specific validators is also available; here's an example of how to use it:

```go
import "github.com/asaskevich/govalidator"

type CustomByteArray [6]byte // custom types are supported and can be validated

type StructWithCustomByteArray struct {
	ID CustomByteArray `valid:"customByteArrayValidator,customMinLengthValidator"` // multiple custom validators are possible as well and will be evaluated in sequence
	Email string `valid:"email"`
	CustomMinLength int `valid:"-"`
}

govalidator.CustomTypeTagMap.Set("customByteArrayValidator", func(i interface{}, context interface{}) bool {
	switch v := context.(type) { // you can type switch on the context interface being validated
	case StructWithCustomByteArray:
		// you can check and validate against some other field in the context,
		// return early or not validate against the context at all – your choice
	case SomeOtherType:
		// ...
	default:
		// expecting some other type? Throw/panic here or continue
	}

	switch v := i.(type) { // type switch on the struct field being validated
	case CustomByteArray:
		for _, e := range v { // this validator checks that the byte array is not empty, i.e. not all zeroes
			if e != 0 {
				return true
			}
		}
	}
	return false
})
govalidator.CustomTypeTagMap.Set("customMinLengthValidator", func(i interface{}, context interface{}) bool {
	switch v := context.(type) { // this validates a field against the value in another field, i.e. dependent validation
	case StructWithCustomByteArray:
		return len(v.ID) >= v.CustomMinLength
	}
	return false
})
```

###### Loop over Error()

By default, `.Error()` returns all errors as a single string. To access each error individually you can do this:

```go
if err != nil {
	errs := err.(govalidator.Errors).Errors()
	for _, e := range errs {
		fmt.Println(e.Error())
	}
}
```
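
Alternatively, `ErrorsByField` from the function list above collapses the validation errors into a field-to-message map; a minimal sketch:

```go
if err != nil {
	for field, message := range govalidator.ErrorsByField(err) {
		fmt.Println(field + ": " + message)
	}
}
```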

###### Custom error messages

Custom error messages are supported via annotations by adding the `~` separator; here's an example of how to use it:

```go
type Ticket struct {
	Id int64 `json:"id"`
	FirstName string `json:"firstname" valid:"required~First name is blank"`
}
```
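
A short sketch of how the custom message surfaces when validation fails; when a custom message is set, govalidator returns that text itself as the error message:

```go
_, err := govalidator.ValidateStruct(Ticket{Id: 1})
if err != nil {
	println(err.Error()) // First name is blank
}
```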

#### Notes

Documentation is available here: [godoc.org](https://godoc.org/github.com/asaskevich/govalidator).
Full information about code coverage is also available here: [govalidator on gocover.io](http://gocover.io/github.com/asaskevich/govalidator).

#### Support

If you have a contribution to the package, feel free to create a Pull Request or an Issue.

#### What to contribute

If you are not sure where to start, here are some features and tasks that still need to be done:

- [ ] Refactor code
- [ ] Edit docs and [README](https://github.com/asaskevich/govalidator/README.md): spellcheck, grammar and typo check
- [ ] Create an up-to-date list of contributors and projects that are currently using this package
- [ ] Resolve [issues and bugs](https://github.com/asaskevich/govalidator/issues)
- [ ] Keep the [list of functions](https://github.com/asaskevich/govalidator#list-of-functions) up to date
- [ ] Update the [list of validators](https://github.com/asaskevich/govalidator#validatestruct-2) that are available for `ValidateStruct` and add new ones
- [ ] Implement new validators: `IsFQDN`, `IsIMEI`, `IsPostalCode`, `IsISIN`, `IsISRC`, etc.
- [x] Implement [validation by maps](https://github.com/asaskevich/govalidator/issues/224)
- [ ] Implement fuzz testing
- [ ] Implement some struct/map/array utilities
- [ ] Implement map/array validation
- [ ] Implement benchmarking
- [ ] Add a batch of examples
- [ ] Look at forks for new features and fixes

#### Advice

Feel free to create what you want, but keep the following in mind when you implement new features:
- Code must be clear and readable, and names of variables/constants must clearly describe what they hold
- Public functions must be documented in the source file and added to the list of available functions in README.md
- There must be unit tests for any new functions and improvements

## Credits

### Contributors

This project exists thanks to all the people who contribute. [[Contribute](CONTRIBUTING.md)].

#### Special thanks to [contributors](https://github.com/asaskevich/govalidator/graphs/contributors)
* [Daniel Lohse](https://github.com/annismckenzie)
* [Attila Oláh](https://github.com/attilaolah)
* [Daniel Korner](https://github.com/Dadie)
* [Steven Wilkin](https://github.com/stevenwilkin)
* [Deiwin Sarjas](https://github.com/deiwin)
* [Noah Shibley](https://github.com/slugmobile)
* [Nathan Davies](https://github.com/nathj07)
* [Matt Sanford](https://github.com/mzsanford)
* [Simon ccl1115](https://github.com/ccl1115)

<a href="https://github.com/asaskevich/govalidator/graphs/contributors"><img src="https://opencollective.com/govalidator/contributors.svg?width=890" /></a>

### Backers

Thank you to all our backers! 🙏 [[Become a backer](https://opencollective.com/govalidator#backer)]

<a href="https://opencollective.com/govalidator#backers" target="_blank"><img src="https://opencollective.com/govalidator/backers.svg?width=890"></a>

### Sponsors

Support this project by becoming a sponsor. Your logo will show up here with a link to your website. [[Become a sponsor](https://opencollective.com/govalidator#sponsor)]

<a href="https://opencollective.com/govalidator/sponsor/0/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/0/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/1/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/1/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/2/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/2/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/3/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/3/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/4/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/4/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/5/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/5/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/6/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/6/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/7/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/7/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/8/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/8/avatar.svg"></a>
<a href="https://opencollective.com/govalidator/sponsor/9/website" target="_blank"><img src="https://opencollective.com/govalidator/sponsor/9/avatar.svg"></a>

## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Fasaskevich%2Fgovalidator?ref=badge_large)
87 vendor/github.com/asaskevich/govalidator/arrays.go generated vendored
@@ -1,87 +0,0 @@
package govalidator

// Iterator is the function that accepts element of slice/array and its index
type Iterator func(interface{}, int)

// ResultIterator is the function that accepts element of slice/array and its index and returns any result
type ResultIterator func(interface{}, int) interface{}

// ConditionIterator is the function that accepts element of slice/array and its index and returns boolean
type ConditionIterator func(interface{}, int) bool

// ReduceIterator is the function that accepts two element of slice/array and returns result of merging those values
type ReduceIterator func(interface{}, interface{}) interface{}

// Some validates that any item of array corresponds to ConditionIterator. Returns boolean.
func Some(array []interface{}, iterator ConditionIterator) bool {
	res := false
	for index, data := range array {
		res = res || iterator(data, index)
	}
	return res
}

// Every validates that every item of array corresponds to ConditionIterator. Returns boolean.
func Every(array []interface{}, iterator ConditionIterator) bool {
	res := true
	for index, data := range array {
		res = res && iterator(data, index)
	}
	return res
}

// Reduce boils down a list of values into a single value by ReduceIterator
func Reduce(array []interface{}, iterator ReduceIterator, initialValue interface{}) interface{} {
	for _, data := range array {
		initialValue = iterator(initialValue, data)
	}
	return initialValue
}

// Each iterates over the slice and apply Iterator to every item
func Each(array []interface{}, iterator Iterator) {
	for index, data := range array {
		iterator(data, index)
	}
}

// Map iterates over the slice and apply ResultIterator to every item. Returns new slice as a result.
func Map(array []interface{}, iterator ResultIterator) []interface{} {
	var result = make([]interface{}, len(array))
	for index, data := range array {
		result[index] = iterator(data, index)
	}
	return result
}

// Find iterates over the slice and apply ConditionIterator to every item. Returns first item that meet ConditionIterator or nil otherwise.
func Find(array []interface{}, iterator ConditionIterator) interface{} {
	for index, data := range array {
		if iterator(data, index) {
			return data
		}
	}
	return nil
}

// Filter iterates over the slice and apply ConditionIterator to every item. Returns new slice.
func Filter(array []interface{}, iterator ConditionIterator) []interface{} {
	var result = make([]interface{}, 0)
	for index, data := range array {
		if iterator(data, index) {
			result = append(result, data)
		}
	}
	return result
}

// Count iterates over the slice and apply ConditionIterator to every item. Returns count of items that meets ConditionIterator.
func Count(array []interface{}, iterator ConditionIterator) int {
	count := 0
	for index, data := range array {
		if iterator(data, index) {
			count = count + 1
		}
	}
	return count
}
81 vendor/github.com/asaskevich/govalidator/converter.go generated vendored
@@ -1,81 +0,0 @@
package govalidator

import (
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
)

// ToString convert the input to a string.
func ToString(obj interface{}) string {
	res := fmt.Sprintf("%v", obj)
	return res
}

// ToJSON convert the input to a valid JSON string
func ToJSON(obj interface{}) (string, error) {
	res, err := json.Marshal(obj)
	if err != nil {
		res = []byte("")
	}
	return string(res), err
}

// ToFloat convert the input string to a float, or 0.0 if the input is not a float.
func ToFloat(value interface{}) (res float64, err error) {
	val := reflect.ValueOf(value)

	switch value.(type) {
	case int, int8, int16, int32, int64:
		res = float64(val.Int())
	case uint, uint8, uint16, uint32, uint64:
		res = float64(val.Uint())
	case float32, float64:
		res = val.Float()
	case string:
		res, err = strconv.ParseFloat(val.String(), 64)
		if err != nil {
			res = 0
		}
	default:
		err = fmt.Errorf("ToInt: unknown interface type %T", value)
		res = 0
	}

	return
}

// ToInt convert the input string or any int type to an integer type 64, or 0 if the input is not an integer.
func ToInt(value interface{}) (res int64, err error) {
	val := reflect.ValueOf(value)

	switch value.(type) {
	case int, int8, int16, int32, int64:
		res = val.Int()
	case uint, uint8, uint16, uint32, uint64:
		res = int64(val.Uint())
	case float32, float64:
		res = int64(val.Float())
	case string:
		if IsInt(val.String()) {
			res, err = strconv.ParseInt(val.String(), 0, 64)
			if err != nil {
				res = 0
			}
		} else {
			err = fmt.Errorf("ToInt: invalid numeric format %g", value)
			res = 0
		}
	default:
		err = fmt.Errorf("ToInt: unknown interface type %T", value)
		res = 0
	}

	return
}

// ToBoolean convert the input string to a boolean.
func ToBoolean(str string) (bool, error) {
	return strconv.ParseBool(str)
}
3 vendor/github.com/asaskevich/govalidator/doc.go generated vendored
@@ -1,3 +0,0 @@
package govalidator

// A package of validators and sanitizers for strings, structures and collections.
47 vendor/github.com/asaskevich/govalidator/error.go generated vendored
@@ -1,47 +0,0 @@
package govalidator

import (
	"sort"
	"strings"
)

// Errors is an array of multiple errors and conforms to the error interface.
type Errors []error

// Errors returns itself.
func (es Errors) Errors() []error {
	return es
}

func (es Errors) Error() string {
	var errs []string
	for _, e := range es {
		errs = append(errs, e.Error())
	}
	sort.Strings(errs)
	return strings.Join(errs, ";")
}

// Error encapsulates a name, an error and whether there's a custom error message or not.
type Error struct {
	Name string
	Err error
	CustomErrorMessageExists bool

	// Validator indicates the name of the validator that failed
	Validator string
	Path []string
}

func (e Error) Error() string {
	if e.CustomErrorMessageExists {
		return e.Err.Error()
	}

	errName := e.Name
	if len(e.Path) > 0 {
		errName = strings.Join(append(e.Path, e.Name), ".")
	}

	return errName + ": " + e.Err.Error()
}
100 vendor/github.com/asaskevich/govalidator/numerics.go generated vendored
@@ -1,100 +0,0 @@
package govalidator

import (
	"math"
)

// Abs returns absolute value of number
func Abs(value float64) float64 {
	return math.Abs(value)
}

// Sign returns signum of number: 1 in case of value > 0, -1 in case of value < 0, 0 otherwise
func Sign(value float64) float64 {
	if value > 0 {
		return 1
	} else if value < 0 {
		return -1
	} else {
		return 0
	}
}

// IsNegative returns true if value < 0
func IsNegative(value float64) bool {
	return value < 0
}

// IsPositive returns true if value > 0
func IsPositive(value float64) bool {
	return value > 0
}

// IsNonNegative returns true if value >= 0
func IsNonNegative(value float64) bool {
	return value >= 0
}

// IsNonPositive returns true if value <= 0
func IsNonPositive(value float64) bool {
	return value <= 0
}

// InRangeInt returns true if value lies between left and right border
func InRangeInt(value, left, right interface{}) bool {
	value64, _ := ToInt(value)
	left64, _ := ToInt(left)
	right64, _ := ToInt(right)
	if left64 > right64 {
		left64, right64 = right64, left64
	}
	return value64 >= left64 && value64 <= right64
}

// InRangeFloat32 returns true if value lies between left and right border
func InRangeFloat32(value, left, right float32) bool {
	if left > right {
		left, right = right, left
	}
	return value >= left && value <= right
}

// InRangeFloat64 returns true if value lies between left and right border
func InRangeFloat64(value, left, right float64) bool {
	if left > right {
		left, right = right, left
	}
	return value >= left && value <= right
}

// InRange returns true if value lies between left and right border, generic type to handle int, float32, float64 and string.
// All types must the same type.
// False if value doesn't lie in range or if it incompatible or not comparable
func InRange(value interface{}, left interface{}, right interface{}) bool {
	switch value.(type) {
	case int:
		intValue, _ := ToInt(value)
		intLeft, _ := ToInt(left)
		intRight, _ := ToInt(right)
		return InRangeInt(intValue, intLeft, intRight)
	case float32, float64:
		intValue, _ := ToFloat(value)
		intLeft, _ := ToFloat(left)
		intRight, _ := ToFloat(right)
		return InRangeFloat64(intValue, intLeft, intRight)
	case string:
		return value.(string) >= left.(string) && value.(string) <= right.(string)
	default:
		return false
	}
}

// IsWhole returns true if value is whole number
func IsWhole(value float64) bool {
	return math.Remainder(value, 1) == 0
}

// IsNatural returns true if value is natural number (positive and whole)
func IsNatural(value float64) bool {
	return IsWhole(value) && IsPositive(value)
}
113 vendor/github.com/asaskevich/govalidator/patterns.go generated vendored
@@ -1,113 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import "regexp"
|
||||
|
||||
// Basic regular expressions for validating strings
|
||||
const (
|
||||
Email string = "^(((([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|((\\x22)((((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(([\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(\\([\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(((\\x20|\\x09)*(\\x0d\\x0a))?(\\x20|\\x09)+)?(\\x22)))@((([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|\\.|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])([a-zA-Z]|\\d|-|_|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*([a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$"
|
||||
CreditCard string = "^(?:4[0-9]{12}(?:[0-9]{3})?|5[1-5][0-9]{14}|(222[1-9]|22[3-9][0-9]|2[3-6][0-9]{2}|27[01][0-9]|2720)[0-9]{12}|6(?:011|5[0-9][0-9])[0-9]{12}|3[47][0-9]{13}|3(?:0[0-5]|[68][0-9])[0-9]{11}|(?:2131|1800|35\\d{3})\\d{11}|6[27][0-9]{14})$"
|
||||
ISBN10 string = "^(?:[0-9]{9}X|[0-9]{10})$"
|
||||
ISBN13 string = "^(?:[0-9]{13})$"
|
||||
UUID3 string = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
UUID4 string = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID5 string = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$"
|
||||
UUID string = "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$"
|
||||
Alpha string = "^[a-zA-Z]+$"
|
||||
Alphanumeric string = "^[a-zA-Z0-9]+$"
|
||||
Numeric string = "^[0-9]+$"
|
||||
Int string = "^(?:[-+]?(?:0|[1-9][0-9]*))$"
|
||||
Float string = "^(?:[-+]?(?:[0-9]+))?(?:\\.[0-9]*)?(?:[eE][\\+\\-]?(?:[0-9]+))?$"
|
||||
Hexadecimal string = "^[0-9a-fA-F]+$"
|
||||
Hexcolor string = "^#?([0-9a-fA-F]{3}|[0-9a-fA-F]{6})$"
|
||||
RGBcolor string = "^rgb\\(\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*\\)$"
|
||||
ASCII string = "^[\x00-\x7F]+$"
|
||||
Multibyte string = "[^\x00-\x7F]"
|
||||
FullWidth string = "[^\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
HalfWidth string = "[\u0020-\u007E\uFF61-\uFF9F\uFFA0-\uFFDC\uFFE8-\uFFEE0-9a-zA-Z]"
|
||||
Base64 string = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$"
|
||||
PrintableASCII string = "^[\x20-\x7E]+$"
|
||||
DataURI string = "^data:.+\\/(.+);base64$"
|
||||
MagnetURI string = "^magnet:\\?xt=urn:[a-zA-Z0-9]+:[a-zA-Z0-9]{32,40}&dn=.+&tr=.+$"
|
||||
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
|
||||
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
|
||||
DNSName string = `^([a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*[\._]?$`
|
||||
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
|
||||
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
|
||||
URLUsername string = `(\S+(:\S*)?@)`
|
||||
URLPath string = `((\/|\?|#)[^\s]*)`
|
||||
URLPort string = `(:(\d{1,5}))`
|
||||
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3]|24\d|25[0-5])(\.(\d{1,2}|1\d\d|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-5]))`
|
||||
URLSubdomain string = `((www\.)|([a-zA-Z0-9]+([-_\.]?[a-zA-Z0-9])*[a-zA-Z0-9]\.[a-zA-Z0-9]+))`
|
||||
URL = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-_]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))\.?` + URLPort + `?` + URLPath + `?$`
|
||||
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
|
||||
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
||||
UnixPath string = `^(/[^/\x00]*)+/?$`
|
||||
WinARPath string = `^(?:(?:[a-zA-Z]:|\\\\[a-z0-9_.$●-]+\\[a-z0-9_.$●-]+)\\|\\?[^\\/:*?"<>|\r\n]+\\?)(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
|
||||
UnixARPath string = `^((\.{0,2}/)?([^/\x00]*))+/?$`
|
||||
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
|
||||
tagName string = "valid"
|
||||
hasLowerCase string = ".*[[:lower:]]"
|
||||
hasUpperCase string = ".*[[:upper:]]"
|
||||
hasWhitespace string = ".*[[:space:]]"
|
||||
hasWhitespaceOnly string = "^[[:space:]]+$"
|
||||
IMEI string = "^[0-9a-f]{14}$|^\\d{15}$|^\\d{18}$"
|
||||
IMSI string = "^\\d{14,15}$"
|
||||
E164 string = `^\+?[1-9]\d{1,14}$`
|
||||
)
|
||||
|
||||
// Used by IsFilePath func
|
||||
const (
|
||||
// Unknown is unresolved OS type
|
||||
Unknown = iota
|
||||
// Win is Windows type
|
||||
Win
|
||||
// Unix is *nix OS types
|
||||
Unix
|
||||
)
|
||||
|
||||
var (
|
||||
userRegexp = regexp.MustCompile("^[a-zA-Z0-9!#$%&'*+/=?^_`{|}~.-]+$")
|
||||
hostRegexp = regexp.MustCompile("^[^\\s]+\\.[^\\s]+$")
|
||||
userDotRegexp = regexp.MustCompile("(^[.]{1})|([.]{1}$)|([.]{2,})")
|
||||
rxEmail = regexp.MustCompile(Email)
|
||||
rxCreditCard = regexp.MustCompile(CreditCard)
|
||||
rxISBN10 = regexp.MustCompile(ISBN10)
|
||||
rxISBN13 = regexp.MustCompile(ISBN13)
|
||||
rxUUID3 = regexp.MustCompile(UUID3)
|
||||
rxUUID4 = regexp.MustCompile(UUID4)
|
||||
rxUUID5 = regexp.MustCompile(UUID5)
|
||||
rxUUID = regexp.MustCompile(UUID)
|
||||
rxAlpha = regexp.MustCompile(Alpha)
|
||||
rxAlphanumeric = regexp.MustCompile(Alphanumeric)
|
||||
rxNumeric = regexp.MustCompile(Numeric)
|
||||
rxInt = regexp.MustCompile(Int)
|
||||
rxFloat = regexp.MustCompile(Float)
|
||||
rxHexadecimal = regexp.MustCompile(Hexadecimal)
|
||||
rxHexcolor = regexp.MustCompile(Hexcolor)
|
||||
rxRGBcolor = regexp.MustCompile(RGBcolor)
|
||||
rxASCII = regexp.MustCompile(ASCII)
|
||||
rxPrintableASCII = regexp.MustCompile(PrintableASCII)
|
||||
rxMultibyte = regexp.MustCompile(Multibyte)
|
||||
rxFullWidth = regexp.MustCompile(FullWidth)
|
||||
rxHalfWidth = regexp.MustCompile(HalfWidth)
|
||||
rxBase64 = regexp.MustCompile(Base64)
|
||||
rxDataURI = regexp.MustCompile(DataURI)
|
||||
rxMagnetURI = regexp.MustCompile(MagnetURI)
|
||||
rxLatitude = regexp.MustCompile(Latitude)
|
||||
rxLongitude = regexp.MustCompile(Longitude)
|
||||
rxDNSName = regexp.MustCompile(DNSName)
|
||||
rxURL = regexp.MustCompile(URL)
|
||||
rxSSN = regexp.MustCompile(SSN)
|
||||
rxWinPath = regexp.MustCompile(WinPath)
|
||||
rxUnixPath = regexp.MustCompile(UnixPath)
|
||||
rxARWinPath = regexp.MustCompile(WinARPath)
|
||||
rxARUnixPath = regexp.MustCompile(UnixARPath)
|
||||
rxSemver = regexp.MustCompile(Semver)
|
||||
rxHasLowerCase = regexp.MustCompile(hasLowerCase)
|
||||
rxHasUpperCase = regexp.MustCompile(hasUpperCase)
|
||||
rxHasWhitespace = regexp.MustCompile(hasWhitespace)
|
||||
rxHasWhitespaceOnly = regexp.MustCompile(hasWhitespaceOnly)
|
||||
rxIMEI = regexp.MustCompile(IMEI)
|
||||
rxIMSI = regexp.MustCompile(IMSI)
|
||||
rxE164 = regexp.MustCompile(E164)
|
||||
)
|
656 vendor/github.com/asaskevich/govalidator/types.go generated vendored
@@ -1,656 +0,0 @@
|
||||
package govalidator
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// Validator is a wrapper for a validator function that returns bool and accepts string.
|
||||
type Validator func(str string) bool
|
||||
|
||||
// CustomTypeValidator is a wrapper for validator functions that returns bool and accepts any type.
|
||||
// The second parameter should be the context (in the case of validating a struct: the whole object being validated).
|
||||
type CustomTypeValidator func(i interface{}, o interface{}) bool
|
||||
|
||||
// ParamValidator is a wrapper for validator functions that accept additional parameters.
|
||||
type ParamValidator func(str string, params ...string) bool
|
||||
|
||||
// InterfaceParamValidator is a wrapper for functions that accept variants parameters for an interface value
|
||||
type InterfaceParamValidator func(in interface{}, params ...string) bool
|
||||
type tagOptionsMap map[string]tagOption
|
||||
|
||||
func (t tagOptionsMap) orderedKeys() []string {
|
||||
var keys []string
|
||||
for k := range t {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
|
||||
sort.Slice(keys, func(a, b int) bool {
|
||||
return t[keys[a]].order < t[keys[b]].order
|
||||
})
|
||||
|
||||
return keys
|
||||
}
|
||||
|
||||
type tagOption struct {
|
||||
name string
|
||||
customErrorMessage string
|
||||
order int
|
||||
}
|
||||
|
||||
// UnsupportedTypeError is a wrapper for reflect.Type
|
||||
type UnsupportedTypeError struct {
|
||||
Type reflect.Type
|
||||
}
|
||||
|
||||
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
|
||||
// It implements the methods to sort by string.
|
||||
type stringValues []reflect.Value
|
||||
|
||||
// InterfaceParamTagMap is a map of functions accept variants parameters for an interface value
|
||||
var InterfaceParamTagMap = map[string]InterfaceParamValidator{
|
||||
"type": IsType,
|
||||
}
|
||||
|
||||
// InterfaceParamTagRegexMap maps interface param tags to their respective regexes.
|
||||
var InterfaceParamTagRegexMap = map[string]*regexp.Regexp{
|
||||
"type": regexp.MustCompile(`^type\((.*)\)$`),
|
||||
}
|
||||
|
||||
// ParamTagMap is a map of functions accept variants parameters
|
||||
var ParamTagMap = map[string]ParamValidator{
|
||||
"length": ByteLength,
|
||||
"range": Range,
|
||||
"runelength": RuneLength,
|
||||
"stringlength": StringLength,
|
||||
"matches": StringMatches,
|
||||
"in": IsInRaw,
|
||||
"rsapub": IsRsaPub,
|
||||
"minstringlength": MinStringLength,
|
||||
"maxstringlength": MaxStringLength,
|
||||
}
|
||||
|
||||
// ParamTagRegexMap maps param tags to their respective regexes.
|
||||
var ParamTagRegexMap = map[string]*regexp.Regexp{
|
||||
"range": regexp.MustCompile("^range\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"runelength": regexp.MustCompile("^runelength\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
|
||||
"in": regexp.MustCompile(`^in\((.*)\)`),
|
||||
"matches": regexp.MustCompile(`^matches\((.+)\)$`),
|
||||
"rsapub": regexp.MustCompile("^rsapub\\((\\d+)\\)$"),
|
||||
"minstringlength": regexp.MustCompile("^minstringlength\\((\\d+)\\)$"),
|
||||
"maxstringlength": regexp.MustCompile("^maxstringlength\\((\\d+)\\)$"),
|
||||
}
|
||||
|
||||
type customTypeTagMap struct {
|
||||
validators map[string]CustomTypeValidator
|
||||
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Get(name string) (CustomTypeValidator, bool) {
|
||||
tm.RLock()
|
||||
defer tm.RUnlock()
|
||||
v, ok := tm.validators[name]
|
||||
return v, ok
|
||||
}
|
||||
|
||||
func (tm *customTypeTagMap) Set(name string, ctv CustomTypeValidator) {
|
||||
tm.Lock()
|
||||
defer tm.Unlock()
|
||||
tm.validators[name] = ctv
|
||||
}
|
||||
|
||||
// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
|
||||
// Use this to validate compound or custom types that need to be handled as a whole, e.g.
|
||||
// `type UUID [16]byte` (this would be handled as an array of bytes).
|
||||
var CustomTypeTagMap = &customTypeTagMap{validators: make(map[string]CustomTypeValidator)}
|
||||
|
||||
// TagMap is a map of functions, that can be used as tags for ValidateStruct function.
|
||||
var TagMap = map[string]Validator{
|
||||
"email": IsEmail,
|
||||
"url": IsURL,
|
||||
"dialstring": IsDialString,
|
||||
"requrl": IsRequestURL,
|
||||
"requri": IsRequestURI,
|
||||
"alpha": IsAlpha,
|
||||
"utfletter": IsUTFLetter,
|
||||
"alphanum": IsAlphanumeric,
|
||||
"utfletternum": IsUTFLetterNumeric,
|
||||
"numeric": IsNumeric,
|
||||
"utfnumeric": IsUTFNumeric,
|
||||
"utfdigit": IsUTFDigit,
|
||||
"hexadecimal": IsHexadecimal,
|
||||
"hexcolor": IsHexcolor,
|
||||
"rgbcolor": IsRGBcolor,
|
||||
"lowercase": IsLowerCase,
|
||||
"uppercase": IsUpperCase,
|
||||
"int": IsInt,
|
||||
"float": IsFloat,
|
||||
"null": IsNull,
|
||||
"notnull": IsNotNull,
|
||||
"uuid": IsUUID,
|
||||
"uuidv3": IsUUIDv3,
|
||||
"uuidv4": IsUUIDv4,
|
||||
"uuidv5": IsUUIDv5,
|
||||
"creditcard": IsCreditCard,
|
||||
"isbn10": IsISBN10,
|
||||
"isbn13": IsISBN13,
|
||||
"json": IsJSON,
|
||||
"multibyte": IsMultibyte,
|
||||
"ascii": IsASCII,
|
||||
"printableascii": IsPrintableASCII,
|
||||
"fullwidth": IsFullWidth,
|
||||
"halfwidth": IsHalfWidth,
|
||||
"variablewidth": IsVariableWidth,
|
||||
"base64": IsBase64,
|
||||
"datauri": IsDataURI,
|
||||
"ip": IsIP,
|
||||
"port": IsPort,
|
||||
"ipv4": IsIPv4,
|
||||
"ipv6": IsIPv6,
|
||||
"dns": IsDNSName,
|
||||
"host": IsHost,
|
||||
"mac": IsMAC,
|
||||
"latitude": IsLatitude,
|
||||
"longitude": IsLongitude,
|
||||
"ssn": IsSSN,
|
||||
"semver": IsSemver,
|
||||
"rfc3339": IsRFC3339,
|
||||
"rfc3339WithoutZone": IsRFC3339WithoutZone,
|
||||
"ISO3166Alpha2": IsISO3166Alpha2,
|
||||
"ISO3166Alpha3": IsISO3166Alpha3,
|
||||
"ISO4217": IsISO4217,
|
||||
"IMEI": IsIMEI,
|
||||
"ulid": IsULID,
|
||||
}
|
||||
|
||||
// ISO3166Entry stores country codes
|
||||
type ISO3166Entry struct {
|
||||
EnglishShortName string
|
||||
FrenchShortName string
|
||||
Alpha2Code string
|
||||
Alpha3Code string
|
||||
Numeric string
|
||||
}
|
||||
|
||||
//ISO3166List based on https://www.iso.org/obp/ui/#search/code/ Code Type "Officially Assigned Codes"
|
||||
var ISO3166List = []ISO3166Entry{
|
||||
{"Afghanistan", "Afghanistan (l')", "AF", "AFG", "004"},
|
||||
{"Albania", "Albanie (l')", "AL", "ALB", "008"},
|
||||
{"Antarctica", "Antarctique (l')", "AQ", "ATA", "010"},
|
||||
{"Algeria", "Algérie (l')", "DZ", "DZA", "012"},
|
||||
{"American Samoa", "Samoa américaines (les)", "AS", "ASM", "016"},
|
||||
{"Andorra", "Andorre (l')", "AD", "AND", "020"},
|
||||
{"Angola", "Angola (l')", "AO", "AGO", "024"},
|
||||
{"Antigua and Barbuda", "Antigua-et-Barbuda", "AG", "ATG", "028"},
|
||||
{"Azerbaijan", "Azerbaïdjan (l')", "AZ", "AZE", "031"},
|
||||
{"Argentina", "Argentine (l')", "AR", "ARG", "032"},
|
||||
{"Australia", "Australie (l')", "AU", "AUS", "036"},
|
||||
{"Austria", "Autriche (l')", "AT", "AUT", "040"},
|
||||
{"Bahamas (the)", "Bahamas (les)", "BS", "BHS", "044"},
|
||||
{"Bahrain", "Bahreïn", "BH", "BHR", "048"},
|
||||
{"Bangladesh", "Bangladesh (le)", "BD", "BGD", "050"},
|
||||
{"Armenia", "Arménie (l')", "AM", "ARM", "051"},
|
||||
{"Barbados", "Barbade (la)", "BB", "BRB", "052"},
|
||||
{"Belgium", "Belgique (la)", "BE", "BEL", "056"},
|
||||
{"Bermuda", "Bermudes (les)", "BM", "BMU", "060"},
|
||||
{"Bhutan", "Bhoutan (le)", "BT", "BTN", "064"},
|
||||
{"Bolivia (Plurinational State of)", "Bolivie (État plurinational de)", "BO", "BOL", "068"},
|
||||
{"Bosnia and Herzegovina", "Bosnie-Herzégovine (la)", "BA", "BIH", "070"},
|
||||
{"Botswana", "Botswana (le)", "BW", "BWA", "072"},
|
||||
{"Bouvet Island", "Bouvet (l'Île)", "BV", "BVT", "074"},
|
||||
{"Brazil", "Brésil (le)", "BR", "BRA", "076"},
|
||||
{"Belize", "Belize (le)", "BZ", "BLZ", "084"},
|
||||
{"British Indian Ocean Territory (the)", "Indien (le Territoire britannique de l'océan)", "IO", "IOT", "086"},
|
||||
{"Solomon Islands", "Salomon (Îles)", "SB", "SLB", "090"},
|
||||
{"Virgin Islands (British)", "Vierges britanniques (les Îles)", "VG", "VGB", "092"},
|
||||
{"Brunei Darussalam", "Brunéi Darussalam (le)", "BN", "BRN", "096"},
|
||||
{"Bulgaria", "Bulgarie (la)", "BG", "BGR", "100"},
|
||||
{"Myanmar", "Myanmar (le)", "MM", "MMR", "104"},
|
||||
{"Burundi", "Burundi (le)", "BI", "BDI", "108"},
|
||||
{"Belarus", "Bélarus (le)", "BY", "BLR", "112"},
|
||||
{"Cambodia", "Cambodge (le)", "KH", "KHM", "116"},
|
||||
{"Cameroon", "Cameroun (le)", "CM", "CMR", "120"},
|
||||
{"Canada", "Canada (le)", "CA", "CAN", "124"},
|
||||
{"Cabo Verde", "Cabo Verde", "CV", "CPV", "132"},
|
||||
{"Cayman Islands (the)", "Caïmans (les Îles)", "KY", "CYM", "136"},
|
||||
{"Central African Republic (the)", "République centrafricaine (la)", "CF", "CAF", "140"},
|
||||
{"Sri Lanka", "Sri Lanka", "LK", "LKA", "144"},
|
||||
{"Chad", "Tchad (le)", "TD", "TCD", "148"},
|
||||
{"Chile", "Chili (le)", "CL", "CHL", "152"},
|
||||
{"China", "Chine (la)", "CN", "CHN", "156"},
|
||||
{"Taiwan (Province of China)", "Taïwan (Province de Chine)", "TW", "TWN", "158"},
|
||||
{"Christmas Island", "Christmas (l'Île)", "CX", "CXR", "162"},
|
||||
{"Cocos (Keeling) Islands (the)", "Cocos (les Îles)/ Keeling (les Îles)", "CC", "CCK", "166"},
|
||||
{"Colombia", "Colombie (la)", "CO", "COL", "170"},
|
||||
{"Comoros (the)", "Comores (les)", "KM", "COM", "174"},
|
||||
{"Mayotte", "Mayotte", "YT", "MYT", "175"},
|
||||
{"Congo (the)", "Congo (le)", "CG", "COG", "178"},
|
||||
{"Congo (the Democratic Republic of the)", "Congo (la République démocratique du)", "CD", "COD", "180"},
|
||||
{"Cook Islands (the)", "Cook (les Îles)", "CK", "COK", "184"},
|
||||
{"Costa Rica", "Costa Rica (le)", "CR", "CRI", "188"},
|
||||
{"Croatia", "Croatie (la)", "HR", "HRV", "191"},
|
||||
{"Cuba", "Cuba", "CU", "CUB", "192"},
|
||||
{"Cyprus", "Chypre", "CY", "CYP", "196"},
|
||||
{"Czech Republic (the)", "tchèque (la République)", "CZ", "CZE", "203"},
|
||||
{"Benin", "Bénin (le)", "BJ", "BEN", "204"},
|
||||
{"Denmark", "Danemark (le)", "DK", "DNK", "208"},
|
||||
{"Dominica", "Dominique (la)", "DM", "DMA", "212"},
|
||||
{"Dominican Republic (the)", "dominicaine (la République)", "DO", "DOM", "214"},
|
||||
{"Ecuador", "Équateur (l')", "EC", "ECU", "218"},
|
||||
{"El Salvador", "El Salvador", "SV", "SLV", "222"},
|
||||
{"Equatorial Guinea", "Guinée équatoriale (la)", "GQ", "GNQ", "226"},
|
||||
{"Ethiopia", "Éthiopie (l')", "ET", "ETH", "231"},
|
||||
{"Eritrea", "Érythrée (l')", "ER", "ERI", "232"},
|
||||
{"Estonia", "Estonie (l')", "EE", "EST", "233"},
|
||||
{"Faroe Islands (the)", "Féroé (les Îles)", "FO", "FRO", "234"},
|
||||
{"Falkland Islands (the) [Malvinas]", "Falkland (les Îles)/Malouines (les Îles)", "FK", "FLK", "238"},
|
||||
{"South Georgia and the South Sandwich Islands", "Géorgie du Sud-et-les Îles Sandwich du Sud (la)", "GS", "SGS", "239"},
|
||||
{"Fiji", "Fidji (les)", "FJ", "FJI", "242"},
|
||||
{"Finland", "Finlande (la)", "FI", "FIN", "246"},
|
||||
{"Åland Islands", "Åland(les Îles)", "AX", "ALA", "248"},
|
||||
{"France", "France (la)", "FR", "FRA", "250"},
|
||||
{"French Guiana", "Guyane française (la )", "GF", "GUF", "254"},
|
||||
{"French Polynesia", "Polynésie française (la)", "PF", "PYF", "258"},
|
||||
{"French Southern Territories (the)", "Terres australes françaises (les)", "TF", "ATF", "260"},
|
||||
{"Djibouti", "Djibouti", "DJ", "DJI", "262"},
|
||||
{"Gabon", "Gabon (le)", "GA", "GAB", "266"},
|
||||
{"Georgia", "Géorgie (la)", "GE", "GEO", "268"},
|
||||
{"Gambia (the)", "Gambie (la)", "GM", "GMB", "270"},
|
||||
{"Palestine, State of", "Palestine, État de", "PS", "PSE", "275"},
|
||||
{"Germany", "Allemagne (l')", "DE", "DEU", "276"},
|
||||
{"Ghana", "Ghana (le)", "GH", "GHA", "288"},
|
||||
{"Gibraltar", "Gibraltar", "GI", "GIB", "292"},
|
||||
{"Kiribati", "Kiribati", "KI", "KIR", "296"},
|
||||
{"Greece", "Grèce (la)", "GR", "GRC", "300"},
|
||||
{"Greenland", "Groenland (le)", "GL", "GRL", "304"},
|
||||
{"Grenada", "Grenade (la)", "GD", "GRD", "308"},
|
||||
{"Guadeloupe", "Guadeloupe (la)", "GP", "GLP", "312"},
|
||||
{"Guam", "Guam", "GU", "GUM", "316"},
|
||||
{"Guatemala", "Guatemala (le)", "GT", "GTM", "320"},
|
||||
{"Guinea", "Guinée (la)", "GN", "GIN", "324"},
|
||||
{"Guyana", "Guyana (le)", "GY", "GUY", "328"},
|
||||
{"Haiti", "Haïti", "HT", "HTI", "332"},
|
||||
{"Heard Island and McDonald Islands", "Heard-et-Îles MacDonald (l'Île)", "HM", "HMD", "334"},
|
||||
{"Holy See (the)", "Saint-Siège (le)", "VA", "VAT", "336"},
|
||||
{"Honduras", "Honduras (le)", "HN", "HND", "340"},
|
||||
{"Hong Kong", "Hong Kong", "HK", "HKG", "344"},
|
||||
{"Hungary", "Hongrie (la)", "HU", "HUN", "348"},
|
||||
{"Iceland", "Islande (l')", "IS", "ISL", "352"},
|
||||
{"India", "Inde (l')", "IN", "IND", "356"},
|
||||
{"Indonesia", "Indonésie (l')", "ID", "IDN", "360"},
|
||||
{"Iran (Islamic Republic of)", "Iran (République Islamique d')", "IR", "IRN", "364"},
|
||||
{"Iraq", "Iraq (l')", "IQ", "IRQ", "368"},
|
||||
{"Ireland", "Irlande (l')", "IE", "IRL", "372"},
|
||||
{"Israel", "Israël", "IL", "ISR", "376"},
|
||||
{"Italy", "Italie (l')", "IT", "ITA", "380"},
|
||||
{"Côte d'Ivoire", "Côte d'Ivoire (la)", "CI", "CIV", "384"},
|
||||
{"Jamaica", "Jamaïque (la)", "JM", "JAM", "388"},
|
||||
{"Japan", "Japon (le)", "JP", "JPN", "392"},
|
||||
{"Kazakhstan", "Kazakhstan (le)", "KZ", "KAZ", "398"},
|
||||
{"Jordan", "Jordanie (la)", "JO", "JOR", "400"},
|
||||
{"Kenya", "Kenya (le)", "KE", "KEN", "404"},
|
||||
{"Korea (the Democratic People's Republic of)", "Corée (la République populaire démocratique de)", "KP", "PRK", "408"},
|
||||
{"Korea (the Republic of)", "Corée (la République de)", "KR", "KOR", "410"},
|
||||
{"Kuwait", "Koweït (le)", "KW", "KWT", "414"},
|
||||
{"Kyrgyzstan", "Kirghizistan (le)", "KG", "KGZ", "417"},
|
||||
{"Lao People's Democratic Republic (the)", "Lao, République démocratique populaire", "LA", "LAO", "418"},
|
||||
{"Lebanon", "Liban (le)", "LB", "LBN", "422"},
|
||||
{"Lesotho", "Lesotho (le)", "LS", "LSO", "426"},
|
||||
{"Latvia", "Lettonie (la)", "LV", "LVA", "428"},
|
||||
{"Liberia", "Libéria (le)", "LR", "LBR", "430"},
|
||||
{"Libya", "Libye (la)", "LY", "LBY", "434"},
|
||||
{"Liechtenstein", "Liechtenstein (le)", "LI", "LIE", "438"},
|
||||
{"Lithuania", "Lituanie (la)", "LT", "LTU", "440"},
|
||||
{"Luxembourg", "Luxembourg (le)", "LU", "LUX", "442"},
|
||||
{"Macao", "Macao", "MO", "MAC", "446"},
|
||||
{"Madagascar", "Madagascar", "MG", "MDG", "450"},
|
||||
{"Malawi", "Malawi (le)", "MW", "MWI", "454"},
|
||||
{"Malaysia", "Malaisie (la)", "MY", "MYS", "458"},
|
||||
{"Maldives", "Maldives (les)", "MV", "MDV", "462"},
|
||||
{"Mali", "Mali (le)", "ML", "MLI", "466"},
|
||||
{"Malta", "Malte", "MT", "MLT", "470"},
|
||||
{"Martinique", "Martinique (la)", "MQ", "MTQ", "474"},
|
||||
{"Mauritania", "Mauritanie (la)", "MR", "MRT", "478"},
|
||||
{"Mauritius", "Maurice", "MU", "MUS", "480"},
|
||||
{"Mexico", "Mexique (le)", "MX", "MEX", "484"},
|
||||
{"Monaco", "Monaco", "MC", "MCO", "492"},
|
||||
{"Mongolia", "Mongolie (la)", "MN", "MNG", "496"},
|
||||
{"Moldova (the Republic of)", "Moldova , République de", "MD", "MDA", "498"},
|
||||
{"Montenegro", "Monténégro (le)", "ME", "MNE", "499"},
|
||||
{"Montserrat", "Montserrat", "MS", "MSR", "500"},
|
||||
{"Morocco", "Maroc (le)", "MA", "MAR", "504"},
|
||||
{"Mozambique", "Mozambique (le)", "MZ", "MOZ", "508"},
|
||||
{"Oman", "Oman", "OM", "OMN", "512"},
|
||||
{"Namibia", "Namibie (la)", "NA", "NAM", "516"},
|
||||
{"Nauru", "Nauru", "NR", "NRU", "520"},
|
||||
{"Nepal", "Népal (le)", "NP", "NPL", "524"},
|
||||
{"Netherlands (the)", "Pays-Bas (les)", "NL", "NLD", "528"},
|
||||
{"Curaçao", "Curaçao", "CW", "CUW", "531"},
|
||||
{"Aruba", "Aruba", "AW", "ABW", "533"},
|
||||
{"Sint Maarten (Dutch part)", "Saint-Martin (partie néerlandaise)", "SX", "SXM", "534"},
|
||||
{"Bonaire, Sint Eustatius and Saba", "Bonaire, Saint-Eustache et Saba", "BQ", "BES", "535"},
|
||||
{"New Caledonia", "Nouvelle-Calédonie (la)", "NC", "NCL", "540"},
|
||||
{"Vanuatu", "Vanuatu (le)", "VU", "VUT", "548"},
|
||||
{"New Zealand", "Nouvelle-Zélande (la)", "NZ", "NZL", "554"},
|
||||
{"Nicaragua", "Nicaragua (le)", "NI", "NIC", "558"},
|
||||
{"Niger (the)", "Niger (le)", "NE", "NER", "562"},
|
||||
{"Nigeria", "Nigéria (le)", "NG", "NGA", "566"},
|
||||
{"Niue", "Niue", "NU", "NIU", "570"},
|
||||
{"Norfolk Island", "Norfolk (l'Île)", "NF", "NFK", "574"},
|
||||
{"Norway", "Norvège (la)", "NO", "NOR", "578"},
|
||||
{"Northern Mariana Islands (the)", "Mariannes du Nord (les Îles)", "MP", "MNP", "580"},
|
||||
{"United States Minor Outlying Islands (the)", "Îles mineures éloignées des États-Unis (les)", "UM", "UMI", "581"},
|
||||
{"Micronesia (Federated States of)", "Micronésie (États fédérés de)", "FM", "FSM", "583"},
|
||||
{"Marshall Islands (the)", "Marshall (Îles)", "MH", "MHL", "584"},
|
||||
{"Palau", "Palaos (les)", "PW", "PLW", "585"},
|
||||
{"Pakistan", "Pakistan (le)", "PK", "PAK", "586"},
|
||||
{"Panama", "Panama (le)", "PA", "PAN", "591"},
|
||||
{"Papua New Guinea", "Papouasie-Nouvelle-Guinée (la)", "PG", "PNG", "598"},
|
||||
{"Paraguay", "Paraguay (le)", "PY", "PRY", "600"},
|
||||
{"Peru", "Pérou (le)", "PE", "PER", "604"},
|
||||
{"Philippines (the)", "Philippines (les)", "PH", "PHL", "608"},
|
||||
{"Pitcairn", "Pitcairn", "PN", "PCN", "612"},
|
||||
{"Poland", "Pologne (la)", "PL", "POL", "616"},
|
||||
{"Portugal", "Portugal (le)", "PT", "PRT", "620"},
|
||||
{"Guinea-Bissau", "Guinée-Bissau (la)", "GW", "GNB", "624"},
|
||||
{"Timor-Leste", "Timor-Leste (le)", "TL", "TLS", "626"},
|
||||
{"Puerto Rico", "Porto Rico", "PR", "PRI", "630"},
|
||||
{"Qatar", "Qatar (le)", "QA", "QAT", "634"},
|
||||
{"Réunion", "Réunion (La)", "RE", "REU", "638"},
|
||||
{"Romania", "Roumanie (la)", "RO", "ROU", "642"},
|
||||
{"Russian Federation (the)", "Russie (la Fédération de)", "RU", "RUS", "643"},
|
||||
{"Rwanda", "Rwanda (le)", "RW", "RWA", "646"},
|
||||
{"Saint Barthélemy", "Saint-Barthélemy", "BL", "BLM", "652"},
|
||||
{"Saint Helena, Ascension and Tristan da Cunha", "Sainte-Hélène, Ascension et Tristan da Cunha", "SH", "SHN", "654"},
|
||||
{"Saint Kitts and Nevis", "Saint-Kitts-et-Nevis", "KN", "KNA", "659"},
|
||||
{"Anguilla", "Anguilla", "AI", "AIA", "660"},
|
||||
{"Saint Lucia", "Sainte-Lucie", "LC", "LCA", "662"},
|
||||
{"Saint Martin (French part)", "Saint-Martin (partie française)", "MF", "MAF", "663"},
|
||||
{"Saint Pierre and Miquelon", "Saint-Pierre-et-Miquelon", "PM", "SPM", "666"},
|
||||
{"Saint Vincent and the Grenadines", "Saint-Vincent-et-les Grenadines", "VC", "VCT", "670"},
|
||||
{"San Marino", "Saint-Marin", "SM", "SMR", "674"},
|
||||
{"Sao Tome and Principe", "Sao Tomé-et-Principe", "ST", "STP", "678"},
|
||||
{"Saudi Arabia", "Arabie saoudite (l')", "SA", "SAU", "682"},
|
||||
{"Senegal", "Sénégal (le)", "SN", "SEN", "686"},
|
||||
{"Serbia", "Serbie (la)", "RS", "SRB", "688"},
|
||||
{"Seychelles", "Seychelles (les)", "SC", "SYC", "690"},
|
||||
{"Sierra Leone", "Sierra Leone (la)", "SL", "SLE", "694"},
|
||||
{"Singapore", "Singapour", "SG", "SGP", "702"},
|
||||
{"Slovakia", "Slovaquie (la)", "SK", "SVK", "703"},
|
||||
{"Viet Nam", "Viet Nam (le)", "VN", "VNM", "704"},
|
||||
{"Slovenia", "Slovénie (la)", "SI", "SVN", "705"},
|
||||
{"Somalia", "Somalie (la)", "SO", "SOM", "706"},
|
||||
{"South Africa", "Afrique du Sud (l')", "ZA", "ZAF", "710"},
|
||||
{"Zimbabwe", "Zimbabwe (le)", "ZW", "ZWE", "716"},
|
||||
{"Spain", "Espagne (l')", "ES", "ESP", "724"},
|
||||
{"South Sudan", "Soudan du Sud (le)", "SS", "SSD", "728"},
|
||||
{"Sudan (the)", "Soudan (le)", "SD", "SDN", "729"},
|
||||
{"Western Sahara*", "Sahara occidental (le)*", "EH", "ESH", "732"},
|
||||
{"Suriname", "Suriname (le)", "SR", "SUR", "740"},
|
||||
{"Svalbard and Jan Mayen", "Svalbard et l'Île Jan Mayen (le)", "SJ", "SJM", "744"},
|
||||
{"Swaziland", "Swaziland (le)", "SZ", "SWZ", "748"},
|
||||
{"Sweden", "Suède (la)", "SE", "SWE", "752"},
|
||||
{"Switzerland", "Suisse (la)", "CH", "CHE", "756"},
|
||||
{"Syrian Arab Republic", "République arabe syrienne (la)", "SY", "SYR", "760"},
|
||||
{"Tajikistan", "Tadjikistan (le)", "TJ", "TJK", "762"},
|
||||
{"Thailand", "Thaïlande (la)", "TH", "THA", "764"},
|
||||
{"Togo", "Togo (le)", "TG", "TGO", "768"},
|
||||
{"Tokelau", "Tokelau (les)", "TK", "TKL", "772"},
|
||||
{"Tonga", "Tonga (les)", "TO", "TON", "776"},
|
||||
{"Trinidad and Tobago", "Trinité-et-Tobago (la)", "TT", "TTO", "780"},
|
||||
{"United Arab Emirates (the)", "Émirats arabes unis (les)", "AE", "ARE", "784"},
|
||||
{"Tunisia", "Tunisie (la)", "TN", "TUN", "788"},
|
||||
{"Turkey", "Turquie (la)", "TR", "TUR", "792"},
|
||||
{"Turkmenistan", "Turkménistan (le)", "TM", "TKM", "795"},
|
||||
{"Turks and Caicos Islands (the)", "Turks-et-Caïcos (les Îles)", "TC", "TCA", "796"},
|
||||
{"Tuvalu", "Tuvalu (les)", "TV", "TUV", "798"},
|
||||
{"Uganda", "Ouganda (l')", "UG", "UGA", "800"},
|
||||
{"Ukraine", "Ukraine (l')", "UA", "UKR", "804"},
|
||||
{"Macedonia (the former Yugoslav Republic of)", "Macédoine (l'ex‑République yougoslave de)", "MK", "MKD", "807"},
|
||||
{"Egypt", "Égypte (l')", "EG", "EGY", "818"},
|
||||
{"United Kingdom of Great Britain and Northern Ireland (the)", "Royaume-Uni de Grande-Bretagne et d'Irlande du Nord (le)", "GB", "GBR", "826"},
|
||||
{"Guernsey", "Guernesey", "GG", "GGY", "831"},
|
||||
{"Jersey", "Jersey", "JE", "JEY", "832"},
|
||||
{"Isle of Man", "Île de Man", "IM", "IMN", "833"},
|
||||
{"Tanzania, United Republic of", "Tanzanie, République-Unie de", "TZ", "TZA", "834"},
|
||||
{"United States of America (the)", "États-Unis d'Amérique (les)", "US", "USA", "840"},
|
||||
{"Virgin Islands (U.S.)", "Vierges des États-Unis (les Îles)", "VI", "VIR", "850"},
|
||||
{"Burkina Faso", "Burkina Faso (le)", "BF", "BFA", "854"},
|
||||
{"Uruguay", "Uruguay (l')", "UY", "URY", "858"},
|
||||
{"Uzbekistan", "Ouzbékistan (l')", "UZ", "UZB", "860"},
|
||||
{"Venezuela (Bolivarian Republic of)", "Venezuela (République bolivarienne du)", "VE", "VEN", "862"},
|
||||
{"Wallis and Futuna", "Wallis-et-Futuna", "WF", "WLF", "876"},
|
||||
{"Samoa", "Samoa (le)", "WS", "WSM", "882"},
|
||||
{"Yemen", "Yémen (le)", "YE", "YEM", "887"},
|
||||
{"Zambia", "Zambie (la)", "ZM", "ZMB", "894"},
|
||||
}
|
||||
|
||||
// ISO4217List is the list of ISO currency codes
var ISO4217List = []string{
	"AED", "AFN", "ALL", "AMD", "ANG", "AOA", "ARS", "AUD", "AWG", "AZN",
	"BAM", "BBD", "BDT", "BGN", "BHD", "BIF", "BMD", "BND", "BOB", "BOV", "BRL", "BSD", "BTN", "BWP", "BYN", "BZD",
	"CAD", "CDF", "CHE", "CHF", "CHW", "CLF", "CLP", "CNY", "COP", "COU", "CRC", "CUC", "CUP", "CVE", "CZK",
	"DJF", "DKK", "DOP", "DZD",
	"EGP", "ERN", "ETB", "EUR",
	"FJD", "FKP",
	"GBP", "GEL", "GHS", "GIP", "GMD", "GNF", "GTQ", "GYD",
	"HKD", "HNL", "HRK", "HTG", "HUF",
	"IDR", "ILS", "INR", "IQD", "IRR", "ISK",
	"JMD", "JOD", "JPY",
	"KES", "KGS", "KHR", "KMF", "KPW", "KRW", "KWD", "KYD", "KZT",
	"LAK", "LBP", "LKR", "LRD", "LSL", "LYD",
	"MAD", "MDL", "MGA", "MKD", "MMK", "MNT", "MOP", "MRO", "MUR", "MVR", "MWK", "MXN", "MXV", "MYR", "MZN",
	"NAD", "NGN", "NIO", "NOK", "NPR", "NZD",
	"OMR",
	"PAB", "PEN", "PGK", "PHP", "PKR", "PLN", "PYG",
	"QAR",
	"RON", "RSD", "RUB", "RWF",
	"SAR", "SBD", "SCR", "SDG", "SEK", "SGD", "SHP", "SLL", "SOS", "SRD", "SSP", "STD", "STN", "SVC", "SYP", "SZL",
	"THB", "TJS", "TMT", "TND", "TOP", "TRY", "TTD", "TWD", "TZS",
	"UAH", "UGX", "USD", "USN", "UYI", "UYU", "UYW", "UZS",
	"VEF", "VES", "VND", "VUV",
	"WST",
	"XAF", "XAG", "XAU", "XBA", "XBB", "XBC", "XBD", "XCD", "XDR", "XOF", "XPD", "XPF", "XPT", "XSU", "XTS", "XUA", "XXX",
	"YER",
	"ZAR", "ZMW", "ZWL",
}

// ISO693Entry stores ISO language codes
type ISO693Entry struct {
	Alpha3bCode string
	Alpha2Code  string
	English     string
}

// ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
var ISO693List = []ISO693Entry{
{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
|
||||
{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
|
||||
{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
|
||||
{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
|
||||
{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
|
||||
{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
|
||||
{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
|
||||
{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
|
||||
{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
|
||||
{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
|
||||
{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
|
||||
{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
|
||||
{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
|
||||
{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
|
||||
{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
|
||||
{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
|
||||
{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
|
||||
{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
|
||||
{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
|
||||
{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
|
||||
{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
|
||||
{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
|
||||
{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
|
||||
{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
|
||||
{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
|
||||
{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
|
||||
{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
|
||||
{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
|
||||
{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
|
||||
{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
|
||||
{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
|
||||
{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
|
||||
{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
|
||||
{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
|
||||
{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
|
||||
{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
|
||||
{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
|
||||
{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
|
||||
{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
|
||||
{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
|
||||
{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
|
||||
{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
|
||||
{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
|
||||
{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
|
||||
{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
|
||||
{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
|
||||
{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
|
||||
{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
|
||||
{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
|
||||
{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
|
||||
{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
|
||||
{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
|
||||
{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
|
||||
{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
|
||||
{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
|
||||
{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
|
||||
{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
|
||||
{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
|
||||
{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
|
||||
{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
|
||||
{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
|
||||
{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
|
||||
{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
|
||||
{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
|
||||
{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
|
||||
{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
|
||||
{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
|
||||
{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
|
||||
{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
|
||||
{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
|
||||
{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
|
||||
{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
|
||||
{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
|
||||
{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
|
||||
{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
|
||||
{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
|
||||
{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
|
||||
{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
|
||||
{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
|
||||
{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
|
||||
{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
|
||||
{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
|
||||
{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
|
||||
{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
|
||||
{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
|
||||
{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
|
||||
{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
|
||||
{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
|
||||
{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
|
||||
{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
|
||||
{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
|
||||
{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
|
||||
{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
|
||||
{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
|
||||
{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
|
||||
{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
|
||||
{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
|
||||
{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
|
||||
{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
|
||||
{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
|
||||
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
|
||||
{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
|
||||
{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
|
||||
{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
|
||||
{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
|
||||
{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
|
||||
{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
|
||||
{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
|
||||
{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
|
||||
{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
|
||||
{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
|
||||
{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
|
||||
{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
|
||||
{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
|
||||
{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
|
||||
{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
|
||||
{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
|
||||
{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
|
||||
{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
|
||||
{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
|
||||
{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
|
||||
{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
|
||||
{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
|
||||
{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
|
||||
{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
|
||||
{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
|
||||
{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
|
||||
{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
|
||||
{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
|
||||
{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
|
||||
{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
|
||||
{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
|
||||
{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
|
||||
{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
|
||||
{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
|
||||
{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
|
||||
{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
|
||||
{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
|
||||
{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
|
||||
{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
|
||||
{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
|
||||
{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
|
||||
{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
|
||||
{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
|
||||
{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
|
||||
{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
|
||||
{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
|
||||
{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
|
||||
{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
|
||||
{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
|
||||
{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
|
||||
{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
|
||||
{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
|
||||
{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
|
||||
{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
|
||||
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
|
||||
{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
|
||||
{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
|
||||
{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
|
||||
{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
|
||||
{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
|
||||
{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
|
||||
{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
|
||||
{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
|
||||
{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
|
||||
{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
|
||||
{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
|
||||
{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
|
||||
{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
|
||||
{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
|
||||
{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
|
||||
{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
|
||||
{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
|
||||
{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
|
||||
{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
|
||||
{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
|
||||
{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
|
||||
{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
|
||||
{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
|
||||
{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
|
||||
{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
|
||||
{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
|
||||
{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
|
||||
{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
|
||||
}
|
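The ISO4217 currency list reproduced above is an exported package variable, so a caller can consume it directly. A minimal membership-check sketch, assuming only the upstream import path and the ISO4217List variable shown in this diff; the isCurrencyCode helper is illustrative, not part of govalidator:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// isCurrencyCode reports whether code appears in govalidator's exported ISO4217List.
func isCurrencyCode(code string) bool {
	for _, c := range govalidator.ISO4217List {
		if c == code {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isCurrencyCode("EUR")) // true
	fmt.Println(isCurrencyCode("EUX")) // false
}
```

For repeated lookups a map keyed by code would be the usual choice; the linear scan just mirrors how the slice above is laid out.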
270 vendor/github.com/asaskevich/govalidator/utils.go generated vendored
File diff suppressed because it is too large
Load Diff
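The suppressed utils.go diff above removes govalidator's string helpers (Contains, Trim, CamelCaseToUnderscore, NormalizeEmail, the Pad* family, and others). A short usage sketch of three of them, assuming the upstream import path; the expected outputs in the comments follow the doc comments of the removed functions:

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	// Trim removes the given characters (or surrounding spaces when chars is empty).
	fmt.Println(govalidator.Trim("  hello  ", "")) // "hello"

	// CamelCaseToUnderscore converts camel case to underscore-separated form.
	fmt.Println(govalidator.CamelCaseToUnderscore("MyFunc")) // "my_func"

	// NormalizeEmail canonicalizes an address; GMail dots and +tags are stripped.
	email, err := govalidator.NormalizeEmail("Some.One+tag@GMail.com")
	if err != nil {
		fmt.Println("not an email:", err)
		return
	}
	fmt.Println(email) // "someone@gmail.com"
}
```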
1768 vendor/github.com/asaskevich/govalidator/validator.go generated vendored
File diff suppressed because it is too large
Load Diff
15 vendor/github.com/asaskevich/govalidator/wercker.yml generated vendored
@@ -1,15 +0,0 @@
box: golang
build:
  steps:
    - setup-go-workspace

    - script:
        name: go get
        code: |
          go version
          go get -t ./...

    - script:
        name: go test
        code: |
          go test -race -v ./...
22 vendor/github.com/cespare/xxhash/v2/LICENSE.txt generated vendored
@@ -1,22 +0,0 @@
Copyright (c) 2016 Caleb Spare

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
74 vendor/github.com/cespare/xxhash/v2/README.md generated vendored
@@ -1,74 +0,0 @@
# xxhash

[Go Reference](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[Test](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
high-quality hashing algorithm that is much faster than anything in the Go
standard library.

This package provides a straightforward API:

```
func Sum64(b []byte) uint64
func Sum64String(s string) uint64
type Digest struct{ ... }
func New() *Digest
```

The `Digest` type implements hash.Hash64. Its key methods are:

```
func (*Digest) Write([]byte) (int, error)
func (*Digest) WriteString(string) (int, error)
func (*Digest) Sum64() uint64
```

The package is written with optimized pure Go and also contains even faster
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
opts into using the Go code even on those architectures.

[xxHash]: http://cyan4973.github.io/xxHash/

## Compatibility

This package is in a module and the latest code is in version 2 of the module.
You need a version of Go with at least "minimal module compatibility" to use
github.com/cespare/xxhash/v2:

* 1.9.7+ for Go 1.9
* 1.10.3+ for Go 1.10
* Go 1.11 or later

I recommend using the latest release of Go.

## Benchmarks

Here are some quick benchmarks comparing the pure-Go and assembly
implementations of Sum64.

| input size | purego    | asm       |
| ---------- | --------- | --------- |
| 4 B        | 1.3 GB/s  | 1.2 GB/s  |
| 16 B       | 2.9 GB/s  | 3.5 GB/s  |
| 100 B      | 6.9 GB/s  | 8.1 GB/s  |
| 4 KB       | 11.7 GB/s | 16.7 GB/s |
| 10 MB      | 12.0 GB/s | 17.3 GB/s |

These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
CPU using the following commands under Go 1.19.2:

```
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
```

## Projects using this package

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
- [Ristretto](https://github.com/dgraph-io/ristretto)
- [Badger](https://github.com/dgraph-io/badger)
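The README above documents the package's two entry points, the one-shot Sum64 and the streaming Digest. A short sketch of both, assuming the v2 import path listed in the README:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64([]byte("hello, world")))

	// Streaming: Digest implements hash.Hash64, so data can arrive in chunks.
	d := xxhash.New()
	d.WriteString("hello, ")
	d.Write([]byte("world"))
	fmt.Printf("%016x\n", d.Sum64())
}
```

Both calls hash the same bytes, so the two printed values are identical; the Digest form is the one to reach for when the input is not available as a single slice.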
10 vendor/github.com/cespare/xxhash/v2/testall.sh generated vendored
@@ -1,10 +0,0 @@
#!/bin/bash
set -eu -o pipefail

# Small convenience script for running the tests with various combinations of
# arch/tags. This assumes we're running on amd64 and have qemu available.

go test ./...
go test -tags purego ./...
GOARCH=arm64 go test
GOARCH=arm64 go test -tags purego
243 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored
File diff suppressed because it is too large
Load Diff
209 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored
File diff suppressed because it is too large
Load Diff
183 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s generated vendored
File diff suppressed because it is too large
Load Diff
15 vendor/github.com/cespare/xxhash/v2/xxhash_asm.go generated vendored
@@ -1,15 +0,0 @@
//go:build (amd64 || arm64) && !appengine && gc && !purego
// +build amd64 arm64
// +build !appengine
// +build gc
// +build !purego

package xxhash

// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64

//go:noescape
func writeBlocks(d *Digest, b []byte) int
76 vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored
File diff suppressed because it is too large
Load Diff
16 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored
@@ -1,16 +0,0 @@
//go:build appengine
// +build appengine

// This file contains the safe implementations of otherwise unsafe-using code.

package xxhash

// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
	return Sum64([]byte(s))
}

// WriteString adds more data to d. It always returns len(s), nil.
func (d *Digest) WriteString(s string) (n int, err error) {
	return d.Write([]byte(s))
}
58 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored
File diff suppressed because it is too large
Load Diff
21
vendor/github.com/dgryski/go-rendezvous/LICENSE
generated
vendored
21
vendor/github.com/dgryski/go-rendezvous/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
79
vendor/github.com/dgryski/go-rendezvous/rdv.go
generated
vendored
79
vendor/github.com/dgryski/go-rendezvous/rdv.go
generated
vendored
@@ -1,79 +0,0 @@
package rendezvous

type Rendezvous struct {
	nodes map[string]int
	nstr  []string
	nhash []uint64
	hash  Hasher
}

type Hasher func(s string) uint64

func New(nodes []string, hash Hasher) *Rendezvous {
	r := &Rendezvous{
		nodes: make(map[string]int, len(nodes)),
		nstr:  make([]string, len(nodes)),
		nhash: make([]uint64, len(nodes)),
		hash:  hash,
	}

	for i, n := range nodes {
		r.nodes[n] = i
		r.nstr[i] = n
		r.nhash[i] = hash(n)
	}

	return r
}

func (r *Rendezvous) Lookup(k string) string {
	// short-circuit if we're empty
	if len(r.nodes) == 0 {
		return ""
	}

	khash := r.hash(k)

	var midx int
	var mhash = xorshiftMult64(khash ^ r.nhash[0])

	for i, nhash := range r.nhash[1:] {
		if h := xorshiftMult64(khash ^ nhash); h > mhash {
			midx = i + 1
			mhash = h
		}
	}

	return r.nstr[midx]
}

func (r *Rendezvous) Add(node string) {
	r.nodes[node] = len(r.nstr)
	r.nstr = append(r.nstr, node)
	r.nhash = append(r.nhash, r.hash(node))
}

func (r *Rendezvous) Remove(node string) {
	// find index of node to remove
	nidx := r.nodes[node]

	// remove from the slices
	l := len(r.nstr)
	r.nstr[nidx] = r.nstr[l]
	r.nstr = r.nstr[:l]

	r.nhash[nidx] = r.nhash[l]
	r.nhash = r.nhash[:l]

	// update the map
	delete(r.nodes, node)
	moved := r.nstr[nidx]
	r.nodes[moved] = nidx
}

func xorshiftMult64(x uint64) uint64 {
	x ^= x >> 12 // a
	x ^= x << 25 // b
	x ^= x >> 27 // c
	return x * 2685821657736338717
}
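rdv.go above implements rendezvous (highest-random-weight) hashing: Lookup mixes the key's hash with each node's precomputed hash via xorshiftMult64 and returns the node with the largest mixed value. A minimal usage sketch, assuming the import paths from the file headers above and xxhash.Sum64String as the Hasher:

```
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
	"github.com/dgryski/go-rendezvous"
)

func main() {
	// Build the node set with a 64-bit string hasher.
	r := rendezvous.New([]string{"cache-a", "cache-b", "cache-c"}, xxhash.Sum64String)

	// The same key always maps to the same node, and adding a node
	// only remaps the keys that now score highest on it.
	fmt.Println(r.Lookup("user:42"))

	r.Add("cache-d")
	fmt.Println(r.Lookup("user:42"))
}
```

Note that Remove, as vendored above, indexes r.nstr[l] with l == len(r.nstr), so the sketch only exercises New, Lookup, and Add.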
26
vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
26
vendor/github.com/go-logr/logr/.golangci.yaml
generated
vendored
@@ -1,26 +0,0 @@
run:
  timeout: 1m
  tests: true

linters:
  disable-all: true
  enable:
    - asciicheck
    - errcheck
    - forcetypeassert
    - gocritic
    - gofmt
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - typecheck
    - unused

issues:
  exclude-use-default: false
  max-issues-per-linter: 0
  max-same-issues: 10
6
vendor/github.com/go-logr/logr/CHANGELOG.md
generated
vendored
6
vendor/github.com/go-logr/logr/CHANGELOG.md
generated
vendored
@@ -1,6 +0,0 @@
# CHANGELOG

## v1.0.0-rc1

This is the first logged release. Major changes (including breaking changes)
have occurred since earlier tags.
17
vendor/github.com/go-logr/logr/CONTRIBUTING.md
generated
vendored
17
vendor/github.com/go-logr/logr/CONTRIBUTING.md
generated
vendored
@@ -1,17 +0,0 @@
# Contributing

Logr is open to pull-requests, provided they fit within the intended scope of
the project. Specifically, this library aims to be VERY small and minimalist,
with no external dependencies.

## Compatibility

This project intends to follow [semantic versioning](http://semver.org) and
is very strict about compatibility. Any proposed changes MUST follow those
rules.

## Performance

As a logging library, logr must be as light-weight as possible. Any proposed
code change must include results of running the [benchmark](./benchmark)
before and after the change.
201
vendor/github.com/go-logr/logr/LICENSE
generated
vendored
201
vendor/github.com/go-logr/logr/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright {yyyy} {name of copyright owner}
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
406
vendor/github.com/go-logr/logr/README.md
generated
vendored
406
vendor/github.com/go-logr/logr/README.md
generated
vendored
@@ -1,406 +0,0 @@
|
||||
# A minimal logging API for Go
|
||||
|
||||
[](https://pkg.go.dev/github.com/go-logr/logr)
|
||||
[](https://securityscorecards.dev/viewer/?platform=github.com&org=go-logr&repo=logr)
|
||||
|
||||
logr offers an(other) opinion on how Go programs and libraries can do logging
|
||||
without becoming coupled to a particular logging implementation. This is not
|
||||
an implementation of logging - it is an API. In fact it is two APIs with two
|
||||
different sets of users.
|
||||
|
||||
The `Logger` type is intended for application and library authors. It provides
|
||||
a relatively small API which can be used everywhere you want to emit logs. It
|
||||
defers the actual act of writing logs (to files, to stdout, or whatever) to the
|
||||
`LogSink` interface.
|
||||
|
||||
The `LogSink` interface is intended for logging library implementers. It is a
|
||||
pure interface which can be implemented by logging frameworks to provide the actual logging
|
||||
functionality.
|
||||
|
||||
This decoupling allows application and library developers to write code in
|
||||
terms of `logr.Logger` (which has very low dependency fan-out) while the
|
||||
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
|
||||
Application developers can then switch out implementations as necessary.
|
||||
|
||||
Many people assert that libraries should not be logging, and as such efforts
|
||||
like this are pointless. Those people are welcome to convince the authors of
|
||||
the tens-of-thousands of libraries that *DO* write logs that they are all
|
||||
wrong. In the meantime, logr takes a more practical approach.
|
||||
|
||||
## Typical usage
|
||||
|
||||
Somewhere, early in an application's life, it will make a decision about which
|
||||
logging library (implementation) it actually wants to use. Something like:
|
||||
|
||||
```
|
||||
func main() {
|
||||
// ... other setup code ...
|
||||
|
||||
// Create the "root" logger. We have chosen the "logimpl" implementation,
|
||||
// which takes some initial parameters and returns a logr.Logger.
|
||||
logger := logimpl.New(param1, param2)
|
||||
|
||||
// ... other setup code ...
|
||||
```
|
||||
|
||||
Most apps will call into other libraries, create structures to govern the flow,
|
||||
etc. The `logr.Logger` object can be passed to these other libraries, stored
|
||||
in structs, or even used as a package-global variable, if needed. For example:
|
||||
|
||||
```
|
||||
app := createTheAppObject(logger)
|
||||
app.Run()
|
||||
```
|
||||
|
||||
Outside of this early setup, no other packages need to know about the choice of
|
||||
implementation. They write logs in terms of the `logr.Logger` that they
|
||||
received:
|
||||
|
||||
```
|
||||
type appObject struct {
|
||||
// ... other fields ...
|
||||
logger logr.Logger
|
||||
// ... other fields ...
|
||||
}
|
||||
|
||||
func (app *appObject) Run() {
|
||||
app.logger.Info("starting up", "timestamp", time.Now())
|
||||
|
||||
// ... app code ...
|
||||
```
|
||||
|
||||
## Background
|
||||
|
||||
If the Go standard library had defined an interface for logging, this project
|
||||
probably would not be needed. Alas, here we are.
|
||||
|
||||
When the Go developers started developing such an interface with
|
||||
[slog](https://github.com/golang/go/issues/56345), they adopted some of the
|
||||
logr design but also left out some parts and changed others:
|
||||
|
||||
| Feature | logr | slog |
|
||||
|---------|------|------|
|
||||
| High-level API | `Logger` (passed by value) | `Logger` (passed by [pointer](https://github.com/golang/go/issues/59126)) |
|
||||
| Low-level API | `LogSink` | `Handler` |
|
||||
| Stack unwinding | done by `LogSink` | done by `Logger` |
|
||||
| Skipping helper functions | `WithCallDepth`, `WithCallStackHelper` | [not supported by Logger](https://github.com/golang/go/issues/59145) |
|
||||
| Generating a value for logging on demand | `Marshaler` | `LogValuer` |
|
||||
| Log levels | >= 0, higher meaning "less important" | positive and negative, with 0 for "info" and higher meaning "more important" |
|
||||
| Error log entries | always logged, don't have a verbosity level | normal log entries with level >= `LevelError` |
|
||||
| Passing logger via context | `NewContext`, `FromContext` | no API |
|
||||
| Adding a name to a logger | `WithName` | no API |
|
||||
| Modify verbosity of log entries in a call chain | `V` | no API |
|
||||
| Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
|
||||
| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
|
||||
|
||||
The high-level slog API is explicitly meant to be one of many different APIs
|
||||
that can be layered on top of a shared `slog.Handler`. logr is one such
|
||||
alternative API, with [interoperability](#slog-interoperability) provided by
|
||||
some conversion functions.
|
||||
|
||||
### Inspiration
|
||||
|
||||
Before you consider this package, please read [this blog post by the
|
||||
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
|
||||
he has to say, and it largely aligns with our own experiences.
|
||||
|
||||
### Differences from Dave's ideas
|
||||
|
||||
The main differences are:
|
||||
|
||||
1. Dave basically proposes doing away with the notion of a logging API in favor
|
||||
of `fmt.Printf()`. We disagree, especially when you consider things like output
|
||||
locations, timestamps, file and line decorations, and structured logging. This
|
||||
package restricts the logging API to just 2 types of logs: info and error.
|
||||
|
||||
Info logs are things you want to tell the user which are not errors. Error
|
||||
logs are, well, errors. If your code receives an `error` from a subordinate
|
||||
function call and is logging that `error` *and not returning it*, use error
|
||||
logs.
|
||||
|
||||
2. Verbosity-levels on info logs. This gives developers a chance to indicate
|
||||
arbitrary grades of importance for info logs, without assigning names with
|
||||
semantic meaning such as "warning", "trace", and "debug." Superficially this
|
||||
may feel very similar, but the primary difference is the lack of semantics.
|
||||
Because verbosity is a numerical value, it's safe to assume that an app running
|
||||
with higher verbosity means more (and less important) logs will be generated.
|
||||
|
||||
## Implementations (non-exhaustive)
|
||||
|
||||
There are implementations for the following logging libraries:
|
||||
|
||||
- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
|
||||
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
|
||||
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
|
||||
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
|
||||
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
|
||||
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
|
||||
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
|
||||
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
|
||||
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
|
||||
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
|
||||
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
|
||||
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
|
||||
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)
|
||||
|
||||
## slog interoperability
|
||||
|
||||
Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
|
||||
and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
|
||||
`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
|
||||
As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
|
||||
slog API.
|
||||
|
||||
### Using a `logr.LogSink` as backend for slog
|
||||
|
||||
Ideally, a logr sink implementation should support both logr and slog by
|
||||
implementing both the normal logr interface(s) and `SlogSink`. Because
|
||||
of a conflict in the parameters of the common `Enabled` method, it is [not
|
||||
possible to implement both slog.Handler and logr.Sink in the same
|
||||
type](https://github.com/golang/go/issues/59110).
|
||||
|
||||
If both are supported, log calls can go from the high-level APIs to the backend
|
||||
without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
|
||||
convert back and forth without adding additional wrappers, with one exception:
|
||||
when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
|
||||
`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
|
||||
log calls.
|
||||
|
||||
Such an implementation should also support values that implement specific
|
||||
interfaces from both packages for logging (`logr.Marshaler`, `slog.LogValuer`,
|
||||
`slog.GroupValue`). logr does not convert those.
|
||||
|
||||
Not supporting slog has several drawbacks:
|
||||
- Recording source code locations works correctly if the handler gets called
|
||||
through `slog.Logger`, but may be wrong in other cases. That's because a
|
||||
`logr.Sink` does its own stack unwinding instead of using the program counter
|
||||
provided by the high-level API.
|
||||
- slog levels <= 0 can be mapped to logr levels by negating the level without a
|
||||
loss of information. But all slog levels > 0 (e.g. `slog.LevelWarning` as
|
||||
used by `slog.Logger.Warn`) must be mapped to 0 before calling the sink
|
||||
because logr does not support "more important than info" levels.
|
||||
- The slog group concept is supported by prefixing each key in a key/value
|
||||
pair with the group names, separated by a dot. For structured output like
|
||||
JSON it would be better to group the key/value pairs inside an object.
|
||||
- Special slog values and interfaces don't work as expected.
|
||||
- The overhead is likely to be higher.
|
||||
|
||||
These drawbacks are severe enough that applications using a mixture of slog and
|
||||
logr should switch to a different backend.
|
||||
|
||||
### Using a `slog.Handler` as backend for logr
|
||||
|
||||
Using a plain `slog.Handler` without support for logr works better than the
|
||||
other direction:
|
||||
- All logr verbosity levels can be mapped 1:1 to their corresponding slog level
|
||||
by negating them.
|
||||
- Stack unwinding is done by the `SlogSink` and the resulting program
|
||||
counter is passed to the `slog.Handler`.
|
||||
- Names added via `Logger.WithName` are gathered and recorded in an additional
|
||||
attribute with `logger` as key and the names separated by slash as value.
|
||||
- `Logger.Error` is turned into a log record with `slog.LevelError` as level
|
||||
and an additional attribute with `err` as key, if an error was provided.
|
||||
|
||||
The main drawback is that `logr.Marshaler` will not be supported. Types should
|
||||
ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
|
||||
with logr implementations without slog support is not important, then
|
||||
`slog.Valuer` is sufficient.
|
||||
|
||||
### Context support for slog
|
||||
|
||||
Storing a logger in a `context.Context` is not supported by
|
||||
slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
|
||||
used to fill this gap. They store and retrieve a `slog.Logger` pointer
|
||||
under the same context key that is also used by `NewContext` and
|
||||
`FromContext` for `logr.Logger` value.
|
||||
|
||||
When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
|
||||
automatically convert the `slog.Logger` to a
|
||||
`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
|
||||
|
||||
With this approach, binaries which use either slog or logr are as efficient as
|
||||
possible with no unnecessary allocations. This is also why the API stores a
|
||||
`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
|
||||
on retrieval would need to allocate one.
|
||||
|
||||
The downside is that switching back and forth needs more allocations. Because
|
||||
logr is the API that is already in use by different packages, in particular
|
||||
Kubernetes, the recommendation is to use the `logr.Logger` API in code which
|
||||
uses contextual logging.
|
||||
|
||||
An alternative to adding values to a logger and storing that logger in the
|
||||
context is to store the values in the context and to configure a logging
|
||||
backend to extract those values when emitting log entries. This only works when
|
||||
log calls are passed the context, which is not supported by the logr API.
|
||||
|
||||
With the slog API, it is possible, but not
|
||||
required. https://github.com/veqryn/slog-context is a package for slog which
|
||||
provides additional support code for this approach. It also contains wrappers
|
||||
for the context functions in logr, so developers who prefer to not use the logr
|
||||
APIs directly can use those instead and the resulting code will still be
|
||||
interoperable with logr.
|
||||
|
||||
## FAQ
|
||||
|
||||
### Conceptual
|
||||
|
||||
#### Why structured logging?
|
||||
|
||||
- **Structured logs are more easily queryable**: Since you've got
|
||||
key-value pairs, it's much easier to query your structured logs for
|
||||
particular values by filtering on the contents of a particular key --
|
||||
think searching request logs for error codes, Kubernetes reconcilers for
|
||||
the name and namespace of the reconciled object, etc.
|
||||
|
||||
- **Structured logging makes it easier to have cross-referenceable logs**:
|
||||
Similarly to searchability, if you maintain conventions around your
|
||||
keys, it becomes easy to gather all log lines related to a particular
|
||||
concept.
|
||||
|
||||
- **Structured logs allow better dimensions of filtering**: if you have
|
||||
structure to your logs, you've got more precise control over how much
|
||||
information is logged -- you might choose in a particular configuration
|
||||
to log certain keys but not others, only log lines where a certain key
|
||||
matches a certain value, etc., instead of just having v-levels and names
|
||||
to key off of.
|
||||
|
||||
- **Structured logs better represent structured data**: sometimes, the
|
||||
data that you want to log is inherently structured (think tuple-link
|
||||
objects.) Structured logs allow you to preserve that structure when
|
||||
outputting.
|
||||
|
||||
#### Why V-levels?
|
||||
|
||||
**V-levels give operators an easy way to control the chattiness of log
|
||||
operations**. V-levels provide a way for a given package to distinguish
|
||||
the relative importance or verbosity of a given log message. Then, if
|
||||
a particular logger or package is logging too many messages, the user
|
||||
of the package can simply change the v-levels for that library.
|
||||
|
||||
#### Why not named levels, like Info/Warning/Error?
|
||||
|
||||
Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
|
||||
from Dave's ideas](#differences-from-daves-ideas).
|
||||
|
||||
#### Why not allow format strings, too?
|
||||
|
||||
**Format strings negate many of the benefits of structured logs**:
|
||||
|
||||
- They're not easily searchable without resorting to fuzzy searching,
|
||||
regular expressions, etc.
|
||||
|
||||
- They don't store structured data well, since contents are flattened into
|
||||
a string.
|
||||
|
||||
- They're not cross-referenceable.
|
||||
|
||||
- They don't compress easily, since the message is not constant.
|
||||
|
||||
(Unless you turn positional parameters into key-value pairs with numerical
|
||||
keys, at which point you've gotten key-value logging with meaningless
|
||||
keys.)
|
||||
|
||||
### Practical
|
||||
|
||||
#### Why key-value pairs, and not a map?
|
||||
|
||||
Key-value pairs are *much* easier to optimize, especially around
|
||||
allocations. Zap (a structured logger that inspired logr's interface) has
|
||||
[performance measurements](https://github.com/uber-go/zap#performance)
|
||||
that show this quite nicely.
|
||||
|
||||
While the interface ends up being a little less obvious, you get
|
||||
potentially better performance, plus avoid making users type
|
||||
`map[string]string{}` every time they want to log.
|
||||
|
||||
#### What if my V-levels differ between libraries?
|
||||
|
||||
That's fine. Control your V-levels on a per-logger basis, and use the
|
||||
`WithName` method to pass different loggers to different libraries.
|
||||
|
||||
Generally, you should take care to ensure that you have relatively
|
||||
consistent V-levels within a given logger, however, as this makes deciding
|
||||
on what verbosity of logs to request easier.
|
||||
|
||||
#### But I really want to use a format string!
|
||||
|
||||
That's not actually a question. Assuming your question is "how do
|
||||
I convert my mental model of logging with format strings to logging with
|
||||
constant messages":
|
||||
|
||||
1. Figure out what the error actually is, as you'd write in a TL;DR style,
|
||||
and use that as a message.
|
||||
|
||||
2. For every place you'd write a format specifier, look to the word before
|
||||
it, and add that as a key value pair.
|
||||
|
||||
For instance, consider the following examples (all taken from spots in the
|
||||
Kubernetes codebase):
|
||||
|
||||
- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
|
||||
responseCode, err)` becomes `logger.Error(err, "client returned an
|
||||
error", "code", responseCode)`
|
||||
|
||||
- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
|
||||
seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
|
||||
response when requesting url", "attempt", retries, "after
|
||||
seconds", seconds, "url", url)`
|
||||
|
||||
If you *really* must use a format string, use it in a key's value, and
|
||||
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
|
||||
reflect over type %T")` becomes `logger.Info("unable to reflect over
|
||||
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
|
||||
this is necessary should be few and far between.
|
||||
|
||||
#### How do I choose my V-levels?
|
||||
|
||||
This is basically the only hard constraint: increase V-levels to denote
|
||||
more verbose or more debug-y logs.
|
||||
|
||||
Otherwise, you can start out with `0` as "you always want to see this",
|
||||
`1` as "common logging that you might *possibly* want to turn off", and
|
||||
`10` as "I would like to performance-test your log collection stack."
|
||||
|
||||
Then gradually choose levels in between as you need them, working your way
|
||||
down from 10 (for debug and trace style logs) and up from 1 (for chattier
|
||||
info-type logs). For reference, slog pre-defines -4 for debug logs
|
||||
(corresponds to 4 in logr), which matches what is
|
||||
[recommended for Kubernetes](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md#what-method-to-use).
|
||||
|
||||
#### How do I choose my keys?
|
||||
|
||||
Keys are fairly flexible, and can hold more or less any string
|
||||
value. For best compatibility with implementations and consistency
|
||||
with existing code in other projects, there are a few conventions you
|
||||
should consider.
|
||||
|
||||
- Make your keys human-readable.
|
||||
- Constant keys are generally a good idea.
|
||||
- Be consistent across your codebase.
|
||||
- Keys should naturally match parts of the message string.
|
||||
- Use lower case for simple keys and
|
||||
[lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
|
||||
more complex ones. Kubernetes is one example of a project that has
|
||||
[adopted that
|
||||
convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).
|
||||
|
||||
While key names are mostly unrestricted (and spaces are acceptable),
|
||||
it's generally a good idea to stick to printable ascii characters, or at
|
||||
least match the general character set of your log lines.
|
||||
|
||||
#### Why should keys be constant values?
|
||||
|
||||
The point of structured logging is to make later log processing easier. Your
|
||||
keys are, effectively, the schema of each log message. If you use different
|
||||
keys across instances of the same log line, you will make your structured logs
|
||||
much harder to use. `Sprintf()` is for values, not for keys!
|
||||
|
||||
#### Why is this not a pure interface?
|
||||
|
||||
The Logger type is implemented as a struct in order to allow the Go compiler to
|
||||
optimize things like high-V `Info` logs that are not triggered. Not all of
|
||||
these implementations are implemented yet, but this structure was suggested as
|
||||
a way to ensure they *can* be implemented. All of the real work is behind the
|
||||
`LogSink` interface.
|
||||
|
||||
[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
18
vendor/github.com/go-logr/logr/SECURITY.md
generated
vendored
18
vendor/github.com/go-logr/logr/SECURITY.md
generated
vendored
@@ -1,18 +0,0 @@
# Security Policy

If you have discovered a security vulnerability in this project, please report it
privately. **Do not disclose it as a public issue.** This gives us time to work with you
to fix the issue before public exposure, reducing the chance that the exploit will be
used before a patch is released.

You may submit the report in the following ways:

- send an email to go-logr-security@googlegroups.com
- send us a [private vulnerability report](https://github.com/go-logr/logr/security/advisories/new)

Please provide the following information in your report:

- A description of the vulnerability and its impact
- How to reproduce the issue

We ask that you give us 90 days to work on a fix before public exposure.
33
vendor/github.com/go-logr/logr/context.go
generated
vendored
33
vendor/github.com/go-logr/logr/context.go
generated
vendored
@@ -1,33 +0,0 @@
/*
Copyright 2023 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}

// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}

func (notFoundError) Error() string {
	return "no logr.Logger was present"
}

func (notFoundError) IsNotFound() bool {
	return true
}
49
vendor/github.com/go-logr/logr/context_noslog.go
generated
vendored
49
vendor/github.com/go-logr/logr/context_noslog.go
generated
vendored
@@ -1,49 +0,0 @@
//go:build !go1.21
// +build !go1.21

/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

import (
	"context"
)

// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v, nil
	}

	return Logger{}, notFoundError{}
}

// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v
	}

	return Discard()
}

// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}
83
vendor/github.com/go-logr/logr/context_slog.go
generated
vendored
83
vendor/github.com/go-logr/logr/context_slog.go
generated
vendored
@@ -1,83 +0,0 @@
//go:build go1.21
// +build go1.21

/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

import (
	"context"
	"fmt"
	"log/slog"
)

// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
	v := ctx.Value(contextKey{})
	if v == nil {
		return Logger{}, notFoundError{}
	}

	switch v := v.(type) {
	case Logger:
		return v, nil
	case *slog.Logger:
		return FromSlogHandler(v.Handler()), nil
	default:
		// Not reached.
		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
	}
}

// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
	v := ctx.Value(contextKey{})
	if v == nil {
		return nil
	}

	switch v := v.(type) {
	case Logger:
		return slog.New(ToSlogHandler(v))
	case *slog.Logger:
		return v
	default:
		// Not reached.
		panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
	}
}

// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
	if logger, err := FromContext(ctx); err == nil {
		return logger
	}
	return Discard()
}

// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}

// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}
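context_slog.go above routes both logr.Logger values and *slog.Logger pointers through the same context key. A minimal round-trip sketch using only the functions shown in this file, assuming Go >= 1.21 and a logr release that ships these helpers:

```
package main

import (
	"context"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Store a slog.Logger pointer under the shared context key...
	ctx := logr.NewContextWithSlogLogger(context.Background(),
		slog.New(slog.NewTextHandler(os.Stderr, nil)))

	// ...and retrieve it as a logr.Logger: FromContext converts it via
	// FromSlogHandler, as in the type switch above.
	log := logr.FromContextOrDiscard(ctx)
	log.Info("request handled", "status", 200)

	// The reverse direction works through the same key.
	if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
		sl.Info("same handler, slog API")
	}
}
```

When nothing is stored in the context, FromContextOrDiscard falls back to Discard() from discard.go below.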
24
vendor/github.com/go-logr/logr/discard.go
generated
vendored
24
vendor/github.com/go-logr/logr/discard.go
generated
vendored
@@ -1,24 +0,0 @@
/*
Copyright 2020 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

// Discard returns a Logger that discards all messages logged to it. It can be
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
	return New(nil)
}
911
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
911
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
@@ -1,911 +0,0 @@
|
||||
/*
|
||||
Copyright 2021 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package funcr implements formatting of structured log messages and
|
||||
// optionally captures the call site and timestamp.
|
||||
//
|
||||
// The simplest way to use it is via its implementation of a
|
||||
// github.com/go-logr/logr.LogSink with output through an arbitrary
|
||||
// "write" function. See New and NewJSON for details.
|
||||
//
|
||||
// # Custom LogSinks
|
||||
//
|
||||
// For users who need more control, a funcr.Formatter can be embedded inside
|
||||
// your own custom LogSink implementation. This is useful when the LogSink
|
||||
// needs to implement additional methods, for example.
|
||||
//
|
||||
// # Formatting
|
||||
//
|
||||
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
|
||||
// values which are being logged. When rendering a struct, funcr will use Go's
|
||||
// standard JSON tags (all except "string").
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
// New returns a logr.Logger which is implemented by an arbitrary function.
|
||||
func New(fn func(prefix, args string), opts Options) logr.Logger {
|
||||
return logr.New(newSink(fn, NewFormatter(opts)))
|
||||
}
|
||||
|
||||
// NewJSON returns a logr.Logger which is implemented by an arbitrary function
|
||||
// and produces JSON output.
|
||||
func NewJSON(fn func(obj string), opts Options) logr.Logger {
|
||||
fnWrapper := func(_, obj string) {
|
||||
fn(obj)
|
||||
}
|
||||
return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging function. Since
|
||||
// callers only have a logr.Logger, they have to know which
|
||||
// implementation is in use, so this interface is less of an
|
||||
// abstraction and more of a way to test type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() func(prefix, args string)
|
||||
}
|
||||
|
||||
func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
|
||||
l := &fnlogger{
|
||||
Formatter: formatter,
|
||||
write: fn,
|
||||
}
|
||||
// For skipping fnlogger.Info and fnlogger.Error.
|
||||
l.Formatter.AddCallDepth(1)
|
||||
return l
|
||||
}
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
|
||||
type Options struct {
|
||||
// LogCaller tells funcr to add a "caller" key to some or all log lines.
|
||||
// This has some overhead, so some users might not want it.
|
||||
LogCaller MessageClass
|
||||
|
||||
// LogCallerFunc tells funcr to also log the calling function name. This
|
||||
// has no effect if caller logging is not enabled (see Options.LogCaller).
|
||||
LogCallerFunc bool
|
||||
|
||||
// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
|
||||
// overhead, so some users might not want it.
|
||||
LogTimestamp bool
|
||||
|
||||
// TimestampFormat tells funcr how to render timestamps when LogTimestamp
|
||||
// is enabled. If not specified, a default format will be used. For more
|
||||
// details, see docs for Go's time.Layout.
|
||||
TimestampFormat string
|
||||
|
||||
// LogInfoLevel tells funcr what key to use to log the info level.
|
||||
// If not specified, the info level will be logged as "level".
|
||||
// If this is set to "", the info level will not be logged at all.
|
||||
LogInfoLevel *string
|
||||
|
||||
// Verbosity tells funcr which V logs to produce. Higher values enable
|
||||
// more logs. Info logs at or below this level will be written, while logs
|
||||
// above this level will be discarded.
|
||||
Verbosity int
|
||||
|
||||
// RenderBuiltinsHook allows users to mutate the list of key-value pairs
|
||||
// while a log line is being rendered. The kvList argument follows logr
|
||||
// conventions - each pair of slice elements is comprised of a string key
|
||||
// and an arbitrary value (verified and sanitized before calling this
|
||||
// hook). The value returned must follow the same conventions. This hook
|
||||
// can be used to audit or modify logged data. For example, you might want
|
||||
// to prefix all of funcr's built-in keys with some string. This hook is
|
||||
// only called for built-in (provided by funcr itself) key-value pairs.
|
||||
// Equivalent hooks are offered for key-value pairs saved via
|
||||
// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
|
||||
// for user-provided pairs (see RenderArgsHook).
|
||||
RenderBuiltinsHook func(kvList []any) []any
|
||||
|
||||
// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
|
||||
// only called for key-value pairs saved via logr.Logger.WithValues. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderValuesHook func(kvList []any) []any
|
||||
|
||||
// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
|
||||
// called for key-value pairs passed directly to Info and Error. See
|
||||
// RenderBuiltinsHook for more details.
|
||||
RenderArgsHook func(kvList []any) []any
|
||||
|
||||
// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
|
||||
// that contains a struct, etc.) it may log. Every time it finds a struct,
|
||||
// slice, array, or map the depth is increased by one. When the maximum is
|
||||
// reached, the value will be converted to a string indicating that the max
|
||||
// depth has been exceeded. If this field is not specified, a default
|
||||
// value will be used.
|
||||
MaxLogDepth int
|
||||
}
|
||||
|
||||
// MessageClass indicates which category or categories of messages to consider.
|
||||
type MessageClass int
|
||||
|
||||
const (
|
||||
// None ignores all message classes.
|
||||
None MessageClass = iota
|
||||
// All considers all message classes.
|
||||
All
|
||||
// Info only considers info messages.
|
||||
Info
|
||||
// Error only considers error messages.
|
||||
Error
|
||||
)
|
||||
|
||||
// fnlogger inherits some of its LogSink implementation from Formatter
|
||||
// and just needs to add some glue code.
|
||||
type fnlogger struct {
|
||||
Formatter
|
||||
write func(prefix, args string)
|
||||
}
|
||||
|
||||
func (l fnlogger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithValues(kvList ...any) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) Info(level int, msg string, kvList ...any) {
|
||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) Error(err error, msg string, kvList ...any) {
|
||||
prefix, args := l.FormatError(err, msg, kvList)
|
||||
l.write(prefix, args)
|
||||
}
|
||||
|
||||
func (l fnlogger) GetUnderlying() func(prefix, args string) {
|
||||
return l.write
|
||||
}
|
||||
|
||||
// Assert conformance to the interfaces.
|
||||
var _ logr.LogSink = &fnlogger{}
|
||||
var _ logr.CallDepthLogSink = &fnlogger{}
|
||||
var _ Underlier = &fnlogger{}
|
||||
|
||||
// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
|
||||
func NewFormatter(opts Options) Formatter {
|
||||
return newFormatter(opts, outputKeyValue)
|
||||
}
|
||||
|
||||
// NewFormatterJSON constructs a Formatter which emits strict JSON.
|
||||
func NewFormatterJSON(opts Options) Formatter {
|
||||
return newFormatter(opts, outputJSON)
|
||||
}
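// Sketch of the two output modes (not part of the diff; assumes funcr.New and
// funcr.NewJSON from earlier in this file, and the rendered output shown is
// approximate):
//
//    kv := funcr.New(func(prefix, args string) { fmt.Println(prefix, args) }, funcr.Options{})
//    kv.Info("hello", "n", 1) // roughly: "level"=0 "msg"="hello" "n"=1
//
//    js := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
//    js.Info("hello", "n", 1) // roughly: {"logger":"","level":0,"msg":"hello","n":1}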
|
||||
|
||||
// Defaults for Options.
|
||||
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
|
||||
const defaultMaxLogDepth = 16
|
||||
|
||||
func newFormatter(opts Options, outfmt outputFormat) Formatter {
|
||||
if opts.TimestampFormat == "" {
|
||||
opts.TimestampFormat = defaultTimestampFormat
|
||||
}
|
||||
if opts.MaxLogDepth == 0 {
|
||||
opts.MaxLogDepth = defaultMaxLogDepth
|
||||
}
|
||||
if opts.LogInfoLevel == nil {
|
||||
opts.LogInfoLevel = new(string)
|
||||
*opts.LogInfoLevel = "level"
|
||||
}
|
||||
f := Formatter{
|
||||
outputFormat: outfmt,
|
||||
prefix: "",
|
||||
values: nil,
|
||||
depth: 0,
|
||||
opts: &opts,
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// Formatter is an opaque struct which can be embedded in a LogSink
|
||||
// implementation. It should be constructed with NewFormatter. Some of
|
||||
// its methods directly implement logr.LogSink.
|
||||
type Formatter struct {
|
||||
outputFormat outputFormat
|
||||
prefix string
|
||||
values []any
|
||||
valuesStr string
|
||||
parentValuesStr string
|
||||
depth int
|
||||
opts *Options
|
||||
group string // for slog groups
|
||||
groupDepth int
|
||||
}
|
||||
|
||||
// outputFormat indicates which outputFormat to use.
|
||||
type outputFormat int
|
||||
|
||||
const (
|
||||
// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
|
||||
outputKeyValue outputFormat = iota
|
||||
// outputJSON emits strict JSON.
|
||||
outputJSON
|
||||
)
|
||||
|
||||
// PseudoStruct is a list of key-value pairs that gets logged as a struct.
|
||||
type PseudoStruct []any
|
||||
|
||||
// render produces a log line, ready to use.
|
||||
func (f Formatter) render(builtins, args []any) string {
|
||||
// Empirically bytes.Buffer is faster than strings.Builder for this.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('{') // for the whole line
|
||||
}
|
||||
|
||||
vals := builtins
|
||||
if hook := f.opts.RenderBuiltinsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, false, false) // keys are ours, no need to escape
|
||||
continuing := len(builtins) > 0
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
groupDepth := f.groupDepth
|
||||
if f.group != "" {
|
||||
if f.valuesStr != "" || len(args) != 0 {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
} else {
|
||||
// The group was empty
|
||||
groupDepth--
|
||||
}
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
vals = args
|
||||
if hook := f.opts.RenderArgsHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
f.flatten(buf, vals, continuing, true) // escape user-provided keys
|
||||
|
||||
for i := 0; i < groupDepth; i++ {
|
||||
buf.WriteByte('}') // for the groups
|
||||
}
|
||||
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte('}') // for the whole line
|
||||
}
|
||||
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// flatten renders a list of key-value pairs into a buffer. If continuing is
|
||||
// true, it assumes that the buffer has previous values and will emit a
|
||||
// separator (which depends on the output format) before the first pair it
|
||||
// writes. If escapeKeys is true, the keys are assumed to have
|
||||
// non-JSON-compatible characters in them and must be evaluated for escapes.
|
||||
//
|
||||
// This function returns a potentially modified version of kvList, which
|
||||
// ensures that there is a value for every key (adding a value if needed) and
|
||||
// that each key is a string (substituting a key if needed).
|
||||
func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, escapeKeys bool) []any {
|
||||
// This logic overlaps with sanitize() but saves one type-cast per key,
|
||||
// which can be measurable.
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
copied := false
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
k, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
if !copied {
|
||||
newList := make([]any, len(kvList))
|
||||
copy(newList, kvList)
|
||||
kvList = newList
|
||||
copied = true
|
||||
}
|
||||
k = f.nonStringKey(kvList[i])
|
||||
kvList[i] = k
|
||||
}
|
||||
v := kvList[i+1]
|
||||
|
||||
if i > 0 || continuing {
|
||||
if f.outputFormat == outputJSON {
|
||||
buf.WriteByte(f.comma())
|
||||
} else {
|
||||
// In theory the format could be something we don't understand. In
|
||||
// practice, we control it, so it won't be.
|
||||
buf.WriteByte(' ')
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString(f.quoted(k, escapeKeys))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.pretty(v))
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
func (f Formatter) quoted(str string, escape bool) string {
|
||||
if escape {
|
||||
return prettyString(str)
|
||||
}
|
||||
// this is faster
|
||||
return `"` + str + `"`
|
||||
}
|
||||
|
||||
func (f Formatter) comma() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ','
|
||||
}
|
||||
return ' '
|
||||
}
|
||||
|
||||
func (f Formatter) colon() byte {
|
||||
if f.outputFormat == outputJSON {
|
||||
return ':'
|
||||
}
|
||||
return '='
|
||||
}
|
||||
|
||||
func (f Formatter) pretty(value any) string {
|
||||
return f.prettyWithFlags(value, 0, 0)
|
||||
}
|
||||
|
||||
const (
|
||||
flagRawStruct = 0x1 // do not print braces on structs
|
||||
)
|
||||
|
||||
// TODO: This is not fast. Most of the overhead goes here.
|
||||
func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
|
||||
if depth > f.opts.MaxLogDepth {
|
||||
return `"<max-log-depth-exceeded>"`
|
||||
}
|
||||
|
||||
// Handle types that take full control of logging.
|
||||
if v, ok := value.(logr.Marshaler); ok {
|
||||
// Replace the value with what the type wants to get logged.
|
||||
// That then gets handled below via reflection.
|
||||
value = invokeMarshaler(v)
|
||||
}
|
||||
|
||||
// Handle types that want to format themselves.
|
||||
switch v := value.(type) {
|
||||
case fmt.Stringer:
|
||||
value = invokeStringer(v)
|
||||
case error:
|
||||
value = invokeError(v)
|
||||
}
|
||||
|
||||
// Handling the most common types without reflect is a small perf win.
|
||||
switch v := value.(type) {
|
||||
case bool:
|
||||
return strconv.FormatBool(v)
|
||||
case string:
|
||||
return prettyString(v)
|
||||
case int:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int8:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int16:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int32:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case int64:
|
||||
return strconv.FormatInt(int64(v), 10)
|
||||
case uint:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint8:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint16:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint32:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case uint64:
|
||||
return strconv.FormatUint(v, 10)
|
||||
case uintptr:
|
||||
return strconv.FormatUint(uint64(v), 10)
|
||||
case float32:
|
||||
return strconv.FormatFloat(float64(v), 'f', -1, 32)
|
||||
case float64:
|
||||
return strconv.FormatFloat(v, 'f', -1, 64)
|
||||
case complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
|
||||
case complex128:
|
||||
return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
|
||||
case PseudoStruct:
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
v = f.sanitize(v)
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
for i := 0; i < len(v); i += 2 {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
k, _ := v[i].(string) // sanitize() above means no need to check success
|
||||
// arbitrary keys might need escaping
|
||||
buf.WriteString(prettyString(k))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 256))
|
||||
t := reflect.TypeOf(value)
|
||||
if t == nil {
|
||||
return "null"
|
||||
}
|
||||
v := reflect.ValueOf(value)
|
||||
switch t.Kind() {
|
||||
case reflect.Bool:
|
||||
return strconv.FormatBool(v.Bool())
|
||||
case reflect.String:
|
||||
return prettyString(v.String())
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return strconv.FormatInt(int64(v.Int()), 10)
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return strconv.FormatUint(uint64(v.Uint()), 10)
|
||||
case reflect.Float32:
|
||||
return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
|
||||
case reflect.Float64:
|
||||
return strconv.FormatFloat(v.Float(), 'f', -1, 64)
|
||||
case reflect.Complex64:
|
||||
return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
|
||||
case reflect.Complex128:
|
||||
return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
|
||||
case reflect.Struct:
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('{')
|
||||
}
|
||||
printComma := false // testing i>0 is not enough because of JSON omitted fields
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
fld := t.Field(i)
|
||||
if fld.PkgPath != "" {
|
||||
// reflect says this field is only defined for non-exported fields.
|
||||
continue
|
||||
}
|
||||
if !v.Field(i).CanInterface() {
|
||||
// reflect isn't clear exactly what this means, but we can't use it.
|
||||
continue
|
||||
}
|
||||
name := ""
|
||||
omitempty := false
|
||||
if tag, found := fld.Tag.Lookup("json"); found {
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
if comma := strings.Index(tag, ","); comma != -1 {
|
||||
if n := tag[:comma]; n != "" {
|
||||
name = n
|
||||
}
|
||||
rest := tag[comma:]
|
||||
if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
|
||||
omitempty = true
|
||||
}
|
||||
} else {
|
||||
name = tag
|
||||
}
|
||||
}
|
||||
if omitempty && isEmpty(v.Field(i)) {
|
||||
continue
|
||||
}
|
||||
if printComma {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
printComma = true // if we got here, we are rendering a field
|
||||
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
|
||||
continue
|
||||
}
|
||||
if name == "" {
|
||||
name = fld.Name
|
||||
}
|
||||
// field names can't contain characters which need escaping
|
||||
buf.WriteString(f.quoted(name, false))
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
|
||||
}
|
||||
if flags&flagRawStruct == 0 {
|
||||
buf.WriteByte('}')
|
||||
}
|
||||
return buf.String()
|
||||
case reflect.Slice, reflect.Array:
|
||||
// If this is outputting as JSON make sure this isn't really a json.RawMessage.
|
||||
// If so just emit "as-is" and don't pretty it as that will just print
|
||||
// it as [X,Y,Z,...] which isn't terribly useful vs the string form you really want.
|
||||
if f.outputFormat == outputJSON {
|
||||
if rm, ok := value.(json.RawMessage); ok {
|
||||
// If it's empty make sure we emit an empty value as the array style would below.
|
||||
if len(rm) > 0 {
|
||||
buf.Write(rm)
|
||||
} else {
|
||||
buf.WriteString("null")
|
||||
}
|
||||
return buf.String()
|
||||
}
|
||||
}
|
||||
buf.WriteByte('[')
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
e := v.Index(i)
|
||||
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
|
||||
}
|
||||
buf.WriteByte(']')
|
||||
return buf.String()
|
||||
case reflect.Map:
|
||||
buf.WriteByte('{')
|
||||
// This does not sort the map keys, for best perf.
|
||||
it := v.MapRange()
|
||||
i := 0
|
||||
for it.Next() {
|
||||
if i > 0 {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
// If a map key supports TextMarshaler, use it.
|
||||
keystr := ""
|
||||
if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
|
||||
txt, err := m.MarshalText()
|
||||
if err != nil {
|
||||
keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
|
||||
} else {
|
||||
keystr = string(txt)
|
||||
}
|
||||
keystr = prettyString(keystr)
|
||||
} else {
|
||||
// prettyWithFlags will produce already-escaped values
|
||||
keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
|
||||
if t.Key().Kind() != reflect.String {
|
||||
// JSON only does string keys. Unlike Go's standard JSON, we'll
|
||||
// convert just about anything to a string.
|
||||
keystr = prettyString(keystr)
|
||||
}
|
||||
}
|
||||
buf.WriteString(keystr)
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
|
||||
i++
|
||||
}
|
||||
buf.WriteByte('}')
|
||||
return buf.String()
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
if v.IsNil() {
|
||||
return "null"
|
||||
}
|
||||
return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
|
||||
}
|
||||
return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
|
||||
}
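// Illustrative sketch (not part of the diff) of how the json struct tags
// handled above shape the rendered value; the log variable is an assumption:
//
//    type user struct {
//        Name   string `json:"name"`
//        Email  string `json:"email,omitempty"`
//        Secret string `json:"-"` // never rendered
//    }
//    log.Info("created", "user", user{Name: "ada"})
//    // key=value mode, roughly: "msg"="created" "user"={"name":"ada"}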
|
||||
|
||||
func prettyString(s string) string {
|
||||
// Avoid escaping (which does allocations) if we can.
|
||||
if needsEscape(s) {
|
||||
return strconv.Quote(s)
|
||||
}
|
||||
b := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
b.WriteByte('"')
|
||||
b.WriteString(s)
|
||||
b.WriteByte('"')
|
||||
return b.String()
|
||||
}
|
||||
|
||||
// needsEscape determines whether the input string needs to be escaped or not,
|
||||
// without doing any allocations.
|
||||
func needsEscape(s string) bool {
|
||||
for _, r := range s {
|
||||
if !strconv.IsPrint(r) || r == '\\' || r == '"' {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(v reflect.Value) bool {
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
|
||||
return v.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !v.Bool()
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return v.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
|
||||
return v.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return v.Float() == 0
|
||||
case reflect.Complex64, reflect.Complex128:
|
||||
return v.Complex() == 0
|
||||
case reflect.Interface, reflect.Ptr:
|
||||
return v.IsNil()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func invokeMarshaler(m logr.Marshaler) (ret any) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return m.MarshalLog()
|
||||
}
|
||||
|
||||
func invokeStringer(s fmt.Stringer) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return s.String()
|
||||
}
|
||||
|
||||
func invokeError(e error) (ret string) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
ret = fmt.Sprintf("<panic: %s>", r)
|
||||
}
|
||||
}()
|
||||
return e.Error()
|
||||
}
|
||||
|
||||
// Caller represents the original call site for a log line, after considering
|
||||
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
|
||||
// Line fields will always be provided, while the Func field is optional.
|
||||
// Users can set the render hook fields in Options to examine logged key-value
|
||||
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
|
||||
// field is enabled for the given MessageClass.
|
||||
type Caller struct {
|
||||
// File is the basename of the file for this call site.
|
||||
File string `json:"file"`
|
||||
// Line is the line number in the file for this call site.
|
||||
Line int `json:"line"`
|
||||
// Func is the function name for this call site, or empty if
|
||||
// Options.LogCallerFunc is not enabled.
|
||||
Func string `json:"function,omitempty"`
|
||||
}
|
||||
|
||||
func (f Formatter) caller() Caller {
|
||||
// +1 for this frame, +1 for Info/Error.
|
||||
pc, file, line, ok := runtime.Caller(f.depth + 2)
|
||||
if !ok {
|
||||
return Caller{"<unknown>", 0, ""}
|
||||
}
|
||||
fn := ""
|
||||
if f.opts.LogCallerFunc {
|
||||
if fp := runtime.FuncForPC(pc); fp != nil {
|
||||
fn = fp.Name()
|
||||
}
|
||||
}
|
||||
|
||||
return Caller{filepath.Base(file), line, fn}
|
||||
}
|
||||
|
||||
const noValue = "<no-value>"
|
||||
|
||||
func (f Formatter) nonStringKey(v any) string {
|
||||
return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
|
||||
}
|
||||
|
||||
// snippet produces a short snippet string of an arbitrary value.
|
||||
func (f Formatter) snippet(v any) string {
|
||||
const snipLen = 16
|
||||
|
||||
snip := f.pretty(v)
|
||||
if len(snip) > snipLen {
|
||||
snip = snip[:snipLen]
|
||||
}
|
||||
return snip
|
||||
}
|
||||
|
||||
// sanitize ensures that a list of key-value pairs has a value for every key
|
||||
// (adding a value if needed) and that each key is a string (substituting a key
|
||||
// if needed).
|
||||
func (f Formatter) sanitize(kvList []any) []any {
|
||||
if len(kvList)%2 != 0 {
|
||||
kvList = append(kvList, noValue)
|
||||
}
|
||||
for i := 0; i < len(kvList); i += 2 {
|
||||
_, ok := kvList[i].(string)
|
||||
if !ok {
|
||||
kvList[i] = f.nonStringKey(kvList[i])
|
||||
}
|
||||
}
|
||||
return kvList
|
||||
}
|
||||
|
||||
// startGroup opens a new group scope (basically a sub-struct), which locks all
|
||||
// the current saved values and starts them anew. This is needed to satisfy
|
||||
// slog.
|
||||
func (f *Formatter) startGroup(group string) {
|
||||
// Unnamed groups are just inlined.
|
||||
if group == "" {
|
||||
return
|
||||
}
|
||||
|
||||
// Any saved values can no longer be changed.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
continuing := false
|
||||
|
||||
if f.parentValuesStr != "" {
|
||||
buf.WriteString(f.parentValuesStr)
|
||||
continuing = true
|
||||
}
|
||||
|
||||
if f.group != "" && f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
|
||||
buf.WriteByte(f.colon())
|
||||
buf.WriteByte('{') // for the group
|
||||
continuing = false
|
||||
}
|
||||
|
||||
if f.valuesStr != "" {
|
||||
if continuing {
|
||||
buf.WriteByte(f.comma())
|
||||
}
|
||||
buf.WriteString(f.valuesStr)
|
||||
}
|
||||
|
||||
// NOTE: We don't close the scope here - that's done later, when a log line
|
||||
// is actually rendered (because we have N scopes to close).
|
||||
|
||||
f.parentValuesStr = buf.String()
|
||||
|
||||
// Start collecting new values.
|
||||
f.group = group
|
||||
f.groupDepth++
|
||||
f.valuesStr = ""
|
||||
f.values = nil
|
||||
}
|
||||
|
||||
// Init configures this Formatter from runtime info, such as the call depth
|
||||
// imposed by logr itself.
|
||||
// Note that this receiver is a pointer, so depth can be saved.
|
||||
func (f *Formatter) Init(info logr.RuntimeInfo) {
|
||||
f.depth += info.CallDepth
|
||||
}
|
||||
|
||||
// Enabled checks whether an info message at the given level should be logged.
|
||||
func (f Formatter) Enabled(level int) bool {
|
||||
return level <= f.opts.Verbosity
|
||||
}
|
||||
|
||||
// GetDepth returns the current depth of this Formatter. This is useful for
|
||||
// implementations which do their own caller attribution.
|
||||
func (f Formatter) GetDepth() int {
|
||||
return f.depth
|
||||
}
|
||||
|
||||
// FormatInfo renders an Info log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, argsStr string) {
|
||||
args := make([]any, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
if key := *f.opts.LogInfoLevel; key != "" {
|
||||
args = append(args, key, level)
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// FormatError renders an Error log message into strings. The prefix will be
|
||||
// empty when no names were set (via AddNames), or when the output is
|
||||
// configured for JSON.
|
||||
func (f Formatter) FormatError(err error, msg string, kvList []any) (prefix, argsStr string) {
|
||||
args := make([]any, 0, 64) // using a constant here impacts perf
|
||||
prefix = f.prefix
|
||||
if f.outputFormat == outputJSON {
|
||||
args = append(args, "logger", prefix)
|
||||
prefix = ""
|
||||
}
|
||||
if f.opts.LogTimestamp {
|
||||
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||
}
|
||||
if policy := f.opts.LogCaller; policy == All || policy == Error {
|
||||
args = append(args, "caller", f.caller())
|
||||
}
|
||||
args = append(args, "msg", msg)
|
||||
var loggableErr any
|
||||
if err != nil {
|
||||
loggableErr = err.Error()
|
||||
}
|
||||
args = append(args, "error", loggableErr)
|
||||
return prefix, f.render(args, kvList)
|
||||
}
|
||||
|
||||
// AddName appends the specified name. funcr uses '/' characters to separate
|
||||
// name elements. Callers should not pass '/' in the provided name string, but
|
||||
// this library does not actually enforce that.
|
||||
func (f *Formatter) AddName(name string) {
|
||||
if len(f.prefix) > 0 {
|
||||
f.prefix += "/"
|
||||
}
|
||||
f.prefix += name
|
||||
}
|
||||
|
||||
// AddValues adds key-value pairs to the set of saved values to be logged with
|
||||
// each log line.
|
||||
func (f *Formatter) AddValues(kvList []any) {
|
||||
// Three slice args forces a copy.
|
||||
n := len(f.values)
|
||||
f.values = append(f.values[:n:n], kvList...)
|
||||
|
||||
vals := f.values
|
||||
if hook := f.opts.RenderValuesHook; hook != nil {
|
||||
vals = hook(f.sanitize(vals))
|
||||
}
|
||||
|
||||
// Pre-render values, so we don't have to do it on each Info/Error call.
|
||||
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||
f.flatten(buf, vals, false, true) // escape user-provided keys
|
||||
f.valuesStr = buf.String()
|
||||
}
|
||||
|
||||
// AddCallDepth increases the number of stack-frames to skip when attributing
|
||||
// the log line to a file and line.
|
||||
func (f *Formatter) AddCallDepth(depth int) {
|
||||
f.depth += depth
|
||||
}
105 vendor/github.com/go-logr/logr/funcr/slogsink.go generated vendored
@@ -1,105 +0,0 @@
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package funcr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
)
|
||||
|
||||
var _ logr.SlogSink = &fnlogger{}
|
||||
|
||||
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
|
||||
|
||||
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
return true
|
||||
})
|
||||
|
||||
if record.Level >= slog.LevelError {
|
||||
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, kvList)
|
||||
}
|
||||
l.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l fnlogger) WithGroup(name string) logr.SlogSink {
|
||||
l.startGroup(name)
|
||||
return &l
|
||||
}
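// Because fnlogger implements logr.SlogSink, a funcr logger can back a
// *slog.Logger directly. Sketch (not part of the diff; assumes funcr.New from
// funcr.go and the logr.ToSlogHandler adapter):
//
//    fl := funcr.New(func(prefix, args string) { fmt.Println(prefix, args) }, funcr.Options{})
//    sl := slog.New(logr.ToSlogHandler(fl))
//    sl.Info("hello", "n", 1)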
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, grpKVs)
|
||||
}
|
||||
if attr.Key == "" {
|
||||
// slog says we have to inline these
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else {
|
||||
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
|
||||
}
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, attr.Key, attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l fnlogger) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
520 vendor/github.com/go-logr/logr/logr.go generated vendored
@@ -1,520 +0,0 @@
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// This design derives from Dave Cheney's blog:
|
||||
// http://dave.cheney.net/2015/11/05/lets-talk-about-logging
|
||||
|
||||
// Package logr defines a general-purpose logging API and abstract interfaces
|
||||
// to back that API. Packages in the Go ecosystem can depend on this package,
|
||||
// while callers can implement logging with whatever backend is appropriate.
|
||||
//
|
||||
// # Usage
|
||||
//
|
||||
// Logging is done using a Logger instance. Logger is a concrete type with
|
||||
// methods, which defers the actual logging to a LogSink interface. The main
|
||||
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
|
||||
// are key/value pairs rather than printf-style formatted strings, emphasizing
|
||||
// "structured logging".
|
||||
//
|
||||
// With Go's standard log package, we might write:
|
||||
//
|
||||
// log.Printf("setting target value %s", targetValue)
|
||||
//
|
||||
// With logr's structured logging, we'd write:
|
||||
//
|
||||
// logger.Info("setting target", "value", targetValue)
|
||||
//
|
||||
// Errors are much the same. Instead of:
|
||||
//
|
||||
// log.Printf("failed to open the pod bay door for user %s: %v", user, err)
|
||||
//
|
||||
// We'd write:
|
||||
//
|
||||
// logger.Error(err, "failed to open the pod bay door", "user", user)
|
||||
//
|
||||
// Info() and Error() are very similar, but they are separate methods so that
|
||||
// LogSink implementations can choose to do things like attach additional
|
||||
// information (such as stack traces) on calls to Error(). Error() messages are
|
||||
// always logged, regardless of the current verbosity. If there is no error
|
||||
// instance available, passing nil is valid.
|
||||
//
|
||||
// # Verbosity
|
||||
//
|
||||
// Often we want to log information only when the application is in "verbose
|
||||
// mode". To write log lines that are more verbose, Logger has a V() method.
|
||||
// The higher the V-level of a log line, the less critical it is considered.
|
||||
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
|
||||
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
|
||||
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
|
||||
// Error messages do not have a verbosity level and are always logged.
|
||||
//
|
||||
// Where we might have written:
|
||||
//
|
||||
// if flVerbose >= 2 {
|
||||
// log.Printf("an unusual thing happened")
|
||||
// }
|
||||
//
|
||||
// We can write:
|
||||
//
|
||||
// logger.V(2).Info("an unusual thing happened")
|
||||
//
|
||||
// # Logger Names
|
||||
//
|
||||
// Logger instances can have name strings so that all messages logged through
|
||||
// that instance have additional context. For example, you might want to add
|
||||
// a subsystem name:
|
||||
//
|
||||
// logger.WithName("compactor").Info("started", "time", time.Now())
|
||||
//
|
||||
// The WithName() method returns a new Logger, which can be passed to
|
||||
// constructors or other functions for further use. Repeated use of WithName()
|
||||
// will accumulate name "segments". These name segments will be joined in some
|
||||
// way by the LogSink implementation. It is strongly recommended that name
|
||||
// segments contain simple identifiers (letters, digits, and hyphen), and do
|
||||
// not contain characters that could muddle the log output or confuse the
|
||||
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
|
||||
// quotes, etc).
|
||||
//
|
||||
// # Saved Values
|
||||
//
|
||||
// Logger instances can store any number of key/value pairs, which will be
|
||||
// logged alongside all messages logged through that instance. For example,
|
||||
// you might want to create a Logger instance per managed object:
|
||||
//
|
||||
// With the standard log package, we might write:
|
||||
//
|
||||
// log.Printf("decided to set field foo to value %q for object %s/%s",
|
||||
// targetValue, object.Namespace, object.Name)
|
||||
//
|
||||
// With logr we'd write:
|
||||
//
|
||||
// // Elsewhere: set up the logger to log the object name.
|
||||
// obj.logger = mainLogger.WithValues(
|
||||
// "name", obj.name, "namespace", obj.namespace)
|
||||
//
|
||||
// // later on...
|
||||
// obj.logger.Info("setting foo", "value", targetValue)
|
||||
//
|
||||
// # Best Practices
|
||||
//
|
||||
// Logger has very few hard rules, with the goal that LogSink implementations
|
||||
// might have a lot of freedom to differentiate. There are, however, some
|
||||
// things to consider.
|
||||
//
|
||||
// The log message consists of a constant message attached to the log line.
|
||||
// This should generally be a simple description of what's occurring, and should
|
||||
// never be a format string. Variable information can then be attached using
|
||||
// named values.
|
||||
//
|
||||
// Keys are arbitrary strings, but should generally be constant values. Values
|
||||
// may be any Go value, but how the value is formatted is determined by the
|
||||
// LogSink implementation.
|
||||
//
|
||||
// Logger instances are meant to be passed around by value. Code that receives
|
||||
// such a value can call its methods without having to check whether the
|
||||
// instance is ready for use.
|
||||
//
|
||||
// The zero logger (= Logger{}) is identical to Discard() and discards all log
|
||||
// entries. Code that receives a Logger by value can simply call it; the methods
|
||||
// will never crash. For cases where passing a logger is optional, a pointer to Logger
|
||||
// should be used.
|
||||
//
|
||||
// # Key Naming Conventions
|
||||
//
|
||||
// Keys are not strictly required to conform to any specification or regex, but
|
||||
// it is recommended that they:
|
||||
// - be human-readable and meaningful (not auto-generated or simple ordinals)
|
||||
// - be constant (not dependent on input data)
|
||||
// - contain only printable characters
|
||||
// - not contain whitespace or punctuation
|
||||
// - use lower case for simple keys and lowerCamelCase for more complex ones
|
||||
//
|
||||
// These guidelines help ensure that log data is processed properly regardless
|
||||
// of the log implementation. For example, log implementations will try to
|
||||
// output JSON data or will store data for later database (e.g. SQL) queries.
|
||||
//
|
||||
// While users are generally free to use key names of their choice, it's
|
||||
// generally best to avoid using the following keys, as they're frequently used
|
||||
// by implementations:
|
||||
// - "caller": the calling information (file/line) of a particular log line
|
||||
// - "error": the underlying error value in the `Error` method
|
||||
// - "level": the log level
|
||||
// - "logger": the name of the associated logger
|
||||
// - "msg": the log message
|
||||
// - "stacktrace": the stack trace associated with a particular log line or
|
||||
// error (often from the `Error` message)
|
||||
// - "ts": the timestamp for a log line
|
||||
//
|
||||
// Implementations are encouraged to make use of these keys to represent the
|
||||
// above concepts, when necessary (for example, in a pure-JSON output form, it
|
||||
// would be necessary to represent at least message and timestamp as ordinary
|
||||
// named values).
|
||||
//
|
||||
// # Break Glass
|
||||
//
|
||||
// Implementations may choose to give callers access to the underlying
|
||||
// logging implementation. The recommended pattern for this is:
|
||||
//
|
||||
// // Underlier exposes access to the underlying logging implementation.
|
||||
// // Since callers only have a logr.Logger, they have to know which
|
||||
// // implementation is in use, so this interface is less of an abstraction
|
||||
// // and more of a way to test type conversion.
|
||||
// type Underlier interface {
|
||||
// GetUnderlying() <underlying-type>
|
||||
// }
|
||||
//
|
||||
// Logger grants access to the sink to enable type assertions like this:
|
||||
//
|
||||
// func DoSomethingWithImpl(log logr.Logger) {
|
||||
// if underlier, ok := log.GetSink().(impl.Underlier); ok {
|
||||
// implLogger := underlier.GetUnderlying()
|
||||
// ...
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// Custom `With*` functions can be implemented by copying the complete
|
||||
// Logger struct and replacing the sink in the copy:
|
||||
//
|
||||
// // WithFooBar changes the foobar parameter in the log sink and returns a
|
||||
// // new logger with that modified sink. It does nothing for loggers where
|
||||
// // the sink doesn't support that parameter.
|
||||
// func WithFoobar(log logr.Logger, foobar int) logr.Logger {
|
||||
// if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
|
||||
// log = log.WithSink(foobarLogSink.WithFooBar(foobar))
|
||||
// }
|
||||
// return log
|
||||
// }
|
||||
//
|
||||
// Don't use New to construct a new Logger with a LogSink retrieved from an
|
||||
// existing Logger. Source code attribution might not work correctly and
|
||||
// unexported fields in Logger get lost.
|
||||
//
|
||||
// Beware that the same LogSink instance may be shared by different logger
|
||||
// instances. Calling functions that modify the LogSink will affect all of
|
||||
// those.
|
||||
package logr
|
||||
|
||||
// New returns a new Logger instance. This is primarily used by libraries
|
||||
// implementing LogSink, rather than end users. Passing a nil sink will create
|
||||
// a Logger which discards all log lines.
|
||||
func New(sink LogSink) Logger {
|
||||
logger := Logger{}
|
||||
logger.setSink(sink)
|
||||
if sink != nil {
|
||||
sink.Init(runtimeInfo)
|
||||
}
|
||||
return logger
|
||||
}
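// A minimal sketch (not part of the diff) of a do-nothing sink handing out
// Loggers via New; names like nopSink are assumptions:
//
//    type nopSink struct{}
//
//    func (nopSink) Init(logr.RuntimeInfo)            {}
//    func (nopSink) Enabled(int) bool                 { return false }
//    func (nopSink) Info(int, string, ...any)         {}
//    func (nopSink) Error(error, string, ...any)      {}
//    func (s nopSink) WithValues(...any) logr.LogSink { return s }
//    func (s nopSink) WithName(string) logr.LogSink   { return s }
//
//    func NewNop() logr.Logger { return logr.New(nopSink{}) }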
|
||||
|
||||
// setSink stores the sink and updates any related fields. It mutates the
|
||||
// logger and thus is only safe to use for loggers that are not currently being
|
||||
// used concurrently.
|
||||
func (l *Logger) setSink(sink LogSink) {
|
||||
l.sink = sink
|
||||
}
|
||||
|
||||
// GetSink returns the stored sink.
|
||||
func (l Logger) GetSink() LogSink {
|
||||
return l.sink
|
||||
}
|
||||
|
||||
// WithSink returns a copy of the logger with the new sink.
|
||||
func (l Logger) WithSink(sink LogSink) Logger {
|
||||
l.setSink(sink)
|
||||
return l
|
||||
}
|
||||
|
||||
// Logger is an interface to an abstract logging implementation. This is a
|
||||
// concrete type for performance reasons, but all the real work is passed on to
|
||||
// a LogSink. Implementations of LogSink should provide their own constructors
|
||||
// that return Logger, not LogSink.
|
||||
//
|
||||
// The underlying sink can be accessed through GetSink and be modified through
|
||||
// WithSink. This enables the implementation of custom extensions (see "Break
|
||||
// Glass" in the package documentation). Normally the sink should be used only
|
||||
// indirectly.
|
||||
type Logger struct {
|
||||
sink LogSink
|
||||
level int
|
||||
}
|
||||
|
||||
// Enabled tests whether this Logger is enabled. For example, commandline
|
||||
// flags might be used to set the logging verbosity and disable some info logs.
|
||||
func (l Logger) Enabled() bool {
|
||||
// Some implementations of LogSink look at the caller in Enabled (e.g.
|
||||
// different verbosity levels per package or file), but we only pass one
|
||||
// CallDepth in (via Init). This means that all calls from Logger to the
|
||||
// LogSink's Enabled, Info, and Error methods must have the same number of
|
||||
// frames. In other words, Logger methods can't call other Logger methods
|
||||
// which call these LogSink methods unless we do it the same in all paths.
|
||||
return l.sink != nil && l.sink.Enabled(l.level)
|
||||
}
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
//
|
||||
// The msg argument should be used to add some constant description to the log
|
||||
// line. The key/value pairs can then be used to add additional variable
|
||||
// information. The key/value pairs must alternate string keys and arbitrary
|
||||
// values.
|
||||
func (l Logger) Info(msg string, keysAndValues ...any) {
|
||||
if l.sink == nil {
|
||||
return
|
||||
}
|
||||
if l.sink.Enabled(l.level) { // see comment in Enabled
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Info(l.level, msg, keysAndValues...)
|
||||
}
|
||||
}
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as context.
|
||||
// It functions similarly to Info, but may have unique behavior, and should be
|
||||
// preferred for logging errors (see the package documentations for more
|
||||
// information). The log message will always be emitted, regardless of
|
||||
// verbosity level.
|
||||
//
|
||||
// The msg argument should be used to add context to any underlying error,
|
||||
// while the err argument should be used to attach the actual error that
|
||||
// triggered this log line, if present. The err parameter is optional
|
||||
// and nil may be passed instead of an error instance.
|
||||
func (l Logger) Error(err error, msg string, keysAndValues ...any) {
|
||||
if l.sink == nil {
|
||||
return
|
||||
}
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
withHelper.GetCallStackHelper()()
|
||||
}
|
||||
l.sink.Error(err, msg, keysAndValues...)
|
||||
}
|
||||
|
||||
// V returns a new Logger instance for a specific verbosity level, relative to
|
||||
// this Logger. In other words, V-levels are additive. A higher verbosity
|
||||
// level means a log message is less important. Negative V-levels are treated
|
||||
// as 0.
|
||||
func (l Logger) V(level int) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if level < 0 {
|
||||
level = 0
|
||||
}
|
||||
l.level += level
|
||||
return l
|
||||
}
|
||||
|
||||
// GetV returns the verbosity level of the logger. If the logger's LogSink is
|
||||
// nil as in the Discard logger, this will always return 0.
|
||||
func (l Logger) GetV() int {
|
||||
// 0 if l.sink is nil, because of the check in V above.
|
||||
return l.level
|
||||
}
|
||||
|
||||
// WithValues returns a new Logger instance with additional key/value pairs.
|
||||
// See Info for documentation on how key/value pairs work.
|
||||
func (l Logger) WithValues(keysAndValues ...any) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
l.setSink(l.sink.WithValues(keysAndValues...))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithName returns a new Logger instance with the specified name element added
|
||||
// to the Logger's name. Successive calls to WithName append additional
|
||||
// suffixes to the Logger's name. It's strongly recommended that name segments
|
||||
// contain only letters, digits, and hyphens (see the package documentation for
|
||||
// more information).
|
||||
func (l Logger) WithName(name string) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
l.setSink(l.sink.WithName(name))
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallDepth returns a Logger instance that offsets the call stack by the
|
||||
// specified number of frames when logging call site information, if possible.
|
||||
// This is useful for users who have helper functions between the "real" call
|
||||
// site and the actual calls to Logger methods. If depth is 0 the attribution
|
||||
// should be to the direct caller of this function. If depth is 1 the
|
||||
// attribution should skip 1 call frame, and so on. Successive calls to this
|
||||
// are additive.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// it will be called and the result returned. If the implementation does not
|
||||
// support CallDepthLogSink, the original Logger will be returned.
|
||||
//
|
||||
// To skip one level, WithCallStackHelper() should be used instead of
|
||||
// WithCallDepth(1) because it works with implementations that support the
|
||||
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
|
||||
func (l Logger) WithCallDepth(depth int) Logger {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(depth))
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
// WithCallStackHelper returns a new Logger instance that skips the direct
|
||||
// caller when logging call site information, if possible. This is useful for
|
||||
// users who have helper functions between the "real" call site and the actual
|
||||
// calls to Logger methods and want to support loggers which depend on marking
|
||||
// each individual helper function, like loggers based on testing.T.
|
||||
//
|
||||
// In addition to using that new logger instance, callers also must call the
|
||||
// returned function.
|
||||
//
|
||||
// If the underlying log implementation supports a WithCallDepth(int) method,
|
||||
// WithCallDepth(1) will be called to produce a new logger. If it supports a
|
||||
// WithCallStackHelper() method, that will be also called. If the
|
||||
// implementation does not support either of these, the original Logger will be
|
||||
// returned.
|
||||
func (l Logger) WithCallStackHelper() (func(), Logger) {
|
||||
if l.sink == nil {
|
||||
return func() {}, l
|
||||
}
|
||||
var helper func()
|
||||
if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
|
||||
l.setSink(withCallDepth.WithCallDepth(1))
|
||||
}
|
||||
if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
|
||||
helper = withHelper.GetCallStackHelper()
|
||||
} else {
|
||||
helper = func() {}
|
||||
}
|
||||
return helper, l
|
||||
}
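// Sketch of the helper pattern described above (not part of the diff; the
// function and variable names are assumptions):
//
//    func logAndWrap(logger logr.Logger, err error) error {
//        helper, logger := logger.WithCallStackHelper()
//        helper() // attribute the call site to our caller, not to this helper
//        logger.Error(err, "operation failed")
//        return fmt.Errorf("wrapped: %w", err)
//    }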
|
||||
|
||||
// IsZero returns true if this logger is an uninitialized zero value.
|
||||
func (l Logger) IsZero() bool {
|
||||
return l.sink == nil
|
||||
}
|
||||
|
||||
// RuntimeInfo holds information that the logr "core" library knows which
|
||||
// LogSinks might want to know.
|
||||
type RuntimeInfo struct {
|
||||
// CallDepth is the number of call frames the logr library adds between the
|
||||
// end-user and the LogSink. LogSink implementations which choose to print
|
||||
// the original logging site (e.g. file & line) should climb this many
|
||||
// additional frames to find it.
|
||||
CallDepth int
|
||||
}
|
||||
|
||||
// runtimeInfo is a static global. It must not be changed at run time.
|
||||
var runtimeInfo = RuntimeInfo{
|
||||
CallDepth: 1,
|
||||
}
|
||||
|
||||
// LogSink represents a logging implementation. End-users will generally not
|
||||
// interact with this type.
|
||||
type LogSink interface {
|
||||
// Init receives optional information about the logr library for LogSink
|
||||
// implementations that need it.
|
||||
Init(info RuntimeInfo)
|
||||
|
||||
// Enabled tests whether this LogSink is enabled at the specified V-level.
|
||||
// For example, commandline flags might be used to set the logging
|
||||
// verbosity and disable some info logs.
|
||||
Enabled(level int) bool
|
||||
|
||||
// Info logs a non-error message with the given key/value pairs as context.
|
||||
// The level argument is provided for optional logging. This method will
|
||||
// only be called when Enabled(level) is true. See Logger.Info for more
|
||||
// details.
|
||||
Info(level int, msg string, keysAndValues ...any)
|
||||
|
||||
// Error logs an error, with the given message and key/value pairs as
|
||||
// context. See Logger.Error for more details.
|
||||
Error(err error, msg string, keysAndValues ...any)
|
||||
|
||||
// WithValues returns a new LogSink with additional key/value pairs. See
|
||||
// Logger.WithValues for more details.
|
||||
WithValues(keysAndValues ...any) LogSink
|
||||
|
||||
// WithName returns a new LogSink with the specified name appended. See
|
||||
// Logger.WithName for more details.
|
||||
WithName(name string) LogSink
|
||||
}
|
||||
|
||||
// CallDepthLogSink represents a LogSink that knows how to climb the call stack
|
||||
// to identify the original call site and can offset the depth by a specified
|
||||
// number of frames. This is useful for users who have helper functions
|
||||
// between the "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as file,
|
||||
// function, or line) would otherwise log information about the intermediate
|
||||
// helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required to
|
||||
// support it.
|
||||
type CallDepthLogSink interface {
|
||||
// WithCallDepth returns a LogSink that will offset the call
|
||||
// stack by the specified number of frames when logging call
|
||||
// site information.
|
||||
//
|
||||
// If depth is 0, the LogSink should skip exactly the number
|
||||
// of call frames defined in RuntimeInfo.CallDepth when Info
|
||||
// or Error are called, i.e. the attribution should be to the
|
||||
// direct caller of Logger.Info or Logger.Error.
|
||||
//
|
||||
// If depth is 1 the attribution should skip 1 call frame, and so on.
|
||||
// Successive calls to this are additive.
|
||||
WithCallDepth(depth int) LogSink
|
||||
}
|
||||
|
||||
// CallStackHelperLogSink represents a LogSink that knows how to climb
|
||||
// the call stack to identify the original call site and can skip
|
||||
// intermediate helper functions if they mark themselves as
|
||||
// helper. Go's testing package uses that approach.
|
||||
//
|
||||
// This is useful for users who have helper functions between the
|
||||
// "real" call site and the actual calls to Logger methods.
|
||||
// Implementations that log information about the call site (such as
|
||||
// file, function, or line) would otherwise log information about the
|
||||
// intermediate helper functions.
|
||||
//
|
||||
// This is an optional interface and implementations are not required
|
||||
// to support it. Implementations that choose to support this must not
|
||||
// simply implement it as WithCallDepth(1), because
|
||||
// Logger.WithCallStackHelper will call both methods if they are
|
||||
// present. This should only be implemented for LogSinks that actually
|
||||
// need it, as with testing.T.
|
||||
type CallStackHelperLogSink interface {
|
||||
// GetCallStackHelper returns a function that must be called
|
||||
// to mark the direct caller as helper function when logging
|
||||
// call site information.
|
||||
GetCallStackHelper() func()
|
||||
}
|
||||
|
||||
// Marshaler is an optional interface that logged values may choose to
|
||||
// implement. Loggers with structured output, such as JSON, should
|
||||
// log the object returned by the MarshalLog method instead of the
|
||||
// original value.
|
||||
type Marshaler interface {
|
||||
// MarshalLog can be used to:
|
||||
// - ensure that structs are not logged as strings when the original
|
||||
// value has a String method: return a different type without a
|
||||
// String method
|
||||
// - select which fields of a complex type should get logged:
|
||||
// return a simpler struct with fewer fields
|
||||
// - log unexported fields: return a different struct
|
||||
// with exported fields
|
||||
//
|
||||
// It may return any value of any type.
|
||||
MarshalLog() any
|
||||
}
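// Sketch of a Marshaler that keeps a secret out of the logs (not part of the
// diff; the token type is an assumption):
//
//    type token struct {
//        ID     string
//        Secret string
//    }
//
//    func (t token) MarshalLog() any {
//        // Only the ID is handed to the sink; Secret never reaches the output.
//        return struct{ ID string }{ID: t.ID}
//    }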
192 vendor/github.com/go-logr/logr/sloghandler.go generated vendored
@@ -1,192 +0,0 @@
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
type slogHandler struct {
|
||||
// May be nil, in which case all logs get discarded.
|
||||
sink LogSink
|
||||
// Non-nil if sink is non-nil and implements SlogSink.
|
||||
slogSink SlogSink
|
||||
|
||||
// groupPrefix collects values from WithGroup calls. It gets added as
|
||||
// prefix to value keys when handling a log record.
|
||||
groupPrefix string
|
||||
|
||||
// levelBias can be set when constructing the handler to influence the
|
||||
// slog.Level of log records. A positive levelBias reduces the
|
||||
// slog.Level value. slog has no API to influence this value after the
|
||||
// handler got created, so it can only be set indirectly through
|
||||
// Logger.V.
|
||||
levelBias slog.Level
|
||||
}
|
||||
|
||||
var _ slog.Handler = &slogHandler{}
|
||||
|
||||
// groupSeparator is used to concatenate WithGroup names and attribute keys.
|
||||
const groupSeparator = "."
|
||||
|
||||
// GetLevel is used for black box unit testing.
|
||||
func (l *slogHandler) GetLevel() slog.Level {
|
||||
return l.levelBias
|
||||
}
|
||||
|
||||
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
|
||||
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
|
||||
}
|
||||
|
||||
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
|
||||
if l.slogSink != nil {
|
||||
// Only adjust verbosity level of log entries < slog.LevelError.
|
||||
if record.Level < slog.LevelError {
|
||||
record.Level -= l.levelBias
|
||||
}
|
||||
return l.slogSink.Handle(ctx, record)
|
||||
}
|
||||
|
||||
// No need to check for nil sink here because Handle will only be called
|
||||
// when Enabled returned true.
|
||||
|
||||
kvList := make([]any, 0, 2*record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
return true
|
||||
})
|
||||
if record.Level >= slog.LevelError {
|
||||
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
|
||||
} else {
|
||||
level := l.levelFromSlog(record.Level)
|
||||
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
|
||||
// are called by Handle, code in slog gets skipped.
|
||||
//
|
||||
// This offset currently (Go 1.21.0) works for calls through
|
||||
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
|
||||
// chain won't change. Wrapping the handler will also break unwinding. It's
|
||||
// still better than not adjusting at all....
|
||||
//
|
||||
// This cannot be done when constructing the handler because FromSlogHandler needs
|
||||
// access to the original sink without this adjustment. A second copy would
|
||||
// work, but then WithAttrs would have to be called for both of them.
|
||||
func (l *slogHandler) sinkWithCallDepth() LogSink {
|
||||
if sink, ok := l.sink.(CallDepthLogSink); ok {
|
||||
return sink.WithCallDepth(2)
|
||||
}
|
||||
return l.sink
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
|
||||
if l.sink == nil || len(attrs) == 0 {
|
||||
return l
|
||||
}
|
||||
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithAttrs(attrs)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
kvList := make([]any, 0, 2*len(attrs))
|
||||
for _, attr := range attrs {
|
||||
kvList = attrToKVs(attr, l.groupPrefix, kvList)
|
||||
}
|
||||
clone.sink = l.sink.WithValues(kvList...)
|
||||
}
|
||||
return &clone
|
||||
}
|
||||
|
||||
func (l *slogHandler) WithGroup(name string) slog.Handler {
|
||||
if l.sink == nil {
|
||||
return l
|
||||
}
|
||||
if name == "" {
|
||||
// slog says to inline empty groups
|
||||
return l
|
||||
}
|
||||
clone := *l
|
||||
if l.slogSink != nil {
|
||||
clone.slogSink = l.slogSink.WithGroup(name)
|
||||
clone.sink = clone.slogSink
|
||||
} else {
|
||||
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
|
||||
}
|
||||
return &clone
|
||||
}
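// For example (sketch, not part of the diff), attributes added under a group
// are flattened with the "." separator when the sink is not a SlogSink; the
// someLogrLogger variable is an assumption:
//
//    slogger := slog.New(logr.ToSlogHandler(someLogrLogger))
//    slogger.WithGroup("http").Info("request", "status", 200)
//    // the sink receives the key "http.status"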
|
||||
|
||||
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
|
||||
// and other details of slog.
|
||||
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
|
||||
attrVal := attr.Value.Resolve()
|
||||
if attrVal.Kind() == slog.KindGroup {
|
||||
groupVal := attrVal.Group()
|
||||
grpKVs := make([]any, 0, 2*len(groupVal))
|
||||
prefix := groupPrefix
|
||||
if attr.Key != "" {
|
||||
prefix = addPrefix(groupPrefix, attr.Key)
|
||||
}
|
||||
for _, attr := range groupVal {
|
||||
grpKVs = attrToKVs(attr, prefix, grpKVs)
|
||||
}
|
||||
kvList = append(kvList, grpKVs...)
|
||||
} else if attr.Key != "" {
|
||||
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
|
||||
}
|
||||
|
||||
return kvList
|
||||
}
|
||||
|
||||
func addPrefix(prefix, name string) string {
|
||||
if prefix == "" {
|
||||
return name
|
||||
}
|
||||
if name == "" {
|
||||
return prefix
|
||||
}
|
||||
return prefix + groupSeparator + name
|
||||
}
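The two helpers above (attrToKVs and addPrefix) are what flatten slog groups into dot-separated keys whenever the underlying sink is a plain LogSink. A minimal, hedged sketch of the effect, assuming only the vendored go-logr/logr and go-logr/stdr packages; the message text and keys are invented for illustration:

```go
package main

import (
	"log"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

func main() {
	// stdr's sink is a plain LogSink, so slog groups reaching it are
	// flattened by attrToKVs/addPrefix using groupSeparator (".").
	sink := stdr.New(log.New(os.Stdout, "", 0))
	slogger := slog.New(logr.ToSlogHandler(sink))

	slogger.WithGroup("http").Info("request done",
		slog.Group("status", slog.Int("code", 200)),
		slog.String("method", "GET"),
	)
	// The sink receives the keys "http.status.code" and "http.method".
}
```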
|
||||
|
||||
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
|
||||
// It ensures that the result is >= 0. This is necessary because the result is
|
||||
// passed to a LogSink and that API did not historically document whether
|
||||
// levels could be negative or what that meant.
|
||||
//
|
||||
// Some example usage:
|
||||
//
|
||||
// logrV0 := getMyLogger()
|
||||
// logrV2 := logrV0.V(2)
|
||||
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
|
||||
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
|
||||
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
|
||||
// slogv2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
|
||||
func (l *slogHandler) levelFromSlog(level slog.Level) int {
|
||||
result := -level
|
||||
result += l.levelBias // in case the original Logger had a V level
|
||||
if result < 0 {
|
||||
result = 0 // because LogSink doesn't expect negative V levels
|
||||
}
|
||||
return int(result)
|
||||
}
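A small self-contained sketch (not part of the vendored code) that restates this arithmetic for a handler built from a Logger with V(2), i.e. levelBias == 2:

```go
package main

import (
	"fmt"
	"log/slog"
)

// levelFromSlogSketch mirrors the function above: negate the slog level,
// add the Logger's V offset, and clamp at zero.
func levelFromSlogSketch(level, levelBias slog.Level) int {
	result := -level + levelBias
	if result < 0 {
		result = 0 // LogSink levels are never negative
	}
	return int(result)
}

func main() {
	for _, l := range []slog.Level{slog.LevelDebug, slog.LevelInfo, slog.LevelWarn} {
		fmt.Printf("%v -> V(%d)\n", l, levelFromSlogSketch(l, 2))
	}
	// Prints: DEBUG -> V(6), INFO -> V(2), WARN -> V(0)
}
```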
|
100
vendor/github.com/go-logr/logr/slogr.go
generated
vendored
@@ -1,100 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
)
|
||||
|
||||
// FromSlogHandler returns a Logger which writes to the slog.Handler.
|
||||
//
|
||||
// The logr verbosity level is mapped to slog levels such that V(0) becomes
|
||||
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
|
||||
func FromSlogHandler(handler slog.Handler) Logger {
|
||||
if handler, ok := handler.(*slogHandler); ok {
|
||||
if handler.sink == nil {
|
||||
return Discard()
|
||||
}
|
||||
return New(handler.sink).V(int(handler.levelBias))
|
||||
}
|
||||
return New(&slogSink{handler: handler})
|
||||
}
|
||||
|
||||
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
|
||||
//
|
||||
// The returned logger writes all records with level >= slog.LevelError as
|
||||
// error log entries with LogSink.Error, regardless of the verbosity level of
|
||||
// the Logger:
|
||||
//
|
||||
// logger := <some Logger with 0 as verbosity level>
|
||||
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
|
||||
//
|
||||
// The level of all other records gets reduced by the verbosity
|
||||
// level of the Logger and the result is negated. If it happens
|
||||
// to be negative, then it gets replaced by zero because a LogSink
|
||||
// is not expected to handled negative levels:
|
||||
//
|
||||
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
|
||||
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
|
||||
func ToSlogHandler(logger Logger) slog.Handler {
|
||||
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
|
||||
return sink.handler
|
||||
}
|
||||
|
||||
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
|
||||
if slogSink, ok := handler.sink.(SlogSink); ok {
|
||||
handler.slogSink = slogSink
|
||||
}
|
||||
return handler
|
||||
}
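A hedged round-trip sketch of the two constructors, assuming only the standard library slog handlers and the functions shown above; it exercises both the V-level mapping and the LevelError special case:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// slog.Handler -> logr.Logger: V(0) maps to Info, V(4) to Debug.
	h := slog.NewTextHandler(os.Stderr, &slog.HandlerOptions{Level: slog.LevelDebug})
	lgr := logr.FromSlogHandler(h)
	lgr.Info("info-level record")       // emitted at slog.LevelInfo
	lgr.V(4).Info("debug-level record") // emitted at slog.LevelDebug

	// logr.Logger -> slog.Handler: error records bypass the verbosity gate,
	// so this is delivered even though the Logger sits at V(10).
	slogger := slog.New(logr.ToSlogHandler(lgr.V(10)))
	slogger.Error("still routed to the sink's error path")
}
```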
|
||||
|
||||
// SlogSink is an optional interface that a LogSink can implement to support
|
||||
// logging through the slog.Logger or slog.Handler APIs better. It then should
|
||||
// also support special slog values like slog.Group. When used as a
|
||||
// slog.Handler, the advantages are:
|
||||
//
|
||||
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
|
||||
// as intended by slog
|
||||
// - proper grouping of key/value pairs via WithGroup
|
||||
// - verbosity levels > slog.LevelInfo can be recorded
|
||||
// - less overhead
|
||||
//
|
||||
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
|
||||
// well. Developers can pick whatever API suits them better and/or mix
|
||||
// packages which use either API in the same binary with a common logging
|
||||
// implementation.
|
||||
//
|
||||
// This interface is necessary because the type implementing the LogSink
|
||||
// interface cannot also implement the slog.Handler interface due to the
|
||||
// different prototype of the common Enabled method.
|
||||
//
|
||||
// An implementation could support both interfaces in two different types, but then
|
||||
// additional interfaces would be needed to convert between those types in FromSlogHandler
|
||||
// and ToSlogHandler.
|
||||
type SlogSink interface {
|
||||
LogSink
|
||||
|
||||
Handle(ctx context.Context, record slog.Record) error
|
||||
WithAttrs(attrs []slog.Attr) SlogSink
|
||||
WithGroup(name string) SlogSink
|
||||
}
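Whether a particular sink takes this optimized path can be checked at runtime with an ordinary type assertion; the probe below is purely illustrative:

```go
package main

import (
	"fmt"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	lgr := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	// SlogSink is optional, so a plain interface check tells us whether
	// records would be handed over as-is or converted to key/value lists.
	_, ok := lgr.GetSink().(logr.SlogSink)
	fmt.Println("sink implements SlogSink:", ok)
}
```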
|
120
vendor/github.com/go-logr/logr/slogsink.go
generated
vendored
@@ -1,120 +0,0 @@
|
||||
//go:build go1.21
|
||||
// +build go1.21
|
||||
|
||||
/*
|
||||
Copyright 2023 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package logr
|
||||
|
||||
import (
|
||||
"context"
|
||||
"log/slog"
|
||||
"runtime"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
_ LogSink = &slogSink{}
|
||||
_ CallDepthLogSink = &slogSink{}
|
||||
_ Underlier = &slogSink{}
|
||||
)
|
||||
|
||||
// Underlier is implemented by the LogSink returned by NewFromLogHandler.
|
||||
type Underlier interface {
|
||||
// GetUnderlying returns the Handler used by the LogSink.
|
||||
GetUnderlying() slog.Handler
|
||||
}
|
||||
|
||||
const (
|
||||
// nameKey is used to log the `WithName` values as an additional attribute.
|
||||
nameKey = "logger"
|
||||
|
||||
// errKey is used to log the error parameter of Error as an additional attribute.
|
||||
errKey = "err"
|
||||
)
|
||||
|
||||
type slogSink struct {
|
||||
callDepth int
|
||||
name string
|
||||
handler slog.Handler
|
||||
}
|
||||
|
||||
func (l *slogSink) Init(info RuntimeInfo) {
|
||||
l.callDepth = info.CallDepth
|
||||
}
|
||||
|
||||
func (l *slogSink) GetUnderlying() slog.Handler {
|
||||
return l.handler
|
||||
}
|
||||
|
||||
func (l *slogSink) WithCallDepth(depth int) LogSink {
|
||||
newLogger := *l
|
||||
newLogger.callDepth += depth
|
||||
return &newLogger
|
||||
}
|
||||
|
||||
func (l *slogSink) Enabled(level int) bool {
|
||||
return l.handler.Enabled(context.Background(), slog.Level(-level))
|
||||
}
|
||||
|
||||
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
|
||||
l.log(nil, msg, slog.Level(-level), kvList...)
|
||||
}
|
||||
|
||||
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
|
||||
l.log(err, msg, slog.LevelError, kvList...)
|
||||
}
|
||||
|
||||
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
|
||||
var pcs [1]uintptr
|
||||
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
|
||||
runtime.Callers(3+l.callDepth, pcs[:])
|
||||
|
||||
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
|
||||
if l.name != "" {
|
||||
record.AddAttrs(slog.String(nameKey, l.name))
|
||||
}
|
||||
if err != nil {
|
||||
record.AddAttrs(slog.Any(errKey, err))
|
||||
}
|
||||
record.Add(kvList...)
|
||||
_ = l.handler.Handle(context.Background(), record)
|
||||
}
|
||||
|
||||
func (l slogSink) WithName(name string) LogSink {
|
||||
if l.name != "" {
|
||||
l.name += "/"
|
||||
}
|
||||
l.name += name
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
|
||||
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
|
||||
return &l
|
||||
}
|
||||
|
||||
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
|
||||
// We don't need the record itself, only its Add method.
|
||||
record := slog.NewRecord(time.Time{}, 0, "", 0)
|
||||
record.Add(kvList...)
|
||||
attrs := make([]slog.Attr, 0, record.NumAttrs())
|
||||
record.Attrs(func(attr slog.Attr) bool {
|
||||
attrs = append(attrs, attr)
|
||||
return true
|
||||
})
|
||||
return attrs
|
||||
}
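Because of the nameKey and errKey constants above, WithName chains and the error passed to Error surface as ordinary slog attributes. A short sketch, assuming a standard library text handler; the names and the error text are invented:

```go
package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	lgr := logr.FromSlogHandler(slog.NewTextHandler(os.Stdout, nil))
	lgr = lgr.WithName("db").WithName("pool") // joined as "db/pool" by WithName

	lgr.Error(errors.New("connection reset"), "query failed", "table", "users")
	// The emitted record carries logger="db/pool" and err="connection reset"
	// alongside the regular key/value pairs.
}
```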
|
201
vendor/github.com/go-logr/stdr/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
6
vendor/github.com/go-logr/stdr/README.md
generated
vendored
@@ -1,6 +0,0 @@
# Minimal Go logging using logr and Go's standard library

[](https://pkg.go.dev/github.com/go-logr/stdr)

This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard log package(https://pkg.go.dev/log).
170
vendor/github.com/go-logr/stdr/stdr.go
generated
vendored
@@ -1,170 +0,0 @@
|
||||
/*
|
||||
Copyright 2019 The logr Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package stdr implements github.com/go-logr/logr.Logger in terms of
|
||||
// Go's standard log package.
|
||||
package stdr
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/go-logr/logr/funcr"
|
||||
)
|
||||
|
||||
// The global verbosity level. See SetVerbosity().
|
||||
var globalVerbosity int
|
||||
|
||||
// SetVerbosity sets the global level against which all info logs will be
|
||||
// compared. If this is greater than or equal to the "V" of the logger, the
|
||||
// message will be logged. A higher value here means more logs will be written.
|
||||
// The previous verbosity value is returned. This is not concurrent-safe -
|
||||
// callers must be sure to call it from only one goroutine.
|
||||
func SetVerbosity(v int) int {
|
||||
old := globalVerbosity
|
||||
globalVerbosity = v
|
||||
return old
|
||||
}
|
||||
|
||||
// New returns a logr.Logger which is implemented by Go's standard log package,
|
||||
// or something like it. If std is nil, this will use a default logger
|
||||
// instead.
|
||||
//
|
||||
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
|
||||
func New(std StdLogger) logr.Logger {
|
||||
return NewWithOptions(std, Options{})
|
||||
}
|
||||
|
||||
// NewWithOptions returns a logr.Logger which is implemented by Go's standard
|
||||
// log package, or something like it. See New for details.
|
||||
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
|
||||
if std == nil {
|
||||
// Go's log.Default() is only available in 1.16 and higher.
|
||||
std = log.New(os.Stderr, "", log.LstdFlags)
|
||||
}
|
||||
|
||||
if opts.Depth < 0 {
|
||||
opts.Depth = 0
|
||||
}
|
||||
|
||||
fopts := funcr.Options{
|
||||
LogCaller: funcr.MessageClass(opts.LogCaller),
|
||||
}
|
||||
|
||||
sl := &logger{
|
||||
Formatter: funcr.NewFormatter(fopts),
|
||||
std: std,
|
||||
}
|
||||
|
||||
// For skipping our own logger.Info/Error.
|
||||
sl.Formatter.AddCallDepth(1 + opts.Depth)
|
||||
|
||||
return logr.New(sl)
|
||||
}
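A hedged usage sketch for New, NewWithOptions and SetVerbosity as defined above; the messages and keys are invented for illustration:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))

	logger.Info("always shown at V(0)", "key", "value")
	logger.V(1).Info("hidden until the global verbosity is raised")

	stdr.SetVerbosity(1) // global threshold; not concurrency-safe
	logger.V(1).Info("now shown")
	logger.Error(os.ErrNotExist, "errors ignore the verbosity gate")
}
```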
|
||||
|
||||
// Options carries parameters which influence the way logs are generated.
|
||||
type Options struct {
|
||||
// Depth biases the assumed number of call frames to the "true" caller.
|
||||
// This is useful when the calling code calls a function which then calls
|
||||
// stdr (e.g. a logging shim to another API). Values less than zero will
|
||||
// be treated as zero.
|
||||
Depth int
|
||||
|
||||
// LogCaller tells stdr to add a "caller" key to some or all log lines.
|
||||
// Go's log package has options to log this natively, too.
|
||||
LogCaller MessageClass
|
||||
|
||||
// TODO: add an option to log the date/time
|
||||
}
|
||||
|
||||
// MessageClass indicates which category or categories of messages to consider.
|
||||
type MessageClass int
|
||||
|
||||
const (
|
||||
// None ignores all message classes.
|
||||
None MessageClass = iota
|
||||
// All considers all message classes.
|
||||
All
|
||||
// Info only considers info messages.
|
||||
Info
|
||||
// Error only considers error messages.
|
||||
Error
|
||||
)
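LogCaller takes one of the MessageClass values above. A brief sketch, assuming the vendored import path, of turning on caller annotation for every message; Depth is left at zero because, per its field comment, it only matters when a logging shim adds an extra stack frame:

```go
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	logger := stdr.NewWithOptions(
		log.New(os.Stderr, "", log.LstdFlags),
		stdr.Options{LogCaller: stdr.All}, // add a "caller" key to every line
	)
	logger.Info("with caller information")
}
```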
|
||||
|
||||
// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
|
||||
// this adapter.
|
||||
type StdLogger interface {
|
||||
// Output is the same as log.Output and log.Logger.Output.
|
||||
Output(calldepth int, logline string) error
|
||||
}
|
||||
|
||||
type logger struct {
|
||||
funcr.Formatter
|
||||
std StdLogger
|
||||
}
|
||||
|
||||
var _ logr.LogSink = &logger{}
|
||||
var _ logr.CallDepthLogSink = &logger{}
|
||||
|
||||
func (l logger) Enabled(level int) bool {
|
||||
return globalVerbosity >= level
|
||||
}
|
||||
|
||||
func (l logger) Info(level int, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatInfo(level, msg, kvList)
|
||||
if prefix != "" {
|
||||
args = prefix + ": " + args
|
||||
}
|
||||
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
|
||||
}
|
||||
|
||||
func (l logger) Error(err error, msg string, kvList ...interface{}) {
|
||||
prefix, args := l.FormatError(err, msg, kvList)
|
||||
if prefix != "" {
|
||||
args = prefix + ": " + args
|
||||
}
|
||||
_ = l.std.Output(l.Formatter.GetDepth()+1, args)
|
||||
}
|
||||
|
||||
func (l logger) WithName(name string) logr.LogSink {
|
||||
l.Formatter.AddName(name)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
|
||||
l.Formatter.AddValues(kvList)
|
||||
return &l
|
||||
}
|
||||
|
||||
func (l logger) WithCallDepth(depth int) logr.LogSink {
|
||||
l.Formatter.AddCallDepth(depth)
|
||||
return &l
|
||||
}
|
||||
|
||||
// Underlier exposes access to the underlying logging implementation. Since
|
||||
// callers only have a logr.Logger, they have to know which implementation is
|
||||
// in use, so this interface is less of an abstraction and more of way to test
|
||||
// type conversion.
|
||||
type Underlier interface {
|
||||
GetUnderlying() StdLogger
|
||||
}
|
||||
|
||||
// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
|
||||
// is itself an interface, the result may or may not be a Go log.Logger.
|
||||
func (l logger) GetUnderlying() StdLogger {
|
||||
return l.std
|
||||
}
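GetUnderlying pairs with the Underlier interface above to get back the standard-library logger behind a logr.Logger. An illustrative sketch (the prefix string is invented):

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	std := log.New(os.Stderr, "app: ", log.LstdFlags)
	logger := stdr.New(std)

	if u, ok := logger.GetSink().(stdr.Underlier); ok {
		// GetUnderlying returns the StdLogger passed to New; here that is
		// a concrete *log.Logger, so a further assertion recovers it.
		if l, ok := u.GetUnderlying().(*log.Logger); ok {
			fmt.Println("prefix:", l.Prefix()) // "app: "
		}
	}
}
```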
|
5
vendor/github.com/go-openapi/analysis/.codecov.yml
generated
vendored
@@ -1,5 +0,0 @@
coverage:
  status:
    patch:
      default:
        target: 80%
2
vendor/github.com/go-openapi/analysis/.gitattributes
generated
vendored
@@ -1,2 +0,0 @@
*.go text eol=lf
5
vendor/github.com/go-openapi/analysis/.gitignore
generated
vendored
@@ -1,5 +0,0 @@
secrets.yml
coverage.out
coverage.txt
*.cov
.idea
61
vendor/github.com/go-openapi/analysis/.golangci.yml
generated
vendored
@@ -1,61 +0,0 @@
|
||||
linters-settings:
|
||||
govet:
|
||||
check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
gocyclo:
|
||||
min-complexity: 45
|
||||
maligned:
|
||||
suggest-new: true
|
||||
dupl:
|
||||
threshold: 200
|
||||
goconst:
|
||||
min-len: 2
|
||||
min-occurrences: 3
|
||||
|
||||
linters:
|
||||
enable-all: true
|
||||
disable:
|
||||
- maligned
|
||||
- unparam
|
||||
- lll
|
||||
- gochecknoinits
|
||||
- gochecknoglobals
|
||||
- funlen
|
||||
- godox
|
||||
- gocognit
|
||||
- whitespace
|
||||
- wsl
|
||||
- wrapcheck
|
||||
- testpackage
|
||||
- nlreturn
|
||||
- gomnd
|
||||
- exhaustivestruct
|
||||
- goerr113
|
||||
- errorlint
|
||||
- nestif
|
||||
- godot
|
||||
- gofumpt
|
||||
- paralleltest
|
||||
- tparallel
|
||||
- thelper
|
||||
- ifshort
|
||||
- exhaustruct
|
||||
- varnamelen
|
||||
- gci
|
||||
- depguard
|
||||
- errchkjson
|
||||
- inamedparam
|
||||
- nonamedreturns
|
||||
- musttag
|
||||
- ireturn
|
||||
- forcetypeassert
|
||||
- cyclop
|
||||
# deprecated linters
|
||||
- deadcode
|
||||
- interfacer
|
||||
- scopelint
|
||||
- varcheck
|
||||
- structcheck
|
||||
- golint
|
||||
- nosnakecase
|
74
vendor/github.com/go-openapi/analysis/CODE_OF_CONDUCT.md
generated
vendored
@@ -1,74 +0,0 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as
|
||||
contributors and maintainers pledge to making participation in our project and
|
||||
our community a harassment-free experience for everyone, regardless of age, body
|
||||
size, disability, ethnicity, gender identity and expression, level of experience,
|
||||
nationality, personal appearance, race, religion, or sexual identity and
|
||||
orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment
|
||||
include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or
|
||||
advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic
|
||||
address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a
|
||||
professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable
|
||||
behavior and are expected to take appropriate and fair corrective action in
|
||||
response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or
|
||||
reject comments, commits, code, wiki edits, issues, and other contributions
|
||||
that are not aligned to this Code of Conduct, or to ban temporarily or
|
||||
permanently any contributor for other behaviors that they deem inappropriate,
|
||||
threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces
|
||||
when an individual is representing the project or its community. Examples of
|
||||
representing a project or community include using an official project e-mail
|
||||
address, posting via an official social media account, or acting as an appointed
|
||||
representative at an online or offline event. Representation of a project may be
|
||||
further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be
|
||||
reported by contacting the project team at ivan+abuse@flanders.co.nz. All
|
||||
complaints will be reviewed and investigated and will result in a response that
|
||||
is deemed necessary and appropriate to the circumstances. The project team is
|
||||
obligated to maintain confidentiality with regard to the reporter of an incident.
|
||||
Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good
|
||||
faith may face temporary or permanent repercussions as determined by other
|
||||
members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
|
||||
available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
202
vendor/github.com/go-openapi/analysis/LICENSE
generated
vendored
@@ -1,202 +0,0 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
27
vendor/github.com/go-openapi/analysis/README.md
generated
vendored
@@ -1,27 +0,0 @@
# OpenAPI analysis [](https://github.com/go-openapi/analysis/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/analysis)

[](https://slackin.goswagger.io)
[](https://raw.githubusercontent.com/go-openapi/analysis/master/LICENSE)
[](https://pkg.go.dev/github.com/go-openapi/analysis)
[](https://goreportcard.com/report/github.com/go-openapi/analysis)


A foundational library to analyze an OAI specification document for easier reasoning about the content.

## What's inside?

* An analyzer providing methods to walk the functional content of a specification
* A spec flattener producing a self-contained document bundle, while preserving `$ref`s
* A spec merger ("mixin") to merge several spec documents into a primary spec
* A spec "fixer" ensuring that response descriptions are non empty

[Documentation](https://pkg.go.dev/github.com/go-openapi/analysis)

## FAQ

* Does this library support OpenAPI 3?

> No.
> This package currently only supports OpenAPI 2.0 (aka Swagger 2.0).
> There is no plan to make it evolve toward supporting OpenAPI 3.x.
> This [discussion thread](https://github.com/go-openapi/spec/issues/21) relates the full story.
1064
vendor/github.com/go-openapi/analysis/analyzer.go
generated
vendored
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff.